//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
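  // All PC-relative AArch64 fixups also carry FKF_IsAlignedDownTo32Bits, so
  // they are evaluated relative to the 4-byte-aligned address of the
  // instruction word containing them.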
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

public:
  AArch64AsmBackend(const Target &T) : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

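  // In the table below, "Offset" is the bit position of the fixup field within
  // the 32-bit instruction word and "Size" is the field's width in bits;
  // applyFixup() shifts the adjusted value left by Offset before OR-ing it
  // into the target bytes.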
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // AArch64FixupKinds.h.
      //
      // Name                           Offset (bits) Size (bits)     Flags
      { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal },
      { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal },
      { "fixup_aarch64_add_imm12", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 },
      { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 },
      { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal },
      { "fixup_aarch64_movw", 5, 16, 0 },
      { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal },
      { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal },
      { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal },
      { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal },
      { "fixup_aarch64_tlsdesc_call", 0, 0, 0 }
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;

  bool mayNeedRelaxation(const MCInst &Inst) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }
};

} // end anonymous namespace

/// \brief The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case AArch64::fixup_aarch64_movw:
    return 2;

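  // These fixups patch fields that lie entirely within bits 5..23 of the
  // instruction word, so only the low three bytes of the little-endian
  // instruction need to be rewritten.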
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

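// AdrImmBits splits a 21-bit ADR/ADRP immediate into the instruction's immlo
// (bits 30:29) and immhi (bits 23:5) fields. For example, Value = 0x12345
// gives lo2 = 0x1 and hi19 = 0x48d1, i.e. (0x48d1 << 5) | (0x1 << 29).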
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Kind) {
  default:
    assert(false && "Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      report_fatal_error("fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      report_fatal_error("invalid imm12 fixup value");
    return Value;
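  // The scaled variants below store the byte offset divided by the access
  // size, so the offset must be a multiple of that size. For example, an
  // 8-byte load at byte offset 0x50 uses the scale8 fixup and encodes
  // imm12 = 0x50 >> 3 = 0xa.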
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value & 1 || Value >= 0x2000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value & 3 || Value >= 0x4000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value & 7 || Value >= 0x8000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value & 15 || Value >= 0x10000)
      report_fatal_error("invalid imm12 fixup value");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw:
    report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet");
    return Value;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      report_fatal_error("fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      report_fatal_error("fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      report_fatal_error("fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return Value;
  }
}

void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                   unsigned DataSize, uint64_t Value,
                                   bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup.getKind(), Value);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

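  // The bytes are OR'd in little-endian order, matching AArch64's fixed
  // little-endian instruction encoding; the big-endian ELF subclass below
  // byte-swaps .eh_frame data words before delegating to this routine.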
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
}

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME:  This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         MCInst &Res) const {
  assert(false && "AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  if ((Count & 3) != 0) {
    for (uint64_t i = 0, e = (Count & 3); i != e; ++i)
      OW->Write8(0);
  }

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
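  // 0xd503201f is the encoding of the AArch64 NOP instruction (HINT #0).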
  for (uint64_t i = 0; i != Count; ++i)
    OW->Write32(0xd503201f);
  return true;
}

namespace {

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// \brief A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_AArch64_MODE_FRAMELESS = 0x02000000,

  /// \brief No compact unwind encoding is available. Instead the low 23 bits
  /// of the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_AArch64_MODE_DWARF = 0x03000000,

  /// \brief This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_AArch64_MODE_FRAME = 0x04000000,

  /// \brief Frame register pair encodings.
  UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_AArch64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_AArch64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
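  /// For example, a 496-byte frame is encoded as (496 / 16) << 12, placing the
  /// value 31 in the frameless stack-size field of the encoding.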
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T), MRI(MRI) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  bool doesSectionRequireSymbols(const MCSection &Section) const override {
    // Any section for which the linker breaks things into atoms needs to
    // preserve symbols, including assembler local symbols, to identify
    // those atoms. These sections are:
    // Sections of type:
    //
    //    S_CSTRING_LITERALS  (e.g. __cstring)
    //    S_LITERAL_POINTERS  (e.g.  objc selector pointers)
    //    S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
    //
    // Sections named:
    //
    //    __TEXT,__eh_frame
    //    __TEXT,__ustring
    //    __DATA,__cfstring
    //    __DATA,__objc_classrefs
    //    __DATA,__objc_catlist
    //
    // FIXME: It would be better if the compiler used actual linker local
    // symbols for each of these sections rather than preserving what
    // are ostensibly assembler local symbols.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
    return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
            SMO.getType() == MachO::S_4BYTE_LITERALS ||
            SMO.getType() == MachO::S_8BYTE_LITERALS ||
            SMO.getType() == MachO::S_16BYTE_LITERALS ||
            SMO.getType() == MachO::S_LITERAL_POINTERS ||
            (SMO.getSegmentName() == "__TEXT" &&
             (SMO.getSectionName() == "__eh_frame" ||
              SMO.getSectionName() == "__ustring")) ||
            (SMO.getSegmentName() == "__DATA" &&
             (SMO.getSectionName() == "__cfstring" ||
              SMO.getSectionName() == "__objc_classrefs" ||
              SMO.getSectionName() == "__objc_catlist")));
  }

  /// \brief Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_AArch64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive:  bail out.
        return CU::UNWIND_AArch64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
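        // A standard frame is typically described by a sequence such as
        //   .cfi_def_cfa w29, 16
        //   .cfi_offset w30, -8
        //   .cfi_offset w29, -16
        // where the CFI registers map back to W registers, hence the
        // getXRegFromWReg() translations below.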
        assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
                   AArch64::FP &&
               "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_AArch64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_AArch64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

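        // Each mask test below (e.g. "& 0xF1E" for the X19/X20 pair) checks
        // that no higher-numbered pair has been recorded yet, enforcing the
        // register-number ordering the encoding requires.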
        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_AArch64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_AArch64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsLittleEndian;

  ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian)
    : AArch64AsmBackend(T), OSABI(OSABI), IsLittleEndian(IsLittleEndian) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian);
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;
};

void ELFAArch64AsmBackend::processFixupValue(
    const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup,
    const MCFragment *DF, const MCValue &Target, uint64_t &Value,
    bool &IsResolved) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    IsResolved = false;
}

void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                      unsigned DataSize, uint64_t Value,
                                      bool IsPCRel) const {
  // Store fixups into the .eh_frame section in big-endian byte order.
  if (!IsLittleEndian && Fixup.getKind() == FK_Data_4) {
    const MCSection *Sec = Fixup.getValue()->FindAssociatedSection();
    const MCSectionELF *SecELF = static_cast<const MCSectionELF *>(Sec);
    if (SecELF->getSectionName() == ".eh_frame")
      Value = ByteSwap_32(unsigned(Value));
  }
  AArch64AsmBackend::applyFixup(Fixup, Data, DataSize, Value, IsPCRel);
}

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            StringRef TT, StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin())
    return new DarwinAArch64AsmBackend(T, MRI);

  assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
  return new ELFAArch64AsmBackend(T, TheTriple.getOS(), /*IsLittleEndian=*/true);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            StringRef TT, StringRef CPU) {
  Triple TheTriple(TT);

  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  return new ELFAArch64AsmBackend(T, TheTriple.getOS(),
                                  /*IsLittleEndian=*/false);
}
    567