//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
                          RuntimeDyld::SymbolResolver &Resolver)
      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }

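  // Note: for MachO/AArch64 a "stub" is just an 8-byte GOT slot holding the
  // 64-bit target address (see processGOTRelocation below), hence the 8-byte
  // stub size and alignment above.
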
  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
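      // (In the A64 encoding of ADRP, immlo occupies bits 30:29 and immhi
      // bits 23:5; the masks 0x60000000 and 0x01FFFFE0 below extract those
      // two fields.)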
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
      Addend = SignExtend64(Addend, 33);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
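      // (LDR/STR with an unsigned 12-bit offset scales that immediate by the
      // access size, so the encoded value is the byte offset divided by the
      // size; ADD/SUB immediates are used unscaled.)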
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
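          // (Bit 26 is the SIMD&FP "V" bit and bit 23 the upper opc bit; with
          // size == 00, both set together identify a 128-bit vector LDR/STR,
          // which scales its offset by 16.)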
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend into the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
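      // (Addend bits 13:12 become immlo, instruction bits 30:29; addend bits
      // 32:14 become immhi, instruction bits 23:5.)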
      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p;
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
      break;
    }
    }
  }

  Expected<relocation_iterator>
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       const ObjectFile &BaseObjT,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {
    const MachOObjectFile &Obj =
      static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    if (Obj.isRelocationScattered(RelInfo))
      return make_error<RuntimeDyldError>("Scattered relocations not supported "
                                          "for MachO AArch64");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend
    // in place of the addend encoded in the instruction.
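    // (The addend value is carried in the r_symbolnum field of the
    // ARM64_RELOC_ADDEND entry, which is why it is read with
    // getPlainRelocationSymbolNum below.)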
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
      return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);

    assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "
           "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
    if (ExplicitAddend)
      RE.Addend = ExplicitAddend;

    RelocationValueRef Value;
    if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
      Value = *ValueOrErr;
    else
      return ValueOrErr.takeError();

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
    MachO::RelocationInfoType RelType =
      static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // Mask in the target value a byte at a time (we don't have an alignment
      // guarantee for the target address, so this is safest).
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Compute the PC-relative branch displacement; encodeAddend asserts
      // that it is in range for a 26-bit branch.
      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation and offset.
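      // (ADRP materializes the 4 KiB page of the target, so both the target
      // and the fixup address are rounded down to a page boundary before the
      // difference is taken.)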
      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
      int64_t PCRelVal =
        ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR: {
      uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
      uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
      assert((Value == SectionABase || Value == SectionBBase) &&
             "Unexpected SUBTRACTOR relocation value.");
      Value = SectionABase - SectionBBase + RE.Addend;
      writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
      break;
    }
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                        const SectionRef &Section) {
    return Error::success();
  }

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
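      // Round the next stub address up to the stub alignment; for a
      // power-of-two alignment A, (x + A - 1) & -A is the usual align-up
      // idiom.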
      uintptr_t BaseAddress = uintptr_t(Section.getAddress());
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.advanceStubOffset(getMaxStubSize());
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }

  Expected<relocation_iterator>
  processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
                            const ObjectFile &BaseObjT,
                            ObjSectionToIDMap &ObjSectionToID) {
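    // An ARM64_RELOC_SUBTRACTOR is always paired with a following
    // ARM64_RELOC_UNSIGNED: the SUBTRACTOR names the symbol to subtract (B)
    // and the UNSIGNED names the symbol to add (A). The fixup location holds
    // the addend and ends up containing A - B + addend (see the
    // ARM64_RELOC_SUBTRACTOR case in resolveRelocation above).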
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RE =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    unsigned Size = Obj.getAnyRelocationLength(RE);
    uint64_t Offset = RelI->getOffset();
    uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
    unsigned NumBytes = 1 << Size;

    Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
    if (!SubtrahendNameOrErr)
      return SubtrahendNameOrErr.takeError();
    auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
    unsigned SectionBID = SubtrahendI->second.getSectionID();
    uint64_t SectionBOffset = SubtrahendI->second.getOffset();
    int64_t Addend =
      SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);

    ++RelI;
    Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
    if (!MinuendNameOrErr)
      return MinuendNameOrErr.takeError();
    auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
    unsigned SectionAID = MinuendI->second.getSectionID();
    uint64_t SectionAOffset = MinuendI->second.getOffset();

    RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR,
                      (uint64_t)Addend, SectionAID, SectionAOffset, SectionBID,
                      SectionBOffset, false, Size);

    addRelocationForSection(R, SectionAID);

    return ++RelI;
  }

};
}

#undef DEBUG_TYPE

#endif