/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

class Arm64RelativePatcherTest : public RelativePatcherTest {
 public:
  explicit Arm64RelativePatcherTest(const std::string& variant)
      : RelativePatcherTest(kArm64, variant) { }

 protected:
  static const uint8_t kCallRawCode[];
  static const ArrayRef<const uint8_t> kCallCode;
  static const uint8_t kNopRawCode[];
  static const ArrayRef<const uint8_t> kNopCode;

  // All branches can be created from kBlPlus0 or kBPlus0 by adding the low 26 bits.
  static constexpr uint32_t kBlPlus0 = 0x94000000u;
  static constexpr uint32_t kBPlus0 = 0x14000000u;

  // Special BL values.
  static constexpr uint32_t kBlPlusMax = 0x95ffffffu;
  static constexpr uint32_t kBlMinusMax = 0x96000000u;
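
  // Editor's note: worked examples added for clarity (not in the original test). With
  // the A64 BL encoding (imm26 in 4-byte units), +(128MiB - 4) is the largest forward
  // displacement and -128MiB the largest backward displacement.
  static_assert((kBlPlus0 | (((128 * MB - 4u) >> 2) & 0x03ffffffu)) == kBlPlusMax,
                "kBlPlusMax should encode BL +(128MiB - 4).");
  static_assert((kBlPlus0 | ((static_cast<uint32_t>(-(128 * MB)) >> 2) & 0x03ffffffu))
                    == kBlMinusMax,
                "kBlMinusMax should encode BL -128MiB.");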

  // LDR immediate, 32-bit.
  static constexpr uint32_t kLdrWInsn = 0xb9400000u;
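
  // Editor's note: a worked example (not in the original test): LDR w1, [x0, #8] has
  // imm12 = 8 >> 2 = 2 at bit 10 and Rt = 1.
  static_assert((kLdrWInsn | (2u << 10) | 1u) == 0xb9400801u,
                "LDR w1, [x0, #8] should encode as 0xb9400801.");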

  // ADD/ADDS/SUB/SUBS immediate, 64-bit.
  static constexpr uint32_t kAddXInsn = 0x91000000u;
  static constexpr uint32_t kAddsXInsn = 0xb1000000u;
  static constexpr uint32_t kSubXInsn = 0xd1000000u;
  static constexpr uint32_t kSubsXInsn = 0xf1000000u;
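
  // Editor's note: a worked example (not in the original test), matching the SUB used
  // by the SubX3X2 tests below: SUB x3, x2, #100 has imm12 = 100 at bit 10, Rn = 2
  // and Rd = 3.
  static_assert((kSubXInsn | (100u << 10) | (2u << 5) | 3u) == 0xd1019043u,
                "SUB x3, x2, #100 should encode as 0xd1019043.");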

  // LDUR xzr, [x2, #4], i.e. an unaligned load crossing a 64-bit boundary (assuming aligned x2).
  static constexpr uint32_t kLdurInsn = 0xf840405fu;
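
  // Editor's note: a decode check for the comment above (not in the original test):
  // Rt = kLdurInsn & 0x1f = 31 (xzr) and Rn = (kLdurInsn >> 5) & 0x1f = 2.
  static_assert((kLdurInsn & 0x1fu) == 31u && ((kLdurInsn >> 5) & 0x1fu) == 2u,
                "kLdurInsn should be LDUR xzr, [x2, #4].");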

  // LDR w12, <label> and LDR x12, <label>. Bits 5-23 contain the label displacement in 4-byte units.
  static constexpr uint32_t kLdrWPcRelInsn = 0x1800000cu;
  static constexpr uint32_t kLdrXPcRelInsn = 0x5800000cu;
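
  // Editor's note: a worked example (not in the original test), using the same formula
  // as TestAdrpLdrPcRelLdr() below: a +0x1234 label displacement gives
  // imm19 = 0x1234 >> 2 = 0x48d at bit 5.
  static_assert((kLdrWPcRelInsn | (((0x1234u >> 2) & 0x7ffffu) << 5)) == 0x180091acu,
                "LDR w12, <label+0x1234> should encode as 0x180091ac.");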
     58 
     59   // LDR w13, [SP, #<pimm>] and LDR x13, [SP, #<pimm>]. Bits 10-21 contain displacement from SP
     60   // in units of 4-bytes (for 32-bit load) or 8-bytes (for 64-bit load).
     61   static constexpr uint32_t kLdrWSpRelInsn = 0xb94003edu;
     62   static constexpr uint32_t kLdrXSpRelInsn = 0xf94003edu;
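
  // Editor's note: a worked example (not in the original test): LDR w13, [sp, #4] has
  // pimm in 4-byte units, so imm12 = 1 at bit 10.
  static_assert((kLdrWSpRelInsn | (1u << 10)) == 0xb94007edu,
                "LDR w13, [sp, #4] should encode as 0xb94007ed.");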

  uint32_t Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
                                 const ArrayRef<const LinkerPatch>& method1_patches,
                                 const ArrayRef<const uint8_t>& last_method_code,
                                 const ArrayRef<const LinkerPatch>& last_method_patches,
                                 uint32_t distance_without_thunks) {
    CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
    const uint32_t method1_offset =
        CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
    AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
    const uint32_t gap_start =
        CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);

    // We want to put the last method at a very precise offset.
    const uint32_t last_method_offset = method1_offset + distance_without_thunks;
    const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
    CHECK_ALIGNED(gap_end, kArm64Alignment);

    // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
    // (This allows deduplicating the small chunks so that this test does not use 256MiB
    // of memory for +-128MiB offsets.)
    uint32_t method_idx = 2u;
    constexpr uint32_t kSmallChunkSize = 2 * MB;
    std::vector<uint8_t> gap_code;
    size_t gap_size = gap_end - gap_start;
    for (; gap_size >= 2u * kSmallChunkSize; gap_size -= kSmallChunkSize) {
      uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
      gap_code.resize(chunk_code_size, 0u);
      AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
                        ArrayRef<const LinkerPatch>());
      method_idx += 1u;
    }
    uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
    gap_code.resize(chunk_code_size, 0u);
    AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
                      ArrayRef<const LinkerPatch>());
    method_idx += 1u;

    // Add the last method and link.
    AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
    Link();

    // Check assumptions.
    CHECK_EQ(GetMethodOffset(1), method1_offset);
    auto last_result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(last_result.first);
    // There may be a thunk before the last method.
    if (last_result.second != last_method_offset) {
      // Thunk present. Check that there's only one.
      uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kArm64);
      CHECK_EQ(last_result.second, last_method_offset + aligned_thunk_size);
    }
    return method_idx;
  }

  uint32_t GetMethodOffset(uint32_t method_idx) {
    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(result.first);
    CHECK_ALIGNED(result.second, 4u);
    return result.second;
  }

  uint32_t ThunkSize() {
    return static_cast<Arm64RelativePatcher*>(patcher_.get())->thunk_code_.size();
  }

  bool CheckThunk(uint32_t thunk_offset) {
    Arm64RelativePatcher* patcher = static_cast<Arm64RelativePatcher*>(patcher_.get());
    ArrayRef<const uint8_t> expected_code(patcher->thunk_code_);
    if (output_.size() < thunk_offset + expected_code.size()) {
      LOG(ERROR) << "output_.size() == " << output_.size() << " < "
          << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size());
      return false;
    }
    ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size());
    if (linked_code == expected_code) {
      return true;
    }
    // Log failure info.
    DumpDiff(expected_code, linked_code);
    return false;
  }

  std::vector<uint8_t> GenNopsAndBl(size_t num_nops, uint32_t bl) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 4u);
    for (size_t i = 0; i != num_nops; ++i) {
      result.insert(result.end(), kNopCode.begin(), kNopCode.end());
    }
    result.push_back(static_cast<uint8_t>(bl));
    result.push_back(static_cast<uint8_t>(bl >> 8));
    result.push_back(static_cast<uint8_t>(bl >> 16));
    result.push_back(static_cast<uint8_t>(bl >> 24));
    return result;
  }
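
  // Editor's note (illustration, not in the original test): GenNopsAndBl(1u, kBlPlus0)
  // returns the little-endian bytes { 0x1f, 0x20, 0x03, 0xd5, 0x00, 0x00, 0x00, 0x94 },
  // i.e. a NOP followed by BL +0.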

  std::vector<uint8_t> GenNopsAndAdrpAndUse(size_t num_nops,
                                            uint32_t method_offset,
                                            uint32_t target_offset,
                                            uint32_t use_insn) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 8u);
    for (size_t i = 0; i != num_nops; ++i) {
      result.insert(result.end(), kNopCode.begin(), kNopCode.end());
    }
    CHECK_ALIGNED(method_offset, 4u);
    CHECK_ALIGNED(target_offset, 4u);
    uint32_t adrp_offset = method_offset + num_nops * 4u;
    uint32_t disp = target_offset - (adrp_offset & ~0xfffu);
    if (use_insn == kLdrWInsn) {
      DCHECK_ALIGNED(disp, 1u << 2);
      use_insn |= 1 |                         // LDR w1, [x0, #(imm12 << 2)]
          ((disp & 0xfffu) << (10 - 2));      // imm12 = ((disp & 0xfffu) >> 2) is at bit 10.
    } else if (use_insn == kAddXInsn) {
      use_insn |= 1 |                         // ADD x1, x0, #imm
          (disp & 0xfffu) << 10;              // imm12 = (disp & 0xfffu) is at bit 10.
    } else {
      LOG(FATAL) << "Unexpected instruction: 0x" << std::hex << use_insn;
    }
    uint32_t adrp = 0x90000000 |              // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
        ((disp & 0x3000u) << (29 - 12)) |     // immlo = ((disp & 0x3000u) >> 12) is at bit 29,
        ((disp & 0xffffc000) >> (14 - 5)) |   // immhi = (disp >> 14) is at bit 5,
        // We take the sign bit from the disp, limiting disp to +- 2GiB.
        ((disp & 0x80000000) >> (31 - 23));   // sign bit in immhi is at bit 23.
    result.push_back(static_cast<uint8_t>(adrp));
    result.push_back(static_cast<uint8_t>(adrp >> 8));
    result.push_back(static_cast<uint8_t>(adrp >> 16));
    result.push_back(static_cast<uint8_t>(adrp >> 24));
    result.push_back(static_cast<uint8_t>(use_insn));
    result.push_back(static_cast<uint8_t>(use_insn >> 8));
    result.push_back(static_cast<uint8_t>(use_insn >> 16));
    result.push_back(static_cast<uint8_t>(use_insn >> 24));
    return result;
  }
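
  // Editor's note: a worked example of the ADRP encoding above (not in the original
  // test): for disp = 0x12345000, immlo = 1 and immhi = 0x48d1, so the ADRP x0
  // encoding is 0xb0091a20.
  static_assert((0x90000000u | ((0x12345000u & 0x3000u) << (29 - 12)) |
                 ((0x12345000u & 0xffffc000u) >> (14 - 5)) |
                 ((0x12345000u & 0x80000000u) >> (31 - 23))) == 0xb0091a20u,
                "ADRP x0, +0x12345000 should encode as 0xb0091a20.");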

  std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops,
                                         uint32_t method_offset,
                                         uint32_t target_offset) {
    return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kLdrWInsn);
  }

  void TestNopsAdrpLdr(size_t num_nops, uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    dex_cache_arrays_begin_ = dex_cache_arrays_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    LinkerPatch patches[] = {
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, element_offset),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();

    uint32_t method1_offset = GetMethodOffset(1u);
    uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
    auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  std::vector<uint8_t> GenNopsAndAdrpAdd(size_t num_nops,
                                         uint32_t method_offset,
                                         uint32_t target_offset) {
    return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kAddXInsn);
  }

  void TestNopsAdrpAdd(size_t num_nops, uint32_t string_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_offset);
    auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
    LinkerPatch patches[] = {
        LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::RelativeStringPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();

    uint32_t method1_offset = GetMethodOffset(1u);
    auto expected_code = GenNopsAndAdrpAdd(num_nops, method1_offset, string_offset);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) {
    CHECK_LE(pos, code->size());
    const uint8_t insn_code[] = {
        static_cast<uint8_t>(insn), static_cast<uint8_t>(insn >> 8),
        static_cast<uint8_t>(insn >> 16), static_cast<uint8_t>(insn >> 24),
    };
    static_assert(sizeof(insn_code) == 4u, "Invalid sizeof(insn_code).");
    code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code));
  }

  void PrepareNopsAdrpInsn2Ldr(size_t num_nops,
                               uint32_t insn2,
                               uint32_t dex_cache_arrays_begin,
                               uint32_t element_offset) {
    dex_cache_arrays_begin_ = dex_cache_arrays_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    InsertInsn(&code, num_nops * 4u + 4u, insn2);
    LinkerPatch patches[] = {
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, element_offset),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();
  }

  void PrepareNopsAdrpInsn2Add(size_t num_nops, uint32_t insn2, uint32_t string_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_offset);
    auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
    InsertInsn(&code, num_nops * 4u + 4u, insn2);
    LinkerPatch patches[] = {
        LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::RelativeStringPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();
  }

  void TestNopsAdrpInsn2AndUse(size_t num_nops,
                               uint32_t insn2,
                               uint32_t target_offset,
                               uint32_t use_insn) {
    uint32_t method1_offset = GetMethodOffset(1u);
    auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void TestNopsAdrpInsn2AndUseHasThunk(size_t num_nops,
                                       uint32_t insn2,
                                       uint32_t target_offset,
                                       uint32_t use_insn) {
    uint32_t method1_offset = GetMethodOffset(1u);
    CHECK(!compiled_method_refs_.empty());
    CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
    CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
    uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
    uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
    uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
    CHECK_ALIGNED(b_diff, 4u);
    ASSERT_LT(b_diff, 128 * MB);
    uint32_t b_out = kBPlus0 + ((b_diff >> 2) & 0x03ffffffu);
    uint32_t b_in = kBPlus0 + ((-b_diff >> 2) & 0x03ffffffu);

    auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    // Replace the ADRP with a B to the thunk.
    expected_code.erase(expected_code.begin() + num_nops * 4u,
                        expected_code.begin() + num_nops * 4u + 4u);
    InsertInsn(&expected_code, num_nops * 4u, b_out);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));

    auto expected_thunk_code = GenNopsAndAdrpLdr(0u, thunk_offset, target_offset);
    ASSERT_EQ(expected_thunk_code.size(), 8u);
    expected_thunk_code.erase(expected_thunk_code.begin() + 4u, expected_thunk_code.begin() + 8u);
    InsertInsn(&expected_thunk_code, 4u, b_in);
    ASSERT_EQ(expected_thunk_code.size(), 8u);

    uint32_t thunk_size = ThunkSize();
    ASSERT_EQ(thunk_offset + thunk_size, output_.size());
    ASSERT_EQ(thunk_size, expected_thunk_code.size());
    ArrayRef<const uint8_t> thunk_code(&output_[thunk_offset], thunk_size);
    if (ArrayRef<const uint8_t>(expected_thunk_code) != thunk_code) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk_code), thunk_code);
      FAIL();
    }
  }

  void TestAdrpInsn2Ldr(uint32_t insn2,
                        uint32_t adrp_offset,
                        bool has_thunk,
                        uint32_t dex_cache_arrays_begin,
                        uint32_t element_offset) {
    uint32_t method1_offset =
        CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
    ASSERT_LT(method1_offset, adrp_offset);
    CHECK_ALIGNED(adrp_offset, 4u);
    uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
    PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset);
    uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
    if (has_thunk) {
      TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, target_offset, kLdrWInsn);
    } else {
      TestNopsAdrpInsn2AndUse(num_nops, insn2, target_offset, kLdrWInsn);
    }
    ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
  }

  void TestAdrpLdurLdr(uint32_t adrp_offset,
                       bool has_thunk,
                       uint32_t dex_cache_arrays_begin,
                       uint32_t element_offset) {
    TestAdrpInsn2Ldr(kLdurInsn, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }

  void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn,
                           int32_t pcrel_disp,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t dex_cache_arrays_begin,
                           uint32_t element_offset) {
    ASSERT_LT(pcrel_disp, 0x100000);
    ASSERT_GE(pcrel_disp, -0x100000);
    ASSERT_EQ(pcrel_disp & 0x3, 0);
    uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }

  void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn,
                           uint32_t sprel_disp_in_load_units,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t dex_cache_arrays_begin,
                           uint32_t element_offset) {
    ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
    uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }

  void TestAdrpInsn2Add(uint32_t insn2,
                        uint32_t adrp_offset,
                        bool has_thunk,
                        uint32_t string_offset) {
    uint32_t method1_offset =
        CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
    ASSERT_LT(method1_offset, adrp_offset);
    CHECK_ALIGNED(adrp_offset, 4u);
    uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
    PrepareNopsAdrpInsn2Add(num_nops, insn2, string_offset);
    if (has_thunk) {
      TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, string_offset, kAddXInsn);
    } else {
      TestNopsAdrpInsn2AndUse(num_nops, insn2, string_offset, kAddXInsn);
    }
    ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
  }

  void TestAdrpLdurAdd(uint32_t adrp_offset, bool has_thunk, uint32_t string_offset) {
    TestAdrpInsn2Add(kLdurInsn, adrp_offset, has_thunk, string_offset);
  }

  void TestAdrpLdrPcRelAdd(uint32_t pcrel_ldr_insn,
                           int32_t pcrel_disp,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t string_offset) {
    ASSERT_LT(pcrel_disp, 0x100000);
    ASSERT_GE(pcrel_disp, -0x100000);
    ASSERT_EQ(pcrel_disp & 0x3, 0);
    uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
    TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
  }

  void TestAdrpLdrSpRelAdd(uint32_t sprel_ldr_insn,
                           uint32_t sprel_disp_in_load_units,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t string_offset) {
    ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
    uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
    TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
  }
};

const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = {
    0x00, 0x00, 0x00, 0x94
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kCallCode(kCallRawCode);

const uint8_t Arm64RelativePatcherTest::kNopRawCode[] = {
    0x1f, 0x20, 0x03, 0xd5
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kNopCode(kNopRawCode);

class Arm64RelativePatcherTestDefault : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
};

class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
};

TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
  LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  static const uint8_t expected_code[] = {
      0x00, 0x00, 0x00, 0x94
  };
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOther) {
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
  LinkerPatch method2_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t method2_offset = GetMethodOffset(2u);
  uint32_t diff_after = method2_offset - method1_offset;
  CHECK_ALIGNED(diff_after, 4u);
  ASSERT_LT(diff_after >> 2, 1u << 8);  // Simple encoding, (diff_after >> 2) fits into 8 bits.
  static const uint8_t method1_expected_code[] = {
      static_cast<uint8_t>(diff_after >> 2), 0x00, 0x00, 0x94
  };
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
  uint32_t diff_before = method1_offset - method2_offset;
  CHECK_ALIGNED(diff_before, 4u);
  ASSERT_GE(diff_before, -1u << 27);
  auto method2_expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff_before >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallTrampoline) {
  LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t diff = kTrampolineOffset - method1_offset;
  ASSERT_EQ(diff & 1u, 0u);
  ASSERT_GE(diff, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned).
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) {
  constexpr uint32_t missing_method_index = 1024u;
  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, missing_method_index),
  };

  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
  uint32_t last_method_idx = Create2MethodsWithGap(
      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
      ArrayRef<const LinkerPatch>(last_method_patches),
      just_over_max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset,
            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);
  ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);

  // Check linked code.
  uint32_t thunk_offset =
      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
  EXPECT_TRUE(CheckThunk(thunk_offset));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t max_positive_disp = 128 * MB - 4u;
  uint32_t last_method_idx = Create2MethodsWithGap(method1_code,
                                                   ArrayRef<const LinkerPatch>(method1_patches),
                                                   kNopCode,
                                                   ArrayRef<const LinkerPatch>(),
                                                   bl_offset_in_method1 + max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset + bl_offset_in_method1 + max_positive_disp, last_method_offset);

  // Check linked code.
  auto expected_code = GenNopsAndBl(1u, kBlPlusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t max_negative_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(kNopCode,
                                                   ArrayRef<const LinkerPatch>(),
                                                   last_method_code,
                                                   ArrayRef<const LinkerPatch>(last_method_patches),
                                                   max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset, last_method_offset + bl_offset_in_last_method - max_negative_disp);

  // Check linked code.
  auto expected_code = GenNopsAndBl(0u, kBlMinusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t just_over_max_positive_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(
      method1_code,
      ArrayRef<const LinkerPatch>(method1_patches),
      kNopCode,
      ArrayRef<const LinkerPatch>(),
      bl_offset_in_method1 + just_over_max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
  ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_header_offset));
  uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64);
  ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
  uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  CheckThunk(thunk_offset);
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
  uint32_t last_method_idx = Create2MethodsWithGap(
      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
      ArrayRef<const LinkerPatch>(last_method_patches),
      just_over_max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset,
            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);

  // Check linked code.
  uint32_t thunk_offset =
      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
  EXPECT_TRUE(CheckThunk(thunk_offset));
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference1) {
  TestNopsAdrpLdr(0u, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference2) {
  TestNopsAdrpLdr(0u, -0x12345678u, 0x4444u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference3) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x3ffcu);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference4) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x4000u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference1) {
  TestNopsAdrpAdd(0u, 0x12345678u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference2) {
  TestNopsAdrpAdd(0u, -0x12345678u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference3) {
  TestNopsAdrpAdd(0u, 0x12345000u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference4) {
  TestNopsAdrpAdd(0u, 0x12345ffcu);
}

#define TEST_FOR_OFFSETS(test, disp1, disp2) \
  test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \
  test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2)

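// Editor's note (added commentary, not in the original test): the adrp_offset values
// 0xff4u..0x1000u used below place the ADRP near a 4KiB page boundary. The 0xff8u and
// 0xffcu cases put the ADRP in the last two words of the page, where the default
// variant is expected to emit a fixup thunk; this corresponds to the Cortex-A53
// erratum 843419 workaround in the arm64 relative patcher.
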
#define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## Ldur ## disp) { \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
    TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_LDUR_LDR_TEST, 0x1234, 0x1238)

#define DENVER64_LDUR_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference ## adrp_offset ## Ldur ## disp) { \
    TestAdrpLdurLdr(adrp_offset, false, 0x12345678u, disp); \
  }

TEST_FOR_OFFSETS(DENVER64_LDUR_LDR_TEST, 0x1234, 0x1238)

// LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
#define LDRW_PCREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WPcRel ## disp) { \
    TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_PCREL_LDR_TEST, 0x1234, 0x1238)

// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XPcRel ## disp) { \
    bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \
    TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRX_PCREL_LDR_TEST, 0x1234, 0x1238)

// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4)

#define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8)

#define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
    TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_LDUR_ADD_TEST, 0x12345678, 0xffffc840)

#define DENVER64_LDUR_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDenver64, StringReference ## adrp_offset ## Ldur ## disp) { \
    TestAdrpLdurAdd(adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DENVER64_LDUR_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_SUBX3X2_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubX3X2 ## disp) { \
    /* SUB unrelated to "ADRP x0, addr". */ \
    uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u;  /* SUB x3, x2, #100 */ \
    TestAdrpInsn2Add(sub, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_SUBX3X2_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_SUBSX3X0_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubsX3X0 ## disp) { \
    /* SUBS that uses the result of "ADRP x0, addr". */ \
    uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u;  /* SUBS x3, x0, #100 */ \
    TestAdrpInsn2Add(subs, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_SUBSX3X0_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_ADDX0X0_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddX0X0 ## disp) { \
    /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. */ \
    uint32_t add = kAddXInsn | (100 << 10) | (0u << 5) | 0u;  /* ADD x0, x0, #100 */ \
    TestAdrpInsn2Add(add, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_ADDX0X0_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_ADDSX0X2_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \
    /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \
    uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u;  /* ADDS x0, x2, #100 */ \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
    TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_ADDSX0X2_ADD_TEST, 0x12345678, 0xffffc840)

// LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
#define LDRW_PCREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WPcRel ## disp) { \
    TestAdrpLdrPcRelAdd(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRW_PCREL_ADD_TEST, 0x1234, 0x1238)

// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \
    bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \
    TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRX_PCREL_ADD_TEST, 0x1234, 0x1238)

// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \
    TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)

#define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \
    TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)

}  // namespace linker
}  // namespace art