/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/casts.h"
#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

class Arm64RelativePatcherTest : public RelativePatcherTest {
 public:
  explicit Arm64RelativePatcherTest(const std::string& variant)
      : RelativePatcherTest(kArm64, variant) { }

 protected:
  static const uint8_t kCallRawCode[];
  static const ArrayRef<const uint8_t> kCallCode;
  static const uint8_t kNopRawCode[];
  static const ArrayRef<const uint8_t> kNopCode;

  // NOP instruction.
  static constexpr uint32_t kNopInsn = 0xd503201f;

  // All branches can be created from kBlPlus0 or kBPlus0 by adding the low 26 bits.
  static constexpr uint32_t kBlPlus0 = 0x94000000u;
  static constexpr uint32_t kBPlus0 = 0x14000000u;
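  // For example, a BL to the next instruction (+4 bytes) would be encoded as
  // kBlPlus0 | ((4u >> 2) & 0x03ffffffu) == 0x94000001u; backward branches put
  // the two's complement of the word offset in the low 26 bits.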

  // Special BL values: BL +(128MiB - 4), the largest forward branch, and
  // BL -128MiB, the largest backward branch.
  static constexpr uint32_t kBlPlusMax = 0x95ffffffu;
  static constexpr uint32_t kBlMinusMax = 0x96000000u;

  // LDR immediate, 32-bit, unsigned offset.
  static constexpr uint32_t kLdrWInsn = 0xb9400000u;
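  // Rt is at bit 0, Rn at bit 5 and the byte offset, scaled down by 4, at bit 10;
  // e.g. LDR w0, [x1, #8] would be kLdrWInsn | ((8u >> 2) << 10) | (1u << 5).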

  // LDR register, 32-bit, LSL #2.
  static constexpr uint32_t kLdrWLsl2Insn = 0xb8607800u;

  // LDUR, 32-bit.
  static constexpr uint32_t kLdurWInsn = 0xb8400000u;

  // ADD/ADDS/SUB/SUBS immediate, 64-bit.
  static constexpr uint32_t kAddXInsn = 0x91000000u;
  static constexpr uint32_t kAddsXInsn = 0xb1000000u;
  static constexpr uint32_t kSubXInsn = 0xd1000000u;
  static constexpr uint32_t kSubsXInsn = 0xf1000000u;
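  // For these, Rd is at bit 0, Rn at bit 5 and imm12 at bit 10; e.g.
  // SUB x3, x2, #100 would be kSubXInsn | (100u << 10) | (2u << 5) | 3u.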

  // LDUR xzr, [x2, #4], i.e. an unaligned 64-bit load crossing a 64-bit boundary
  // (assuming aligned x2).
  static constexpr uint32_t kLdurInsn = 0xf840405fu;

  // LDR w12, <label> and LDR x12, <label>. Bits 5-23 contain label displacement in 4-byte units.
  static constexpr uint32_t kLdrWPcRelInsn = 0x1800000cu;
  static constexpr uint32_t kLdrXPcRelInsn = 0x5800000cu;
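  // E.g. LDR w12, +8 would be kLdrWPcRelInsn | ((8u >> 2) << 5).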

  // LDR w13, [SP, #<pimm>] and LDR x13, [SP, #<pimm>]. Bits 10-21 contain displacement from SP
  // in units of 4-bytes (for 32-bit load) or 8-bytes (for 64-bit load).
  static constexpr uint32_t kLdrWSpRelInsn = 0xb94003edu;
  static constexpr uint32_t kLdrXSpRelInsn = 0xf94003edu;

  // CBNZ x17, +0. Bits 5-23 are a placeholder for target offset from PC in units of 4-bytes.
  static constexpr uint32_t kCbnzIP1Plus0Insn = 0xb5000011u;
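  // E.g. CBNZ x17, +8 would be kCbnzIP1Plus0Insn | ((8u >> 2) << 5).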

  void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) {
    CHECK_LE(pos, code->size());
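    // A64 instructions are stored in memory little-endian, so emit the bytes LSB first.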
    const uint8_t insn_code[] = {
        static_cast<uint8_t>(insn),
        static_cast<uint8_t>(insn >> 8),
        static_cast<uint8_t>(insn >> 16),
        static_cast<uint8_t>(insn >> 24),
    };
    static_assert(sizeof(insn_code) == 4u, "Invalid sizeof(insn_code).");
    code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code));
  }

  void PushBackInsn(std::vector<uint8_t>* code, uint32_t insn) {
    InsertInsn(code, code->size(), insn);
  }

  std::vector<uint8_t> RawCode(std::initializer_list<uint32_t> insns) {
    std::vector<uint8_t> raw_code;
    raw_code.reserve(insns.size() * 4u);
    for (uint32_t insn : insns) {
      PushBackInsn(&raw_code, insn);
    }
    return raw_code;
  }

  uint32_t Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
                                 const ArrayRef<const LinkerPatch>& method1_patches,
                                 const ArrayRef<const uint8_t>& last_method_code,
                                 const ArrayRef<const LinkerPatch>& last_method_patches,
                                 uint32_t distance_without_thunks) {
    CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
    uint32_t method1_offset =
        kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
    AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
    const uint32_t gap_start = method1_offset + method1_code.size();
    // We want to put the last method at a very precise offset.
    const uint32_t last_method_offset = method1_offset + distance_without_thunks;
    CHECK_ALIGNED(last_method_offset, kArm64Alignment);
    const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);

    // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
    // (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
    // offsets by this test. Making the first chunk bigger makes it easy to give all intermediate
    // methods the same end alignment, so the thunk insertion adds a predictable size as long
    // as it's after the first chunk.)
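    // For example, a 5MiB gap gives num_small_chunks = 1 below, so we emit one chunk
    // of 5MiB - 1 * 2MiB = 3MiB (in [2MiB, 4MiB)) followed by one 2MiB chunk.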
    uint32_t method_idx = 2u;
    constexpr uint32_t kSmallChunkSize = 2 * MB;
    std::vector<uint8_t> gap_code;
    uint32_t gap_size = gap_end - gap_start;
    uint32_t num_small_chunks = std::max(gap_size / kSmallChunkSize, 1u) - 1u;
    uint32_t chunk_start = gap_start;
    uint32_t chunk_size = gap_size - num_small_chunks * kSmallChunkSize;
    for (uint32_t i = 0; i <= num_small_chunks; ++i) {  // num_small_chunks+1 iterations.
      uint32_t chunk_code_size =
          chunk_size - CodeAlignmentSize(chunk_start) - sizeof(OatQuickMethodHeader);
      gap_code.resize(chunk_code_size, 0u);
      AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code));
      method_idx += 1u;
      chunk_start += chunk_size;
      chunk_size = kSmallChunkSize;  // For all but the first chunk.
      DCHECK_EQ(CodeAlignmentSize(gap_end), CodeAlignmentSize(chunk_start));
    }

    // Add the last method and link.
    AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
    Link();

    // Check assumptions.
    CHECK_EQ(GetMethodOffset(1), method1_offset);
    auto last_result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(last_result.first);
    // There may be a thunk before the last method.
    if (last_result.second != last_method_offset) {
      // Thunk present. Check that there's only one.
      uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize();
      uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
      CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
    }
    return method_idx;
  }

  uint32_t GetMethodOffset(uint32_t method_idx) {
    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(result.first);
    CHECK_ALIGNED(result.second, 4u);
    return result.second;
  }

  std::vector<uint8_t> CompileMethodCallThunk() {
    ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey();
    return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
  }

  uint32_t MethodCallThunkSize() {
    return CompileMethodCallThunk().size();
  }

  bool CheckThunk(uint32_t thunk_offset) {
    const std::vector<uint8_t> expected_code = CompileMethodCallThunk();
    if (output_.size() < thunk_offset + expected_code.size()) {
      LOG(ERROR) << "output_.size() == " << output_.size() << " < "
          << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size());
      return false;
    }
    ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size());
    if (linked_code == ArrayRef<const uint8_t>(expected_code)) {
      return true;
    }
    // Log failure info.
    DumpDiff(ArrayRef<const uint8_t>(expected_code), linked_code);
    return false;
  }

  std::vector<uint8_t> GenNops(size_t num_nops) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u);
    for (size_t i = 0; i != num_nops; ++i) {
      PushBackInsn(&result, kNopInsn);
    }
    return result;
  }

  std::vector<uint8_t> GenNopsAndBl(size_t num_nops, uint32_t bl) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 4u);
    for (size_t i = 0; i != num_nops; ++i) {
      PushBackInsn(&result, kNopInsn);
    }
    PushBackInsn(&result, bl);
    return result;
  }

  std::vector<uint8_t> GenNopsAndAdrpAndUse(size_t num_nops,
                                            uint32_t method_offset,
                                            uint32_t target_offset,
                                            uint32_t use_insn) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 8u);
    for (size_t i = 0; i != num_nops; ++i) {
      PushBackInsn(&result, kNopInsn);
    }
    CHECK_ALIGNED(method_offset, 4u);
    CHECK_ALIGNED(target_offset, 4u);
    uint32_t adrp_offset = method_offset + num_nops * 4u;
    uint32_t disp = target_offset - (adrp_offset & ~0xfffu);
    if (use_insn == kLdrWInsn) {
      DCHECK_ALIGNED(disp, 1u << 2);
      use_insn |= 1 |                         // LDR w1, [x0, #(imm12 << 2)]
          ((disp & 0xfffu) << (10 - 2));      // imm12 = ((disp & 0xfffu) >> 2) is at bit 10.
    } else if (use_insn == kAddXInsn) {
      use_insn |= 1 |                         // ADD x1, x0, #imm
          (disp & 0xfffu) << 10;              // imm12 = (disp & 0xfffu) is at bit 10.
    } else {
      LOG(FATAL) << "Unexpected instruction: 0x" << std::hex << use_insn;
    }
    uint32_t adrp = 0x90000000u |             // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
        ((disp & 0x3000u) << (29 - 12)) |     // immlo = ((disp & 0x3000u) >> 12) is at bit 29,
        ((disp & 0xffffc000) >> (14 - 5)) |   // immhi = (disp >> 14) is at bit 5,
        // We take the sign bit from the disp, limiting disp to +- 2GiB.
        ((disp & 0x80000000) >> (31 - 23));   // sign bit in immhi is at bit 23.
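    // E.g. disp = 0x2458 yields ADRP x0, +0x2000 (immlo = 2, immhi = 0), with the
    // low 12 bits (0x458) supplied by the following LDR or ADD.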
    PushBackInsn(&result, adrp);
    PushBackInsn(&result, use_insn);
    return result;
  }

  std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops,
                                         uint32_t method_offset,
                                         uint32_t target_offset) {
    return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kLdrWInsn);
  }

  void TestNopsAdrpLdr(size_t num_nops, uint32_t bss_begin, uint32_t string_entry_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_entry_offset);
    bss_begin_ = bss_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    const LinkerPatch patches[] = {
        LinkerPatch::StringBssEntryPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::StringBssEntryPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();

    uint32_t method1_offset = GetMethodOffset(1u);
    uint32_t target_offset = bss_begin_ + string_entry_offset;
    auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  std::vector<uint8_t> GenNopsAndAdrpAdd(size_t num_nops,
                                         uint32_t method_offset,
                                         uint32_t target_offset) {
    return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kAddXInsn);
  }

  void TestNopsAdrpAdd(size_t num_nops, uint32_t string_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_offset);
    auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
    const LinkerPatch patches[] = {
        LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::RelativeStringPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();

    uint32_t method1_offset = GetMethodOffset(1u);
    auto expected_code = GenNopsAndAdrpAdd(num_nops, method1_offset, string_offset);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void PrepareNopsAdrpInsn2Ldr(size_t num_nops,
                               uint32_t insn2,
                               uint32_t bss_begin,
                               uint32_t string_entry_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_entry_offset);
    bss_begin_ = bss_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    InsertInsn(&code, num_nops * 4u + 4u, insn2);
    const LinkerPatch patches[] = {
        LinkerPatch::StringBssEntryPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::StringBssEntryPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();
  }

  void PrepareNopsAdrpInsn2Add(size_t num_nops, uint32_t insn2, uint32_t string_offset) {
    constexpr uint32_t kStringIndex = 1u;
    string_index_to_offset_map_.Put(kStringIndex, string_offset);
    auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
    InsertInsn(&code, num_nops * 4u + 4u, insn2);
    const LinkerPatch patches[] = {
        LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
        LinkerPatch::RelativeStringPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, kStringIndex),
    };
    AddCompiledMethod(MethodRef(1u),
                      ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();
  }

  void TestNopsAdrpInsn2AndUse(size_t num_nops,
                               uint32_t insn2,
                               uint32_t target_offset,
                               uint32_t use_insn) {
    uint32_t method1_offset = GetMethodOffset(1u);
    auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void TestNopsAdrpInsn2AndUseHasThunk(size_t num_nops,
                                       uint32_t insn2,
                                       uint32_t target_offset,
                                       uint32_t use_insn) {
    uint32_t method1_offset = GetMethodOffset(1u);
    CHECK(!compiled_method_refs_.empty());
    CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
    CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
    uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
    uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
    uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
    CHECK_ALIGNED(b_diff, 4u);
    ASSERT_LT(b_diff, 128 * MB);
    uint32_t b_out = kBPlus0 + ((b_diff >> 2) & 0x03ffffffu);
    uint32_t b_in = kBPlus0 + ((-b_diff >> 2) & 0x03ffffffu);

    auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    // Replace adrp with bl.
    expected_code.erase(expected_code.begin() + num_nops * 4u,
                        expected_code.begin() + num_nops * 4u + 4u);
    InsertInsn(&expected_code, num_nops * 4u, b_out);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));

    auto expected_thunk_code = GenNopsAndAdrpLdr(0u, thunk_offset, target_offset);
    ASSERT_EQ(expected_thunk_code.size(), 8u);
    expected_thunk_code.erase(expected_thunk_code.begin() + 4u, expected_thunk_code.begin() + 8u);
    InsertInsn(&expected_thunk_code, 4u, b_in);
    ASSERT_EQ(expected_thunk_code.size(), 8u);

    uint32_t thunk_size = MethodCallThunkSize();
    ASSERT_EQ(thunk_offset + thunk_size, output_.size());
    ASSERT_EQ(thunk_size, expected_thunk_code.size());
    ArrayRef<const uint8_t> thunk_code(&output_[thunk_offset], thunk_size);
    if (ArrayRef<const uint8_t>(expected_thunk_code) != thunk_code) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk_code), thunk_code);
      FAIL();
    }
  }

  void TestAdrpInsn2Ldr(uint32_t insn2,
                        uint32_t adrp_offset,
                        bool has_thunk,
                        uint32_t bss_begin,
                        uint32_t string_entry_offset) {
    uint32_t method1_offset =
        kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
    ASSERT_LT(method1_offset, adrp_offset);
    CHECK_ALIGNED(adrp_offset, 4u);
    uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
    PrepareNopsAdrpInsn2Ldr(num_nops, insn2, bss_begin, string_entry_offset);
    uint32_t target_offset = bss_begin_ + string_entry_offset;
    if (has_thunk) {
      TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, target_offset, kLdrWInsn);
    } else {
      TestNopsAdrpInsn2AndUse(num_nops, insn2, target_offset, kLdrWInsn);
    }
    ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
  }

  void TestAdrpLdurLdr(uint32_t adrp_offset,
                       bool has_thunk,
                       uint32_t bss_begin,
                       uint32_t string_entry_offset) {
    TestAdrpInsn2Ldr(kLdurInsn, adrp_offset, has_thunk, bss_begin, string_entry_offset);
  }

  void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn,
                           int32_t pcrel_disp,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t bss_begin,
                           uint32_t string_entry_offset) {
    ASSERT_LT(pcrel_disp, 0x100000);
    ASSERT_GE(pcrel_disp, -0x100000);
    ASSERT_EQ(pcrel_disp & 0x3, 0);
    uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, bss_begin, string_entry_offset);
  }

  void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn,
                           uint32_t sprel_disp_in_load_units,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t bss_begin,
                           uint32_t string_entry_offset) {
    ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
    uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, bss_begin, string_entry_offset);
  }

  void TestAdrpInsn2Add(uint32_t insn2,
                        uint32_t adrp_offset,
                        bool has_thunk,
                        uint32_t string_offset) {
    uint32_t method1_offset =
        kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
    ASSERT_LT(method1_offset, adrp_offset);
    CHECK_ALIGNED(adrp_offset, 4u);
    uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
    PrepareNopsAdrpInsn2Add(num_nops, insn2, string_offset);
    if (has_thunk) {
      TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, string_offset, kAddXInsn);
    } else {
      TestNopsAdrpInsn2AndUse(num_nops, insn2, string_offset, kAddXInsn);
    }
    ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
  }

  void TestAdrpLdurAdd(uint32_t adrp_offset, bool has_thunk, uint32_t string_offset) {
    TestAdrpInsn2Add(kLdurInsn, adrp_offset, has_thunk, string_offset);
  }

  void TestAdrpLdrPcRelAdd(uint32_t pcrel_ldr_insn,
                           int32_t pcrel_disp,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t string_offset) {
    ASSERT_LT(pcrel_disp, 0x100000);
    ASSERT_GE(pcrel_disp, -0x100000);
    ASSERT_EQ(pcrel_disp & 0x3, 0);
    uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
    TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
  }

  void TestAdrpLdrSpRelAdd(uint32_t sprel_ldr_insn,
                           uint32_t sprel_disp_in_load_units,
                           uint32_t adrp_offset,
                           bool has_thunk,
                           uint32_t string_offset) {
    ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
    uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
    TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
  }

  std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg) {
    const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
        0u, Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg));
    ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
    return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
  }

  std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) {
    LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
        0u, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg));
    ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
    return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
  }

  std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg) {
    LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
        0u, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg));
    ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch);
    return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
  }

  uint32_t GetOutputInsn(uint32_t offset) {
    CHECK_LE(offset, output_.size());
    CHECK_GE(output_.size() - offset, 4u);
    return (static_cast<uint32_t>(output_[offset]) << 0) |
           (static_cast<uint32_t>(output_[offset + 1]) << 8) |
           (static_cast<uint32_t>(output_[offset + 2]) << 16) |
           (static_cast<uint32_t>(output_[offset + 3]) << 24);
  }

  void TestBakerField(uint32_t offset, uint32_t ref_reg);
};

const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = {
    0x00, 0x00, 0x00, 0x94
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kCallCode(kCallRawCode);

const uint8_t Arm64RelativePatcherTest::kNopRawCode[] = {
    0x1f, 0x20, 0x03, 0xd5
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kNopCode(kNopRawCode);

class Arm64RelativePatcherTestDefault : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
};

class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
};

TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
  const LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  const std::vector<uint8_t> expected_code = RawCode({kBlPlus0});
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOther) {
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
  const LinkerPatch method2_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t method2_offset = GetMethodOffset(2u);
  uint32_t diff_after = method2_offset - method1_offset;
  CHECK_ALIGNED(diff_after, 4u);
  ASSERT_LT(diff_after >> 2, 1u << 8);  // Simple encoding, (diff_after >> 2) fits into 8 bits.
  const std::vector<uint8_t> method1_expected_code = RawCode({kBlPlus0 + (diff_after >> 2)});
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
  uint32_t diff_before = method1_offset - method2_offset;
  CHECK_ALIGNED(diff_before, 4u);
  ASSERT_GE(diff_before, -1u << 27);
  auto method2_expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff_before >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallTrampoline) {
  const LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t diff = kTrampolineOffset - method1_offset;
  ASSERT_EQ(diff & 1u, 0u);
  ASSERT_GE(diff, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned).
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) {
  constexpr uint32_t missing_method_index = 1024u;
  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  const LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, missing_method_index),
  };

  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
  uint32_t last_method_idx = Create2MethodsWithGap(
      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
      ArrayRef<const LinkerPatch>(last_method_patches),
      just_over_max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset,
            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);
  ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);

  // Check linked code.
  uint32_t thunk_offset =
      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
  EXPECT_TRUE(CheckThunk(thunk_offset));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t max_positive_disp = 128 * MB - 4u;
  uint32_t last_method_idx = Create2MethodsWithGap(method1_code,
                                                   ArrayRef<const LinkerPatch>(method1_patches),
                                                   kNopCode,
                                                   ArrayRef<const LinkerPatch>(),
                                                   bl_offset_in_method1 + max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset + bl_offset_in_method1 + max_positive_disp, last_method_offset);

  // Check linked code.
  auto expected_code = GenNopsAndBl(1u, kBlPlusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  const LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t max_negative_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(kNopCode,
                                                   ArrayRef<const LinkerPatch>(),
                                                   last_method_code,
                                                   ArrayRef<const LinkerPatch>(last_method_patches),
                                                   max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset, last_method_offset + bl_offset_in_last_method - max_negative_disp);

  // Check linked code.
  auto expected_code = GenNopsAndBl(0u, kBlMinusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t just_over_max_positive_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(
      method1_code,
      ArrayRef<const LinkerPatch>(method1_patches),
      kNopCode,
      ArrayRef<const LinkerPatch>(),
      bl_offset_in_method1 + just_over_max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
  uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
  uint32_t thunk_size = MethodCallThunkSize();
  uint32_t thunk_offset =
      RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64));
  DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
            last_method_header_offset);
  uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  CheckThunk(thunk_offset);
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  const LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
  uint32_t last_method_idx = Create2MethodsWithGap(
      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
      ArrayRef<const LinkerPatch>(last_method_patches),
      just_over_max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset,
            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);

  // Check linked code.
  uint32_t thunk_offset =
      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
  CHECK_ALIGNED(diff, 4u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
  EXPECT_TRUE(CheckThunk(thunk_offset));
}

TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry1) {
  TestNopsAdrpLdr(0u, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry2) {
  TestNopsAdrpLdr(0u, -0x12345678u, 0x4444u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry3) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x3ffcu);
}

TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry4) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x4000u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference1) {
  TestNopsAdrpAdd(0u, 0x12345678u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference2) {
  TestNopsAdrpAdd(0u, -0x12345678u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference3) {
  TestNopsAdrpAdd(0u, 0x12345000u);
}

TEST_F(Arm64RelativePatcherTestDefault, StringReference4) {
  TestNopsAdrpAdd(0u, 0x12345ffcu);
}

#define TEST_FOR_OFFSETS(test, disp1, disp2) \
  test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \
  test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2)
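// The offsets 0xff4, 0xff8, 0xffc and 0x1000 place the ADRP just before, at, or
// just after a 4KiB page boundary; 0xff8 and 0xffc are the positions where the
// Cortex-A53 erratum 843419 workaround may insert a thunk on the "default"
// variant (the Denver64 tests below never expect one).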

#define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## Ldur ## disp) { \
    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
    TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_LDUR_LDR_TEST, 0x1234, 0x1238)

#define DENVER64_LDUR_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDenver64, StringBssEntry ## adrp_offset ## Ldur ## disp) { \
    TestAdrpLdurLdr(adrp_offset, false, 0x12345678u, disp); \
  }

TEST_FOR_OFFSETS(DENVER64_LDUR_LDR_TEST, 0x1234, 0x1238)

// LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
#define LDRW_PCREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## WPcRel ## disp) { \
    TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_PCREL_LDR_TEST, 0x1234, 0x1238)

// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## XPcRel ## disp) { \
    bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
    TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRX_PCREL_LDR_TEST, 0x1234, 0x1238)

// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## WSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4)

#define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringBssEntry ## adrp_offset ## XSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8)

#define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \
    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
    TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_LDUR_ADD_TEST, 0x12345678, 0xffffc840)

#define DENVER64_LDUR_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDenver64, StringReference ## adrp_offset ## Ldur ## disp) { \
    TestAdrpLdurAdd(adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DENVER64_LDUR_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_SUBX3X2_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubX3X2 ## disp) { \
    /* SUB unrelated to "ADRP x0, addr". */ \
    uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u;  /* SUB x3, x2, #100 */ \
    TestAdrpInsn2Add(sub, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_SUBX3X2_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_SUBSX3X0_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubsX3X0 ## disp) { \
    /* SUBS that uses the result of "ADRP x0, addr". */ \
    uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u;  /* SUBS x3, x0, #100 */ \
    TestAdrpInsn2Add(subs, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_SUBSX3X0_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_ADDX0X0_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddX0X0 ## disp) { \
    /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. */ \
    uint32_t add = kAddXInsn | (100 << 10) | (0u << 5) | 0u;  /* ADD x0, x0, #100 */ \
    TestAdrpInsn2Add(add, adrp_offset, false, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_ADDX0X0_ADD_TEST, 0x12345678, 0xffffc840)

#define DEFAULT_ADDSX0X2_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \
    /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \
    uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u;  /* ADDS x0, x2, #100 */ \
    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
    TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \
  }

TEST_FOR_OFFSETS(DEFAULT_ADDSX0X2_ADD_TEST, 0x12345678, 0xffffc840)

// LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
#define LDRW_PCREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WPcRel ## disp) { \
    TestAdrpLdrPcRelAdd(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRW_PCREL_ADD_TEST, 0x1234, 0x1238)

// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \
    bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
    bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
    TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRX_PCREL_ADD_TEST, 0x1234, 0x1238)

// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \
    TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)

#define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \
    TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u); \
  }

TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)

void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg) {
  uint32_t valid_regs[] = {
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
      10, 11, 12, 13, 14, 15,         18, 19,  // IP0 and IP1 are reserved.
      20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
      // LR and SP/ZR are reserved.
  };
  DCHECK_ALIGNED(offset, 4u);
  DCHECK_LT(offset, 16 * KB);
  constexpr size_t kMethodCodeSize = 8u;
  constexpr size_t kLiteralOffset = 0u;
  uint32_t method_idx = 0u;
  for (uint32_t base_reg : valid_regs) {
    for (uint32_t holder_reg : valid_regs) {
      uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | ref_reg;
      const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr});
      ASSERT_EQ(kMethodCodeSize, raw_code.size());
      ArrayRef<const uint8_t> code(raw_code);
      uint32_t encoded_data =
          Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg);
      const LinkerPatch patches[] = {
          LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data),
      };
      ++method_idx;
      AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
    }
  }
  Link();

  // All thunks are at the end.
  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
  method_idx = 0u;
  for (uint32_t base_reg : valid_regs) {
    for (uint32_t holder_reg : valid_regs) {
      ++method_idx;
      uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
      uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
      uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | ref_reg;
      const std::vector<uint8_t> expected_code = RawCode({cbnz, ldr});
      ASSERT_EQ(kMethodCodeSize, expected_code.size());
      ASSERT_TRUE(
          CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));

      std::vector<uint8_t> expected_thunk = CompileBakerOffsetThunk(base_reg, holder_reg);
      ASSERT_GT(output_.size(), thunk_offset);
      ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
      ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
                                             expected_thunk.size());
      if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
        DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
        ASSERT_TRUE(false);
      }

      size_t gray_check_offset = thunk_offset;
      if (holder_reg == base_reg) {
        // Verify that the null-check CBZ uses the correct register, i.e. holder_reg.
        ASSERT_GE(output_.size() - gray_check_offset, 4u);
        ASSERT_EQ(0x34000000u | holder_reg, GetOutputInsn(thunk_offset) & 0xff00001fu);
        gray_check_offset += 4u;
      }
      // Verify that the lock word for gray bit check is loaded from the holder address.
      static constexpr size_t kGrayCheckInsns = 5;
      ASSERT_GE(output_.size() - gray_check_offset, 4u * kGrayCheckInsns);
      const uint32_t load_lock_word =
          kLdrWInsn |
          (mirror::Object::MonitorOffset().Uint32Value() << (10 - 2)) |
          (holder_reg << 5) |
          /* ip0 */ 16;
      EXPECT_EQ(load_lock_word, GetOutputInsn(gray_check_offset));
      // Verify the gray bit check.
      const uint32_t check_gray_bit_without_offset =
          0x37000000u | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
      EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(gray_check_offset + 4u) & 0xfff8001fu);
      // Verify the fake dependency.
      const uint32_t fake_dependency =
          0x8b408000u |             // ADD Xd, Xn, Xm, LSR 32
          (/* ip0 */ 16 << 16) |    // Xm = ip0
          (base_reg << 5) |         // Xn = base_reg
          base_reg;                 // Xd = base_reg
      EXPECT_EQ(fake_dependency, GetOutputInsn(gray_check_offset + 12u));
      // Do not check the rest of the implementation.

      // The next thunk follows on the next aligned offset.
      thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
    }
  }
}

#define TEST_BAKER_FIELD(offset, ref_reg)     \
  TEST_F(Arm64RelativePatcherTestDefault,     \
    BakerOffset##offset##_##ref_reg) {        \
    TestBakerField(offset, ref_reg);          \
  }

TEST_BAKER_FIELD(/* offset */ 0, /* ref_reg */ 0)
TEST_BAKER_FIELD(/* offset */ 8, /* ref_reg */ 15)
TEST_BAKER_FIELD(/* offset */ 0x3ffc, /* ref_reg */ 29)

TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
  // One thunk in the middle with maximum distance branches to it from both sides.
  // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`.
  constexpr uint32_t kLiteralOffset1 = 4;
  const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
  ArrayRef<const uint8_t> code1(raw_code1);
  uint32_t encoded_data =
      Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
  const LinkerPatch patches1[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
  };
  AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));

  // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
  // allows the branch to reach that thunk.
  size_t filler1_size =
      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
  std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
  ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
  AddCompiledMethod(MethodRef(2u), filler1_code);

  // Enforce thunk reservation with a tiny method.
  AddCompiledMethod(MethodRef(3u), kNopCode);

  // Allow reaching the thunk from the very beginning of a method 1MiB away. Backward branch
  // reaches the full 1MiB. Things to subtract:
  //   - thunk size and method 3 pre-header, rounded up (padding in between if needed)
  //   - method 3 code and method 4 pre-header, rounded up (padding in between if needed)
  //   - method 4 header (let there be no padding between method 4 code and method 5 pre-header).
  size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
  size_t filler2_size =
      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
             - sizeof(OatQuickMethodHeader);
  std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
  ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
  AddCompiledMethod(MethodRef(4u), filler2_code);

  constexpr uint32_t kLiteralOffset2 = 0;
  const std::vector<uint8_t> raw_code2 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn});
  ArrayRef<const uint8_t> code2(raw_code2);
  const LinkerPatch patches2[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data),
  };
  AddCompiledMethod(MethodRef(5u), code2, ArrayRef<const LinkerPatch>(patches2));

  Link();

  uint32_t first_method_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(5u);
  EXPECT_EQ(2 * MB, last_method_offset - first_method_offset);

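  // CBNZ imm19 (bits 5-23) is in 4-byte units: 0x007fffe0 encodes +(1MiB - 4)
  // and 0x00800000 encodes -1MiB.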
   1052   const uint32_t cbnz_max_forward = kCbnzIP1Plus0Insn | 0x007fffe0;
   1053   const uint32_t cbnz_max_backward = kCbnzIP1Plus0Insn | 0x00800000;
  const std::vector<uint8_t> expected_code1 = RawCode({kNopInsn, cbnz_max_forward, kLdrWInsn});
  const std::vector<uint8_t> expected_code2 = RawCode({cbnz_max_backward, kLdrWInsn});
  ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
  ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef<const uint8_t>(expected_code2)));
}

TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
  // Based on the first part of BakerOffsetThunkInTheMiddle, but the CBNZ is one
  // instruction earlier, so the thunk is emitted before the filler.
  // Use offset = 0, base_reg = 0, ref_reg = 0; the LDR is then simply `kLdrWInsn`.
  constexpr uint32_t kLiteralOffset1 = 0;
  const std::vector<uint8_t> raw_code1 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn, kNopInsn});
  ArrayRef<const uint8_t> code1(raw_code1);
  uint32_t encoded_data =
      Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
  const LinkerPatch patches1[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
  };
  AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));

  // Add a 1MiB filler after the method above. With the literal offset of 0, the CBNZ
  // cannot reach a thunk placed after the filler, so the thunk must be emitted before
  // the filler, right after method 1.
  size_t filler1_size =
      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
  std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
  ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
  AddCompiledMethod(MethodRef(2u), filler1_code);

  Link();

  const uint32_t cbnz_offset = RoundUp(raw_code1.size(), kArm64Alignment) - kLiteralOffset1;
  const uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
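  // The `<< (5 - 2)` combines scaling the byte offset down to 4-byte units
  // (>> 2) with shifting it into the imm19 field at bit 5 (<< 5).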
  const std::vector<uint8_t> expected_code1 = RawCode({cbnz, kLdrWInsn, kNopInsn});
  ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
}

TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFromLast) {
  // Based on BakerOffsetThunkInTheMiddle, but the CBNZ in the last method is preceded
  // by a NOP and so cannot reach the thunk in the middle; an extra thunk must be
  // emitted at the end.
  // Use offset = 0, base_reg = 0, ref_reg = 0; the LDR is then simply `kLdrWInsn`.
  constexpr uint32_t kLiteralOffset1 = 4;
  const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
  ArrayRef<const uint8_t> code1(raw_code1);
  uint32_t encoded_data =
      Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
  const LinkerPatch patches1[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
  };
  AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));

  // Allow the thunk to be placed at a 1MiB offset from the start of the method above.
  // The literal offset of 4 lets the CBNZ reach a thunk at exactly that distance.
  size_t filler1_size =
      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
  std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
  ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
  AddCompiledMethod(MethodRef(2u), filler1_code);

  // Enforce thunk reservation with a tiny method.
  AddCompiledMethod(MethodRef(3u), kNopCode);

  // If not for the extra NOP, this would allow the thunk to be reached from the very
  // beginning of a method exactly 1MiB away; the backward branch covers the full 1MiB
  // range. Things to subtract:
  //   - thunk size and method 3 pre-header, rounded up (padding in between if needed),
  //   - method 3 code and method 4 pre-header, rounded up (padding in between if needed),
  //   - method 4 header (assume no padding between method 4 code and method 5 pre-header).
  size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
  size_t filler2_size =
      1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
             - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
             - sizeof(OatQuickMethodHeader);
  std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
  ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
  AddCompiledMethod(MethodRef(4u), filler2_code);

  // Extra NOP compared to BakerOffsetThunkInTheMiddle.
  constexpr uint32_t kLiteralOffset2 = 4;
  const std::vector<uint8_t> raw_code2 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
  ArrayRef<const uint8_t> code2(raw_code2);
  const LinkerPatch patches2[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data),
  };
  AddCompiledMethod(MethodRef(5u), code2, ArrayRef<const LinkerPatch>(patches2));

  Link();

  const uint32_t cbnz_max_forward = kCbnzIP1Plus0Insn | 0x007fffe0;
  const uint32_t cbnz_last_offset = RoundUp(raw_code2.size(), kArm64Alignment) - kLiteralOffset2;
  const uint32_t cbnz_last = kCbnzIP1Plus0Insn | (cbnz_last_offset << (5 - 2));
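  // The last CBNZ cannot reach the middle thunk, so it branches forward to the
  // extra thunk emitted right after the (aligned) end of method 5's code.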
  const std::vector<uint8_t> expected_code1 = RawCode({kNopInsn, cbnz_max_forward, kLdrWInsn});
  const std::vector<uint8_t> expected_code2 = RawCode({kNopInsn, cbnz_last, kLdrWInsn});
  ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
  ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef<const uint8_t>(expected_code2)));
}

TEST_F(Arm64RelativePatcherTestDefault, BakerArray) {
  uint32_t valid_regs[] = {
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
      10, 11, 12, 13, 14, 15,         18, 19,  // IP0 and IP1 are reserved.
      20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
      // LR and SP/ZR are reserved.
  };
  auto ldr = [](uint32_t base_reg) {
    uint32_t index_reg = (base_reg == 0u) ? 1u : 0u;
    uint32_t ref_reg = (base_reg == 2u) ? 3u : 2u;
    return kLdrWLsl2Insn | (index_reg << 16) | (base_reg << 5) | ref_reg;
  };
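  // For example, base_reg == 0 gives index_reg == 1 and ref_reg == 2, i.e.
  // LDR w2, [x0, x1, LSL #2]: Rm in bits 16-20, Rn in bits 5-9, Rt in bits 0-4.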
  constexpr size_t kMethodCodeSize = 8u;
  constexpr size_t kLiteralOffset = 0u;
  uint32_t method_idx = 0u;
  for (uint32_t base_reg : valid_regs) {
    ++method_idx;
    const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr(base_reg)});
    ASSERT_EQ(kMethodCodeSize, raw_code.size());
    ArrayRef<const uint8_t> code(raw_code);
    const LinkerPatch patches[] = {
        LinkerPatch::BakerReadBarrierBranchPatch(
            kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)),
    };
    AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
  }
  Link();

  // All thunks are at the end.
  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
  method_idx = 0u;
  for (uint32_t base_reg : valid_regs) {
    ++method_idx;
    uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
    uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
    const std::vector<uint8_t> expected_code = RawCode({cbnz, ldr(base_reg)});
    ASSERT_EQ(kMethodCodeSize, expected_code.size());
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));

    std::vector<uint8_t> expected_thunk = CompileBakerArrayThunk(base_reg);
    ASSERT_GT(output_.size(), thunk_offset);
    ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
    ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
                                           expected_thunk.size());
    if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
      ASSERT_TRUE(false);
    }

    // Verify that the lock word for the gray bit check is loaded from the correct
    // address: a negative offset from base_reg, which points to the array data.
    static constexpr size_t kGrayCheckInsns = 5;
    ASSERT_GE(output_.size() - thunk_offset, 4u * kGrayCheckInsns);
    int32_t data_offset =
        mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
    int32_t offset = mirror::Object::MonitorOffset().Int32Value() - data_offset;
    ASSERT_LT(offset, 0);
    const uint32_t load_lock_word =
        kLdurWInsn |
        ((offset & 0x1ffu) << 12) |
        (base_reg << 5) |
        /* ip0 */ 16;
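    // LDUR encodes its signed byte offset in the 9-bit imm9 field at bits 12-20,
    // hence the `& 0x1ffu` truncation of the negative offset before the shift.
    static_assert((0x1ffu << 12) == 0x001ff000u, "imm9 field position");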
    EXPECT_EQ(load_lock_word, GetOutputInsn(thunk_offset));
    // Verify the gray bit check.
    const uint32_t check_gray_bit_without_offset =
        0x37000000u | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
    EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(thunk_offset + 4u) & 0xfff8001fu);
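    // 0x37000000 is TBNZ; the tested bit number sits in bits 19-23, and the
    // 0xfff8001f mask keeps the opcode, bit number and Rt while ignoring the
    // imm14 branch offset in bits 5-18.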
    // Verify the fake dependency.
    const uint32_t fake_dependency =
        0x8b408000u |             // ADD Xd, Xn, Xm, LSR 32
        (/* ip0 */ 16 << 16) |    // Xm = ip0
        (base_reg << 5) |         // Xn = base_reg
        base_reg;                 // Xd = base_reg
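    // For base_reg == 0 this is ADD x0, x0, x16, LSR #32 (0x8b508000). The W-form
    // lock word load zero-extends, so x16 LSR #32 adds zero; the ADD only makes
    // the base register data-dependent on the lock word load.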
    EXPECT_EQ(fake_dependency, GetOutputInsn(thunk_offset + 12u));
    // Do not check the rest of the implementation.

    // The next thunk follows at the next aligned offset.
    thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
  }
}

TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) {
  uint32_t valid_regs[] = {
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
      10, 11, 12, 13, 14, 15,         18, 19,  // IP0 and IP1 are reserved.
      20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
      // LR and SP/ZR are reserved.
  };
  constexpr size_t kMethodCodeSize = 8u;
  constexpr size_t kLiteralOffset = 4u;
  uint32_t method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t ldr = kLdrWInsn | (/* offset */ 8 << (10 - 2)) | (/* base_reg */ 0 << 5) | root_reg;
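    // The `8 << (10 - 2)` scales the byte offset 8 down to 4-byte units and
    // places it in the imm12 field at bit 10, i.e. LDR w<root_reg>, [x0, #8].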
    const std::vector<uint8_t> raw_code = RawCode({ldr, kCbnzIP1Plus0Insn});
    ASSERT_EQ(kMethodCodeSize, raw_code.size());
    ArrayRef<const uint8_t> code(raw_code);
    const LinkerPatch patches[] = {
        LinkerPatch::BakerReadBarrierBranchPatch(
            kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg)),
    };
    AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
  }
  Link();

  // All thunks are at the end.
  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
  method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
    uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
    uint32_t ldr = kLdrWInsn | (/* offset */ 8 << (10 - 2)) | (/* base_reg */ 0 << 5) | root_reg;
    const std::vector<uint8_t> expected_code = RawCode({ldr, cbnz});
    ASSERT_EQ(kMethodCodeSize, expected_code.size());
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));

    std::vector<uint8_t> expected_thunk = CompileBakerGcRootThunk(root_reg);
    ASSERT_GT(output_.size(), thunk_offset);
    ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
    ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
                                           expected_thunk.size());
    if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
      ASSERT_TRUE(false);
    }

    // Verify that the fast-path null-check CBZ uses the correct register, i.e. root_reg.
    ASSERT_GE(output_.size() - thunk_offset, 4u);
    ASSERT_EQ(0x34000000u | root_reg, GetOutputInsn(thunk_offset) & 0xff00001fu);
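    // 0x34000000 is CBZ (32-bit); the 0xff00001f mask keeps the opcode and Rt
    // while ignoring the imm19 branch offset in bits 5-23.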
    // Do not check the rest of the implementation.

    // The next thunk follows at the next aligned offset.
    thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
  }
}

TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) {
  // During development, there was a `DCHECK_LE(MaxNextOffset(), next_thunk.MaxNextOffset());`
  // in `ArmBaseRelativePatcher::ThunkData::MakeSpaceBefore()` which does not necessarily
  // hold when thunks of different sizes are being reserved. This test exposes that
  // situation by mixing Baker thunks with a method call thunk.

  // Add a method call patch that can reach up to method 1 offset + 128MiB.
  uint32_t method_idx = 0u;
  constexpr size_t kMethodCallLiteralOffset = 4u;
  constexpr uint32_t kMissingMethodIdx = 2u;
  const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kBlPlus0});
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(kMethodCallLiteralOffset, nullptr, 2u),
  };
  ArrayRef<const uint8_t> code1(raw_code1);
  ++method_idx;
  AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(method1_patches));
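  // Method 2 is deliberately never added, so the linker must keep a method call
  // thunk reservation live for the BL above; that reservation is what interacts
  // with the Baker thunks added below.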

  // Skip kMissingMethodIdx.
  ++method_idx;
  ASSERT_EQ(kMissingMethodIdx, method_idx);
  // Add a filler method sized so that the code for the next method starts 1MiB
  // after the code for method 1.
  size_t filler_size =
      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
             - sizeof(OatQuickMethodHeader);
  std::vector<uint8_t> filler_code = GenNops(filler_size / 4u);
  ++method_idx;
  AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));
  // Add 126 methods, each occupying 1MiB of code plus header, making the code for the
  // next method start 1MiB before the currently scheduled MaxNextOffset() of the
  // method call thunk.
  for (uint32_t i = 0; i != 126; ++i) {
    filler_size = 1 * MB - sizeof(OatQuickMethodHeader);
    filler_code = GenNops(filler_size / 4u);
    ++method_idx;
    AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));
  }

  // Add two Baker GC root patches to the last method: one whose thunk could go at
  // 1MiB + kArm64Alignment (i.e. kArm64Alignment past the method call thunk), and a
  // second that needs its thunk kArm64Alignment after that. Since the GC root thunk
  // is larger than the space required by the method call thunk plus kArm64Alignment,
  // this pushes the first GC root thunk's pending MaxNextOffset() before the method
  // call thunk's pending MaxNextOffset(), which therefore needs to be adjusted.
  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArm64Alignment) + kArm64Alignment,
            CompileBakerGcRootThunk(/* root_reg */ 0).size());
  static_assert(kArm64Alignment == 16, "Code below assumes kArm64Alignment == 16");
  constexpr size_t kBakerLiteralOffset1 = 4u + kArm64Alignment;
  constexpr size_t kBakerLiteralOffset2 = 4u + 2 * kArm64Alignment;
  // Use offset = 0, base_reg = 0; the LDR is then simply `kLdrWInsn | root_reg`.
  const uint32_t ldr1 = kLdrWInsn | /* root_reg */ 1;
  const uint32_t ldr2 = kLdrWInsn | /* root_reg */ 2;
  const std::vector<uint8_t> last_method_raw_code = RawCode({
      kNopInsn, kNopInsn, kNopInsn, kNopInsn,   // Padding before the first GC root read barrier.
      ldr1, kCbnzIP1Plus0Insn,                  // First GC root LDR with read barrier.
      kNopInsn, kNopInsn,                       // Padding before the second GC root read barrier.
      ldr2, kCbnzIP1Plus0Insn,                  // Second GC root LDR with read barrier.
  });
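  // With kArm64Alignment == 16, the first CBNZ sits at byte offset 20 and the second
  // at byte offset 36, matching kBakerLiteralOffset1 and kBakerLiteralOffset2 above.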
  uint32_t encoded_data1 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1);
  uint32_t encoded_data2 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2);
  const LinkerPatch last_method_patches[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1),
      LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2),
  };
  ++method_idx;
  AddCompiledMethod(MethodRef(method_idx),
                    ArrayRef<const uint8_t>(last_method_raw_code),
                    ArrayRef<const LinkerPatch>(last_method_patches));

  // The main purpose of the test is to check that Link() does not cause a crash.
  Link();

  ASSERT_EQ(127 * MB, GetMethodOffset(method_idx) - GetMethodOffset(1u));
}

}  // namespace linker
}  // namespace art