// Thumb2 relative patcher implementation (see linker/arm/relative_patcher_thumb2.h).
      1 /*
      2  * Copyright (C) 2015 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "linker/arm/relative_patcher_thumb2.h"
     18 
     19 #include "arch/arm/asm_support_arm.h"
     20 #include "art_method.h"
     21 #include "base/bit_utils.h"
     22 #include "compiled_method.h"
     23 #include "entrypoints/quick/quick_entrypoints_enum.h"
     24 #include "lock_word.h"
     25 #include "mirror/object.h"
     26 #include "mirror/array-inl.h"
     27 #include "read_barrier.h"
     28 #include "utils/arm/assembler_arm_vixl.h"
     29 
     30 namespace art {
     31 namespace linker {
     32 
// PC displacement from patch location; Thumb2 PC is always at instruction address + 4.
static constexpr int32_t kPcDisplacement = 4;

// Maximum positive and negative displacement for method call measured from the patch location.
// (Signed 25 bit displacement with the last bit 0 has range [-2^24, 2^24-2] measured from
// the Thumb2 PC pointing right after the BL, i.e. 4 bytes later than the patch location.)
// These bound the BL-based method call thunk reachability checks.
constexpr uint32_t kMaxMethodCallPositiveDisplacement = (1u << 24) - 2 + kPcDisplacement;
constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 24) - kPcDisplacement;

// Maximum positive and negative displacement for a conditional branch measured from the patch
// location. (Signed 21 bit displacement with the last bit 0 has range [-2^20, 2^20-2] measured
// from the Thumb2 PC pointing right after the B.cond, i.e. 4 bytes later than the patch location.)
// These bound the B.cond branches to Baker read barrier thunks.
constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplacement;
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement;
     47 
     48 Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
     49     : ArmBaseRelativePatcher(provider, kThumb2) {
     50 }
     51 
     52 void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
     53                                       uint32_t literal_offset,
     54                                       uint32_t patch_offset,
     55                                       uint32_t target_offset) {
     56   DCHECK_LE(literal_offset + 4u, code->size());
     57   DCHECK_EQ(literal_offset & 1u, 0u);
     58   DCHECK_EQ(patch_offset & 1u, 0u);
     59   DCHECK_EQ(target_offset & 1u, 1u);  // Thumb2 mode bit.
     60   uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
     61   displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
     62   DCHECK_EQ(displacement & 1u, 0u);
     63   DCHECK((displacement >> 24) == 0u || (displacement >> 24) == 255u);  // 25-bit signed.
     64   uint32_t signbit = (displacement >> 31) & 0x1;
     65   uint32_t i1 = (displacement >> 23) & 0x1;
     66   uint32_t i2 = (displacement >> 22) & 0x1;
     67   uint32_t imm10 = (displacement >> 12) & 0x03ff;
     68   uint32_t imm11 = (displacement >> 1) & 0x07ff;
     69   uint32_t j1 = i1 ^ (signbit ^ 1);
     70   uint32_t j2 = i2 ^ (signbit ^ 1);
     71   uint32_t value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) | imm11;
     72   value |= 0xf000d000;  // BL
     73 
     74   // Check that we're just overwriting an existing BL.
     75   DCHECK_EQ(GetInsn32(code, literal_offset) & 0xf800d000, 0xf000d000);
     76   // Write the new BL.
     77   SetInsn32(code, literal_offset, value);
     78 }
     79 
     80 void Thumb2RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
     81                                                      const LinkerPatch& patch,
     82                                                      uint32_t patch_offset,
     83                                                      uint32_t target_offset) {
     84   uint32_t literal_offset = patch.LiteralOffset();
     85   uint32_t pc_literal_offset = patch.PcInsnOffset();
     86   uint32_t pc_base = patch_offset + (pc_literal_offset - literal_offset) + 4u /* PC adjustment */;
     87   uint32_t diff = target_offset - pc_base;
     88 
     89   uint32_t insn = GetInsn32(code, literal_offset);
     90   DCHECK_EQ(insn & 0xff7ff0ffu, 0xf2400000u);  // MOVW/MOVT, unpatched (imm16 == 0).
     91   uint32_t diff16 = ((insn & 0x00800000u) != 0u) ? (diff >> 16) : (diff & 0xffffu);
     92   uint32_t imm4 = (diff16 >> 12) & 0xfu;
     93   uint32_t imm = (diff16 >> 11) & 0x1u;
     94   uint32_t imm3 = (diff16 >> 8) & 0x7u;
     95   uint32_t imm8 = diff16 & 0xffu;
     96   insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
     97   SetInsn32(code, literal_offset, insn);
     98 }
     99 
// Patch the placeholder BNE (emitted as "BNE +0") at `patch.LiteralOffset()` to branch
// to the Baker read barrier thunk selected by the patch's encoded data. In debug builds,
// first verify that the instruction adjacent to the branch matches the LDR shape the
// thunk's introspection code expects for the given kind (field/array/GC root).
void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                        const LinkerPatch& patch,
                                                        uint32_t patch_offset) {
  DCHECK_ALIGNED(patch_offset, 2u);
  uint32_t literal_offset = patch.LiteralOffset();
  DCHECK_ALIGNED(literal_offset, 2u);
  DCHECK_LT(literal_offset, code->size());
  uint32_t insn = GetInsn32(code, literal_offset);
  DCHECK_EQ(insn, 0xf0408000);  // BNE +0 (unpatched)
  ThunkKey key = GetBakerThunkKey(patch);
  if (kIsDebugBuild) {
    const uint32_t encoded_data = key.GetCustomValue1();
    BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
    // Check that the next instruction matches the expected LDR.
    switch (kind) {
      case BakerReadBarrierKind::kField: {
        BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
        if (width == BakerReadBarrierWidth::kWide) {
          DCHECK_GE(code->size() - literal_offset, 8u);
          uint32_t next_insn = GetInsn32(code, literal_offset + 4u);
          // LDR (immediate), encoding T3, with correct base_reg.
          CheckValidReg((next_insn >> 12) & 0xfu);  // Check destination register.
          const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
          CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16));
        } else {
          DCHECK_GE(code->size() - literal_offset, 6u);
          uint32_t next_insn = GetInsn16(code, literal_offset + 4u);
          // LDR (immediate), encoding T1, with correct base_reg.
          CheckValidReg(next_insn & 0x7u);  // Check destination register.
          const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
          CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3));
        }
        break;
      }
      case BakerReadBarrierKind::kArray: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn32(code, literal_offset + 4u);
        // LDR (register), encoding T2, with correct base_reg and LSL #2 shift (imm2 == 2),
        // i.e. LDR.W Rt, [Rn, Rm, LSL #2].
        CheckValidReg((next_insn >> 12) & 0xfu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16));
        CheckValidReg(next_insn & 0xf);  // Check index register
        break;
      }
      case BakerReadBarrierKind::kGcRoot: {
        BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
        if (width == BakerReadBarrierWidth::kWide) {
          // For GC roots the LDR precedes the branch, so look backwards.
          DCHECK_GE(literal_offset, 4u);
          uint32_t prev_insn = GetInsn32(code, literal_offset - 4u);
          // LDR (immediate), encoding T3, with correct root_reg.
          const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
          CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12));
        } else {
          DCHECK_GE(literal_offset, 2u);
          uint32_t prev_insn = GetInsn16(code, literal_offset - 2u);
          // LDR (immediate), encoding T1, with correct root_reg.
          const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
          CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg);
        }
        break;
      }
      default:
        LOG(FATAL) << "Unexpected type: " << static_cast<uint32_t>(key.GetType());
        UNREACHABLE();
    }
  }
  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
  DCHECK_ALIGNED(target_offset, 4u);
  uint32_t disp = target_offset - (patch_offset + kPcDisplacement);
  DCHECK((disp >> 20) == 0u || (disp >> 20) == 0xfffu);   // 21-bit signed.
  // Scatter the displacement into the B.cond encoding T3 fields.
  insn |= ((disp << (26 - 20)) & 0x04000000u) |           // Shift bit 20 to 26, "S".
          ((disp >> (19 - 11)) & 0x00000800u) |           // Shift bit 19 to 11, "J2".
          ((disp >> (18 - 13)) & 0x00002000u) |           // Shift bit 18 to 13, "J1".
          ((disp << (16 - 12)) & 0x003f0000u) |           // Shift bits 12-17 to 16-21, "imm6".
          ((disp >> (1 - 0)) & 0x000007ffu);              // Shift bits 1-11 to 0-10, "imm11".
  SetInsn32(code, literal_offset, insn);
}
    177 
    178 #define __ assembler.GetVIXLAssembler()->
    179 
// Emit the fast path shared by the field and array Baker read barrier thunks:
// load the lock word through `lock_word`, test the read barrier state bit, and
// if the holder is not gray, add a fake address dependency to `base_reg` and
// return past the original LDR (LR advanced by `raw_ldr_offset`). If the holder
// is gray, branch to `slow_path` instead; the caller binds and fills it in.
static void EmitGrayCheckAndFastPath(arm::ArmVIXLAssembler& assembler,
                                     vixl::aarch32::Register base_reg,
                                     vixl::aarch32::MemOperand& lock_word,
                                     vixl::aarch32::Label* slow_path,
                                     int32_t raw_ldr_offset) {
  using namespace vixl::aarch32;  // NOLINT(build/namespaces)
  // Load the lock word containing the rb_state.
  __ Ldr(ip, lock_word);
  // Given the numeric representation, it's enough to check the low bit of the rb_state.
  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
  __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
  __ B(ne, slow_path, /* is_far_target */ false);
  // Advance LR past the original LDR so the Bx below returns after the load.
  __ Add(lr, lr, raw_ldr_offset);
  // Introduce a dependency on the lock_word including rb_state,
  // to prevent load-load reordering, and without using
  // a memory barrier (which would be more expensive).
  // Note: `ip` LSR #32 evaluates to 0, so base_reg's value is unchanged.
  __ Add(base_reg, base_reg, Operand(ip, LSR, 32));
  __ Bx(lr);          // And return back to the function.
  // Note: The fake dependency is unnecessary for the slow path.
}
    201 
// Load the read barrier introspection entrypoint in register `entrypoint`.
// The entrypoint is read from the current thread's read-barrier-mark entrypoint
// table at the slot for register 12 (IP), which holds pReadBarrierMarkIntrospection.
static void LoadReadBarrierMarkIntrospectionEntrypoint(arm::ArmVIXLAssembler& assembler,
                                                       vixl::aarch32::Register entrypoint) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::ip;
  // Thread Register.
  const vixl::aarch32::Register tr = vixl::aarch32::r9;

  // The register where the read barrier introspection entrypoint is loaded
  // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4).
  DCHECK_EQ(entrypoint.GetCode(), Thumb2RelativePatcher::kBakerCcEntrypointRegister);
  // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection.
  DCHECK_EQ(ip.GetCode(), 12u);
  const int32_t entry_point_offset =
      Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode());
  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
}
    219 
// Emit the body of a Baker read barrier thunk of the kind/registers/width packed in
// `encoded_data`. The thunk is entered via the patched BNE with LR pointing into the
// compiled method (with the Thumb state bit set), and either returns to the method on
// the fast path or tail-calls the introspection entrypoint on the slow path.
void Thumb2RelativePatcher::CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler,
                                                         uint32_t encoded_data) {
  using namespace vixl::aarch32;  // NOLINT(build/namespaces)
  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
  switch (kind) {
    case BakerReadBarrierKind::kField: {
      // Check if the holder is gray and, if not, add fake dependency to the base register
      // and return to the LDR instruction to load the reference. Otherwise, use introspection
      // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister)
      // that performs further checks on the reference and marks it if needed.
      Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data));
      CheckValidReg(holder_reg.GetCode());
      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      // If base_reg differs from holder_reg, the offset was too large and we must have
      // emitted an explicit null check before the load. Otherwise, we need to null-check
      // the holder as we do not necessarily do that check before going to the thunk.
      vixl::aarch32::Label throw_npe;
      if (holder_reg.Is(base_reg)) {
        __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false);
      }
      vixl::aarch32::Label slow_path;
      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
      // The LDR offset depends on whether the original load was a wide (32-bit)
      // or narrow (16-bit) Thumb2 LDR.
      const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide)
          ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET
          : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET;
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
      __ Bind(&slow_path);
      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
                                 raw_ldr_offset;
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      if (width == BakerReadBarrierWidth::kWide) {
        MemOperand ldr_half_address(lr, ldr_offset + 2);
        __ Ldrh(ip, ldr_half_address);        // Load the LDR immediate half-word with "Rt | imm12".
        __ Ubfx(ip, ip, 0, 12);               // Extract the offset imm12.
        __ Ldr(ip, MemOperand(base_reg, ip));   // Load the reference.
      } else {
        MemOperand ldr_address(lr, ldr_offset);
        __ Ldrh(ip, ldr_address);             // Load the LDR immediate, encoding T1.
        __ Add(ep_reg,                        // Adjust the entrypoint address to the entrypoint
               ep_reg,                        // for narrow LDR.
               Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET));
        __ Ubfx(ip, ip, 6, 5);                // Extract the imm5, i.e. offset / 4.
        __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2));   // Load the reference.
      }
      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
      __ Bx(ep_reg);                          // Jump to the entrypoint.
      if (holder_reg.Is(base_reg)) {
        // Add null check slow path. The stack map is at the address pointed to by LR.
        __ Bind(&throw_npe);
        int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value();
        __ Ldr(ip, MemOperand(/* Thread* */ vixl::aarch32::r9, offset));
        __ Bx(ip);
      }
      break;
    }
    case BakerReadBarrierKind::kArray: {
      // Array loads are always wide and index the data area; the lock word is reached
      // through a negative offset relative to the data pointer held in base_reg.
      Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      DCHECK(BakerReadBarrierWidth::kWide == BakerReadBarrierWidthField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      vixl::aarch32::Label slow_path;
      int32_t data_offset =
          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
      DCHECK_LT(lock_word.GetOffsetImmediate(), 0);
      const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET;
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
      __ Bind(&slow_path);
      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
                                 raw_ldr_offset;
      MemOperand ldr_address(lr, ldr_offset + 2);
      __ Ldrb(ip, ldr_address);               // Load the LDR (register) byte with "00 | imm2 | Rm",
                                              // i.e. Rm+32 because the scale in imm2 is 2.
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      __ Bfi(ep_reg, ip, 3, 6);               // Insert ip to the entrypoint address to create
                                              // a switch case target based on the index register.
      __ Mov(ip, base_reg);                   // Move the base register to ip.
      __ Bx(ep_reg);                          // Jump to the entrypoint's array switch case.
      break;
    }
    case BakerReadBarrierKind::kGcRoot: {
      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
      // and it does not have a forwarding address), call the correct introspection entrypoint;
      // otherwise return the reference (or the extracted forwarding address).
      // There is no gray bit check for GC roots.
      Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(root_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      vixl::aarch32::Label return_label, not_marked, forwarding_address;
      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
      MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
      __ Ldr(ip, lock_word);
      __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
      __ B(eq, &not_marked);
      __ Bind(&return_label);
      __ Bx(lr);
      __ Bind(&not_marked);
      static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3,
                    "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in "
                    " the highest bits and the 'forwarding address' state to have all bits set");
      __ Cmp(ip, Operand(0xc0000000));
      __ B(hs, &forwarding_address);
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
      // to art_quick_read_barrier_mark_introspection_gc_roots.
      int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
          ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
          : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
      __ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
      __ Mov(ip, root_reg);
      __ Bx(ep_reg);
      __ Bind(&forwarding_address);
      // Extract the forwarding address by shifting it into place.
      __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift);
      __ Bx(lr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
      UNREACHABLE();
  }
}
    353 
    354 std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
    355   ArenaPool pool;
    356   ArenaAllocator arena(&pool);
    357   arm::ArmVIXLAssembler assembler(&arena);
    358 
    359   switch (key.GetType()) {
    360     case ThunkType::kMethodCall:
    361       // The thunk just uses the entry point in the ArtMethod. This works even for calls
    362       // to the generic JNI and interpreter trampolines.
    363       assembler.LoadFromOffset(
    364           arm::kLoadWord,
    365           vixl::aarch32::pc,
    366           vixl::aarch32::r0,
    367           ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
    368       __ Bkpt(0);
    369       break;
    370     case ThunkType::kBakerReadBarrier:
    371       CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
    372       break;
    373   }
    374 
    375   assembler.FinalizeCode();
    376   std::vector<uint8_t> thunk_code(assembler.CodeSize());
    377   MemoryRegion code(thunk_code.data(), thunk_code.size());
    378   assembler.FinalizeInstructions(code);
    379   return thunk_code;
    380 }
    381 
    382 #undef __
    383 
    384 uint32_t Thumb2RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
    385   switch (key.GetType()) {
    386     case ThunkType::kMethodCall:
    387       return kMaxMethodCallPositiveDisplacement;
    388     case ThunkType::kBakerReadBarrier:
    389       return kMaxBcondPositiveDisplacement;
    390   }
    391 }
    392 
    393 uint32_t Thumb2RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
    394   switch (key.GetType()) {
    395     case ThunkType::kMethodCall:
    396       return kMaxMethodCallNegativeDisplacement;
    397     case ThunkType::kBakerReadBarrier:
    398       return kMaxBcondNegativeDisplacement;
    399   }
    400 }
    401 
    402 void Thumb2RelativePatcher::SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
    403   DCHECK_LE(offset + 4u, code->size());
    404   DCHECK_ALIGNED(offset, 2u);
    405   uint8_t* addr = &(*code)[offset];
    406   addr[0] = (value >> 16) & 0xff;
    407   addr[1] = (value >> 24) & 0xff;
    408   addr[2] = (value >> 0) & 0xff;
    409   addr[3] = (value >> 8) & 0xff;
    410 }
    411 
    412 uint32_t Thumb2RelativePatcher::GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset) {
    413   DCHECK_LE(offset + 4u, code.size());
    414   DCHECK_ALIGNED(offset, 2u);
    415   const uint8_t* addr = &code[offset];
    416   return
    417       (static_cast<uint32_t>(addr[0]) << 16) +
    418       (static_cast<uint32_t>(addr[1]) << 24) +
    419       (static_cast<uint32_t>(addr[2]) << 0)+
    420       (static_cast<uint32_t>(addr[3]) << 8);
    421 }
    422 
    423 template <typename Vector>
    424 uint32_t Thumb2RelativePatcher::GetInsn32(Vector* code, uint32_t offset) {
    425   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
    426   return GetInsn32(ArrayRef<const uint8_t>(*code), offset);
    427 }
    428 
    429 uint32_t Thumb2RelativePatcher::GetInsn16(ArrayRef<const uint8_t> code, uint32_t offset) {
    430   DCHECK_LE(offset + 2u, code.size());
    431   DCHECK_ALIGNED(offset, 2u);
    432   const uint8_t* addr = &code[offset];
    433   return (static_cast<uint32_t>(addr[0]) << 0) + (static_cast<uint32_t>(addr[1]) << 8);
    434 }
    435 
    436 template <typename Vector>
    437 uint32_t Thumb2RelativePatcher::GetInsn16(Vector* code, uint32_t offset) {
    438   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
    439   return GetInsn16(ArrayRef<const uint8_t>(*code), offset);
    440 }
    441 
    442 }  // namespace linker
    443 }  // namespace art
    444