/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm64/relative_patcher_arm64.h"

#include "arch/arm64/asm_support_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "base/bit_utils.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "linker/output_stream.h"
#include "lock_word.h"
#include "mirror/object.h"
#include "mirror/array-inl.h"
#include "oat.h"
#include "oat_quick_method_header.h"
#include "read_barrier.h"
#include "utils/arm64/assembler_arm64.h"

namespace art {
namespace linker {

namespace {

// Maximum positive and negative displacement for method call measured from the patch location.
// (Signed 28 bit displacement with the last two bits 0 has range [-2^27, 2^27-4] measured from
// the ARM64 PC pointing to the BL.)
constexpr uint32_t kMaxMethodCallPositiveDisplacement = (1u << 27) - 4u;
constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 27);

// Maximum positive and negative displacement for a conditional branch measured from the patch
// location. (Signed 21 bit displacement with the last two bits 0 has range [-2^20, 2^20-4]
// measured from the ARM64 PC pointing to the B.cond.)
constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 4u;
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20);

// The ADRP thunk for erratum 843419 is 2 instructions, i.e. 8 bytes.
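// Each thunk holds the relocated ADRP followed by a branch back to the instruction
// after the original ADRP; see the erratum 843419 path in PatchPcRelativeReference().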
constexpr uint32_t kAdrpThunkSize = 8u;

inline bool IsAdrpPatch(const LinkerPatch& patch) {
  switch (patch.GetType()) {
    case LinkerPatch::Type::kCall:
    case LinkerPatch::Type::kCallRelative:
    case LinkerPatch::Type::kBakerReadBarrierBranch:
      return false;
    case LinkerPatch::Type::kMethodRelative:
    case LinkerPatch::Type::kMethodBssEntry:
    case LinkerPatch::Type::kTypeRelative:
    case LinkerPatch::Type::kTypeBssEntry:
    case LinkerPatch::Type::kStringRelative:
    case LinkerPatch::Type::kStringBssEntry:
      return patch.LiteralOffset() == patch.PcInsnOffset();
  }
}

inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
  if (num_adrp == 0u) {
    return 0u;
  }
  uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size;
  return kAdrpThunkSize * num_adrp + alignment_bytes;
}

}  // anonymous namespace

Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                           const Arm64InstructionSetFeatures* features)
    : ArmBaseRelativePatcher(provider, kArm64),
      fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
      reserved_adrp_thunks_(0u),
      processed_adrp_thunks_(0u) {
  if (fix_cortex_a53_843419_) {
    adrp_thunk_locations_.reserve(16u);
    current_method_thunks_.reserve(16u * kAdrpThunkSize);
  }
}

uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
                                            const CompiledMethod* compiled_method,
                                            MethodReference method_ref) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
    return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
  }

  // Add thunks for previous method if any.
  if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
    size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
    reserved_adrp_thunks_ = adrp_thunk_locations_.size();
  }

  // Count the number of ADRP insns as the upper bound on the number of thunks needed
  // and use it to reserve space for other linker patches.
  size_t num_adrp = 0u;
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      ++num_adrp;
    }
  }
  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
  uint32_t max_extra_space = MaxExtraSpace(num_adrp, code.size());
  offset = ReserveSpaceInternal(offset, compiled_method, method_ref, max_extra_space);
  if (num_adrp == 0u) {
    return offset;
  }

  // Now that we have the actual offset where the code will be placed, locate the ADRP insns
  // that actually require the thunk.
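  // (An ADRP needs a thunk only when it lands at offset 0xff8 or 0xffc modulo 4KiB
  // and the following instruction is not provably safe; see NeedsErratum843419Thunk().)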
  uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
  uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      uint32_t patch_offset = quick_code_offset + patch.LiteralOffset();
      if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) {
        adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset);
        thunk_offset += kAdrpThunkSize;
      }
    }
  }
  return offset;
}

uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
  } else {
    // Add thunks for the last method if any.
    if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
      size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
      reserved_adrp_thunks_ = adrp_thunk_locations_.size();
    }
  }
  return ArmBaseRelativePatcher::ReserveSpaceEnd(offset);
}

uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (fix_cortex_a53_843419_) {
    if (!current_method_thunks_.empty()) {
      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
      if (kIsDebugBuild) {
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        for (size_t i = 0u; i != num_thunks; ++i) {
          const auto& entry = adrp_thunk_locations_[processed_adrp_thunks_ - num_thunks + i];
          CHECK_EQ(entry.second, aligned_offset + i * kAdrpThunkSize);
        }
      }
      uint32_t aligned_code_delta = aligned_offset - offset;
      if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
        return 0u;
      }
      if (!WriteMiscThunk(out, ArrayRef<const uint8_t>(current_method_thunks_))) {
        return 0u;
      }
      offset = aligned_offset + current_method_thunks_.size();
      current_method_thunks_.clear();
    }
  }
  return ArmBaseRelativePatcher::WriteThunks(out, offset);
}

void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
                                     uint32_t literal_offset,
                                     uint32_t patch_offset,
                                     uint32_t target_offset) {
  DCHECK_LE(literal_offset + 4u, code->size());
  DCHECK_EQ(literal_offset & 3u, 0u);
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
  DCHECK_EQ(displacement & 3u, 0u);
  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
  insn |= 0x94000000;  // BL

  // Check that we're just overwriting an existing BL.
  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
  // Write the new BL.
  SetInsn(code, literal_offset, insn);
}
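
// PatchPcRelativeReference() below handles the two-instruction pattern used for
// PC-relative references: an ADRP computes the 4KiB page of the target (patched
// via PatchAdrp()) and a dependent ADD or LDR/STR receives the low 12 bits of the
// target in its imm12 field. Illustrative sketch of the patched pair:
//   adrp x0, Target                // page bits, see PatchAdrp()
//   ldr  w0, [x0, #:lo12:Target]   // low 12 bits, patched below
// The LinkerPatch ties the pair together via LiteralOffset() and PcInsnOffset().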

void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
                                                    const LinkerPatch& patch,
                                                    uint32_t patch_offset,
                                                    uint32_t target_offset) {
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t literal_offset = patch.LiteralOffset();
  uint32_t insn = GetInsn(code, literal_offset);
  uint32_t pc_insn_offset = patch.PcInsnOffset();
  uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
  bool wide = (insn & 0x40000000) != 0;
  uint32_t shift = wide ? 3u : 2u;
  if (literal_offset == pc_insn_offset) {
    // Check it's an ADRP with imm == 0 (unset).
    DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
        << literal_offset << ", " << pc_insn_offset << ", 0x" << std::hex << insn;
    if (fix_cortex_a53_843419_ && processed_adrp_thunks_ != adrp_thunk_locations_.size() &&
        adrp_thunk_locations_[processed_adrp_thunks_].first == patch_offset) {
      DCHECK(NeedsErratum843419Thunk(ArrayRef<const uint8_t>(*code),
                                     literal_offset, patch_offset));
      uint32_t thunk_offset = adrp_thunk_locations_[processed_adrp_thunks_].second;
      uint32_t adrp_disp = target_offset - (thunk_offset & ~0xfffu);
      uint32_t adrp = PatchAdrp(insn, adrp_disp);

      uint32_t out_disp = thunk_offset - patch_offset;
      DCHECK_EQ(out_disp & 3u, 0u);
      DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u);  // 28-bit signed.
      insn = (out_disp & 0x0fffffffu) >> shift;
      insn |= 0x14000000;  // B <thunk>

      uint32_t back_disp = -out_disp;
      DCHECK_EQ(back_disp & 3u, 0u);
      DCHECK((back_disp >> 27) == 0u || (back_disp >> 27) == 31u);  // 28-bit signed.
      uint32_t b_back = (back_disp & 0x0fffffffu) >> 2;
      b_back |= 0x14000000;  // B <back>
      size_t thunks_code_offset = current_method_thunks_.size();
      current_method_thunks_.resize(thunks_code_offset + kAdrpThunkSize);
      SetInsn(&current_method_thunks_, thunks_code_offset, adrp);
      SetInsn(&current_method_thunks_, thunks_code_offset + 4u, b_back);
      static_assert(kAdrpThunkSize == 2 * 4u, "thunk has 2 instructions");

      processed_adrp_thunks_ += 1u;
    } else {
      insn = PatchAdrp(insn, disp);
    }
    // Write the new ADRP (or B to the erratum 843419 thunk).
    SetInsn(code, literal_offset, insn);
  } else {
    if ((insn & 0xfffffc00) == 0x91000000) {
      // ADD immediate, 64-bit with imm12 == 0 (unset).
      if (!kEmitCompilerReadBarrier) {
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
      } else {
        // With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
               patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      }
      shift = 0u;  // No shift for ADD.
    } else {
      // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
      DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
             patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
             patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
    }
    if (kIsDebugBuild) {
      uint32_t adrp = GetInsn(code, pc_insn_offset);
      if ((adrp & 0x9f000000u) != 0x90000000u) {
        CHECK(fix_cortex_a53_843419_);
        CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B <thunk>
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
        for (size_t i = processed_adrp_thunks_ - num_thunks; ; ++i) {
          CHECK_NE(i, processed_adrp_thunks_);
          if (adrp_thunk_locations_[i].first == b_offset) {
            size_t idx = num_thunks - (processed_adrp_thunks_ - i);
            adrp = GetInsn(&current_method_thunks_, idx * kAdrpThunkSize);
            break;
          }
        }
      }
      CHECK_EQ(adrp & 0x9f00001fu,                   // Check that pc_insn_offset points
               0x90000000 | ((insn >> 5) & 0x1fu));  // to ADRP with matching register.
    }
    uint32_t imm12 = (disp & 0xfffu) >> shift;
    insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
    SetInsn(code, literal_offset, insn);
  }
}
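
// The Baker read barrier patch site is emitted as "CBNZ Xt, +0" (checked below) and
// redirected here to the matching thunk. The 19-bit CBNZ immediate lives in bits
// 5-23 of the instruction, so displacement bits 2-20 are shifted into that field.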

void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                       const LinkerPatch& patch,
                                                       uint32_t patch_offset) {
  DCHECK_ALIGNED(patch_offset, 4u);
  uint32_t literal_offset = patch.LiteralOffset();
  DCHECK_ALIGNED(literal_offset, 4u);
  DCHECK_LT(literal_offset, code->size());
  uint32_t insn = GetInsn(code, literal_offset);
  DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000);  // CBNZ Xt, +0 (unpatched)
  ThunkKey key = GetBakerThunkKey(patch);
  if (kIsDebugBuild) {
    const uint32_t encoded_data = key.GetCustomValue1();
    BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
    // Check that the next instruction matches the expected LDR.
    switch (kind) {
      case BakerReadBarrierKind::kField: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (immediate) with correct base_reg.
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
        break;
      }
      case BakerReadBarrierKind::kArray: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
        // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
        CheckValidReg((next_insn >> 16) & 0x1f);  // Check index register.
        break;
      }
      case BakerReadBarrierKind::kGcRoot: {
        DCHECK_GE(literal_offset, 4u);
        uint32_t prev_insn = GetInsn(code, literal_offset - 4u);
        // LDR (immediate) with correct root_reg.
        const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
        break;
      }
      default:
        LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
        UNREACHABLE();
    }
  }
  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
  DCHECK_ALIGNED(target_offset, 4u);
  uint32_t disp = target_offset - patch_offset;
  DCHECK((disp >> 20) == 0u || (disp >> 20) == 4095u);  // 21-bit signed.
  insn |= (disp << (5 - 2)) & 0x00ffffe0u;  // Shift bits 2-20 to 5-23.
  SetInsn(code, literal_offset, insn);
}

#define __ assembler.GetVIXLAssembler()->
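
// EmitGrayCheckAndFastPath() below avoids a memory barrier on the fast path by
// creating an address dependency: the 32-bit lock word load zero-extends into ip0,
// so "ip0 LSR #32" is always zero and adding it to base_reg leaves the address
// unchanged while still ordering the reference load after the lock word load.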

static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
                                     vixl::aarch64::Register base_reg,
                                     vixl::aarch64::MemOperand& lock_word,
                                     vixl::aarch64::Label* slow_path) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  // Load the lock word containing the rb_state.
  __ Ldr(ip0.W(), lock_word);
  // Given the numeric representation, it's enough to check the low bit of the rb_state.
  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
  __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
  static_assert(
      BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
      "Field and array LDR offsets must be the same to reuse the same code.");
  // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
  static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
                "Field LDR must be 1 instruction (4B) before the return address label; "
                " 2 instructions (8B) for heap poisoning.");
  __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
  // Introduce a dependency on the lock_word including rb_state,
  // to prevent load-load reordering, and without using
  // a memory barrier (which would be more expensive).
  __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
  __ Br(lr);  // And return back to the function.
  // Note: The fake dependency is unnecessary for the slow path.
}

// Load the read barrier introspection entrypoint in register `entrypoint`.
static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
                                                       vixl::aarch64::Register entrypoint) {
  using vixl::aarch64::MemOperand;
  using vixl::aarch64::ip0;
  // Thread Register.
  const vixl::aarch64::Register tr = vixl::aarch64::x19;

  // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
  DCHECK_EQ(ip0.GetCode(), 16u);
  const int32_t entry_point_offset =
      Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
}
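
// Compile one of the three Baker read barrier thunk kinds: kField and kArray check
// the holder's lock word and return through LR on the fast path; kGcRoot instead
// inspects the root itself (null check, mark bit, forwarding address).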

void Arm64RelativePatcher::CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler,
                                                        uint32_t encoded_data) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
  switch (kind) {
    case BakerReadBarrierKind::kField: {
      // Check if the holder is gray and, if not, add fake dependency to the base register
      // and return to the LDR instruction to load the reference. Otherwise, use introspection
      // to load the reference and call the entrypoint (in IP1) that performs further checks
      // on the reference and marks it if needed.
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      auto holder_reg =
          Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
      CheckValidReg(holder_reg.GetCode());
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      // If base_reg differs from holder_reg, the offset was too large and we must have
      // emitted an explicit null check before the load. Otherwise, we need to null-check
      // the holder as we do not necessarily do that check before going to the thunk.
      vixl::aarch64::Label throw_npe;
      if (holder_reg.Is(base_reg)) {
        __ Cbz(holder_reg.W(), &throw_npe);
      }
      vixl::aarch64::Label slow_path;
      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);       // Load the LDR (immediate) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0.W(), ip0.W(), 10, 12);  // Extract the offset.
      __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2));  // Load the reference.
      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
      __ Br(ip1);                         // Jump to the entrypoint.
      if (holder_reg.Is(base_reg)) {
        // Add null check slow path. The stack map is at the address pointed to by LR.
        __ Bind(&throw_npe);
        int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
        __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
        __ Br(ip0);
      }
      break;
    }
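    // For arrays the thunk cannot know the index register statically, so the slow
    // path extracts it from the LDR (register) instruction and BFIs it into the
    // entrypoint address, selecting a per-register switch case in the entrypoint.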
    case BakerReadBarrierKind::kArray: {
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label slow_path;
      int32_t data_offset =
          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
      DCHECK_LT(lock_word.GetOffset(), 0);
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);  // Load the LDR (register) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0, ip0, 16, 6);      // Extract the index register, plus 32 (bit 21 is set).
      __ Bfi(ip1, ip0, 3, 6);        // Insert ip0 into the entrypoint address to create
                                     // a switch case target based on the index register.
      __ Mov(ip0, base_reg);         // Move the base register to ip0.
      __ Br(ip1);                    // Jump to the entrypoint's array switch case.
      break;
    }
    case BakerReadBarrierKind::kGcRoot: {
      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
      // and it does not have a forwarding address), call the correct introspection entrypoint;
      // otherwise return the reference (or the extracted forwarding address).
      // There is no gray bit check for GC roots.
      auto root_reg =
          Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(root_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label return_label, not_marked, forwarding_address;
      __ Cbz(root_reg, &return_label);
      MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
      __ Ldr(ip0.W(), lock_word);
      __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
      __ Bind(&return_label);
      __ Br(lr);
      __ Bind(&not_marked);
      __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
      __ B(&forwarding_address, mi);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
      // art_quick_read_barrier_mark_introspection_gc_roots.
      __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
      __ Mov(ip0.W(), root_reg);
      __ Br(ip1);
      __ Bind(&forwarding_address);
      __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
      __ Br(lr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
      UNREACHABLE();
  }
}

std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  arm64::Arm64Assembler assembler(&arena);

  switch (key.GetType()) {
    case ThunkType::kMethodCall: {
      // The thunk just uses the entry point in the ArtMethod. This works even for calls
      // to the generic JNI and interpreter trampolines.
      Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArm64PointerSize).Int32Value());
      assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
      break;
    }
    case ThunkType::kBakerReadBarrier: {
      CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
      break;
    }
  }

  // Ensure we emit the literal pool.
  assembler.FinalizeCode();
  std::vector<uint8_t> thunk_code(assembler.CodeSize());
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler.FinalizeInstructions(code);
  return thunk_code;
}

#undef __

uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallPositiveDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondPositiveDisplacement;
  }
}

uint32_t Arm64RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallNegativeDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondNegativeDisplacement;
  }
}

uint32_t Arm64RelativePatcher::PatchAdrp(uint32_t adrp, uint32_t disp) {
  return (adrp & 0x9f00001fu) |  // Clear offset bits, keep ADRP with destination reg.
      // Bottom 12 bits are ignored, the next 2 lowest bits are encoded in bits 29-30.
      ((disp & 0x00003000u) << (29 - 12)) |
      // The next 16 bits are encoded in bits 5-22.
      ((disp & 0xffffc000u) >> (12 + 2 - 5)) |
      // Since the target_offset is based on the beginning of the oat file and the
      // image space precedes the oat file, the target_offset into image space will
      // be negative yet passed as uint32_t. Therefore we limit the displacement
      // to +-2GiB (rather than the maximum +-4GiB) and determine the sign bit from
      // the highest bit of the displacement. This is encoded in bit 23.
      ((disp & 0x80000000u) >> (31 - 23));
}
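
// Cortex-A53 erratum 843419: an ADRP in one of the last two instruction slots of a
// 4KiB page (offsets 0xff8/0xffc) may, under certain conditions, produce an incorrect
// result when followed by particular load/store sequences. (This is a brief paraphrase;
// see ARM's erratum notice for the exact conditions.) We conservatively redirect such
// an ADRP to a thunk unless the next instruction is provably safe, as checked below.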

bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
                                                   uint32_t literal_offset,
                                                   uint32_t patch_offset) {
  DCHECK_EQ(patch_offset & 0x3u, 0u);
  if ((patch_offset & 0xff8) == 0xff8) {  // ...ff8 or ...ffc
    uint32_t adrp = GetInsn(code, literal_offset);
    DCHECK_EQ(adrp & 0x9f000000, 0x90000000);
    uint32_t next_offset = patch_offset + 4u;
    uint32_t next_insn = GetInsn(code, literal_offset + 4u);

    // Below we avoid patching sequences where the adrp is followed by a load which can easily
    // be proved to be aligned.

    // First check if the next insn is the LDR using the result of the ADRP.
    // LDR <Wt>, [<Xn>, #pimm], where <Xn> == ADRP destination reg.
    if ((next_insn & 0xffc00000) == 0xb9400000 &&
        (((next_insn >> 5) ^ adrp) & 0x1f) == 0) {
      return false;
    }

    // And since LinkerPatch::Type::k{Method,Type,String}Relative is using the result
    // of the ADRP for an ADD immediate, check for that as well. We generalize a bit
    // to include ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination
    // or stores the result to a different register.
    if ((next_insn & 0x1f000000) == 0x11000000 &&
        ((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) {
      return false;
    }

    // LDR <Wt>, <label> is always aligned and thus it doesn't cause boundary crossing.
    if ((next_insn & 0xff000000) == 0x18000000) {
      return false;
    }

    // LDR <Xt>, <label> is aligned iff the pc + displacement is a multiple of 8.
    if ((next_insn & 0xff000000) == 0x58000000) {
      bool is_aligned_load = (((next_offset >> 2) ^ (next_insn >> 5)) & 1) == 0;
      return !is_aligned_load;
    }

    // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned loads, as SP is
    // guaranteed to be 128-bit aligned and <pimm> is a multiple of the load size.
    if ((next_insn & 0xbfc003e0) == 0xb94003e0) {
      return false;
    }
    return true;
  }
  return false;
}

void Arm64RelativePatcher::SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
  DCHECK_LE(offset + 4u, code->size());
  DCHECK_EQ(offset & 3u, 0u);
  uint8_t* addr = &(*code)[offset];
  addr[0] = (value >> 0) & 0xff;
  addr[1] = (value >> 8) & 0xff;
  addr[2] = (value >> 16) & 0xff;
  addr[3] = (value >> 24) & 0xff;
}

uint32_t Arm64RelativePatcher::GetInsn(ArrayRef<const uint8_t> code, uint32_t offset) {
  DCHECK_LE(offset + 4u, code.size());
  DCHECK_EQ(offset & 3u, 0u);
  const uint8_t* addr = &code[offset];
  return
      (static_cast<uint32_t>(addr[0]) << 0) +
      (static_cast<uint32_t>(addr[1]) << 8) +
      (static_cast<uint32_t>(addr[2]) << 16) +
      (static_cast<uint32_t>(addr[3]) << 24);
}

template <typename Alloc>
uint32_t Arm64RelativePatcher::GetInsn(std::vector<uint8_t, Alloc>* code, uint32_t offset) {
  return GetInsn(ArrayRef<const uint8_t>(*code), offset);
}

}  // namespace linker
}  // namespace art