//===-- DNBArchImpl.cpp -----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined (__arm__)

#include "MacOSX/arm/DNBArchImpl.h"
#include "MacOSX/MachProcess.h"
#include "MacOSX/MachThread.h"
#include "DNBBreakpoint.h"
#include "DNBLog.h"
#include "DNBRegisterInfo.h"
#include "DNB.h"
#include "ARM_GCC_Registers.h"
#include "ARM_DWARF_Registers.h"

#include <sys/sysctl.h>

// BCR address match type
#define BCR_M_IMVA_MATCH        ((uint32_t)(0u << 21))
#define BCR_M_CONTEXT_ID_MATCH  ((uint32_t)(1u << 21))
#define BCR_M_IMVA_MISMATCH     ((uint32_t)(2u << 21))
#define BCR_M_RESERVED          ((uint32_t)(3u << 21))

// Link a BVR/BCR or WVR/WCR pair to another
#define E_ENABLE_LINKING        ((uint32_t)(1u << 20))

// Byte Address Select
#define BAS_IMVA_PLUS_0         ((uint32_t)(1u << 5))
#define BAS_IMVA_PLUS_1         ((uint32_t)(1u << 6))
#define BAS_IMVA_PLUS_2         ((uint32_t)(1u << 7))
#define BAS_IMVA_PLUS_3         ((uint32_t)(1u << 8))
#define BAS_IMVA_0_1            ((uint32_t)(3u << 5))
#define BAS_IMVA_2_3            ((uint32_t)(3u << 7))
#define BAS_IMVA_ALL            ((uint32_t)(0xfu << 5))

// Break only in privileged or user mode
#define S_RSVD                  ((uint32_t)(0u << 1))
#define S_PRIV                  ((uint32_t)(1u << 1))
#define S_USER                  ((uint32_t)(2u << 1))
#define S_PRIV_USER             ((S_PRIV) | (S_USER))

#define BCR_ENABLE              ((uint32_t)(1u))
#define WCR_ENABLE              ((uint32_t)(1u))

// Watchpoint load/store
#define WCR_LOAD                ((uint32_t)(1u << 3))
#define WCR_STORE               ((uint32_t)(1u << 4))
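
// Illustrative sketch only (not a value used anywhere below): the breakpoint
// code in EnableHardwareBreakpoint() composes the fields above into a single
// control register value, e.g. for a Thumb opcode in the low half of a word:
//
//   uint32_t bcr_value = BCR_M_IMVA_MATCH   // stop when the PC matches the address in the BVR
//                      | BAS_IMVA_0_1       // only bytes 0-1 of that word
//                      | S_USER             // user mode only
//                      | BCR_ENABLE;        // enable this breakpoint
//
// The watchpoint path does the same with the WCR_* and BAS_* fields.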
// Definitions for the Debug Status and Control Register fields:
// [5:2] => Method of debug entry
//#define WATCHPOINT_OCCURRED     ((uint32_t)(2u))
// I'm seeing this, instead.
#define WATCHPOINT_OCCURRED     ((uint32_t)(10u))

static const uint8_t g_arm_breakpoint_opcode[] = { 0xFE, 0xDE, 0xFF, 0xE7 };
static const uint8_t g_thumb_breakpoint_opcode[] = { 0xFE, 0xDE };

// ARM constants used during decoding
#define REG_RD          0
#define LDM_REGLIST     1
#define PC_REG          15
#define PC_REGLIST_BIT  0x8000

// ARM conditions
#define COND_EQ     0x0
#define COND_NE     0x1
#define COND_CS     0x2
#define COND_HS     0x2
#define COND_CC     0x3
#define COND_LO     0x3
#define COND_MI     0x4
#define COND_PL     0x5
#define COND_VS     0x6
#define COND_VC     0x7
#define COND_HI     0x8
#define COND_LS     0x9
#define COND_GE     0xA
#define COND_LT     0xB
#define COND_GT     0xC
#define COND_LE     0xD
#define COND_AL     0xE
#define COND_UNCOND 0xF

#define MASK_CPSR_T (1u << 5)
#define MASK_CPSR_J (1u << 24)

#define MNEMONIC_STRING_SIZE 32
#define OPERAND_STRING_SIZE 128


void
DNBArchMachARM::Initialize()
{
    DNBArchPluginInfo arch_plugin_info =
    {
        CPU_TYPE_ARM,
        DNBArchMachARM::Create,
        DNBArchMachARM::GetRegisterSetInfo,
        DNBArchMachARM::SoftwareBreakpointOpcode
    };

    // Register this arch plug-in with the main protocol class
    DNBArchProtocol::RegisterArchPlugin (arch_plugin_info);
}


DNBArchProtocol *
DNBArchMachARM::Create (MachThread *thread)
{
    DNBArchMachARM *obj = new DNBArchMachARM (thread);
    return obj;
}

const uint8_t * const
DNBArchMachARM::SoftwareBreakpointOpcode (nub_size_t byte_size)
{
    switch (byte_size)
    {
    case 2: return g_thumb_breakpoint_opcode;
    case 4: return g_arm_breakpoint_opcode;
    }
    return NULL;
}

uint32_t
DNBArchMachARM::GetCPUType()
{
    return CPU_TYPE_ARM;
}

uint64_t
DNBArchMachARM::GetPC(uint64_t failValue)
{
    // Get program counter
    if (GetGPRState(false) == KERN_SUCCESS)
        return m_state.context.gpr.__pc;
    return failValue;
}

kern_return_t
DNBArchMachARM::SetPC(uint64_t value)
{
    // Set the program counter
    kern_return_t err = GetGPRState(false);
    if (err == KERN_SUCCESS)
    {
        m_state.context.gpr.__pc = value;
        err = SetGPRState();
    }
    return err;     // Return the error code, not a bool
}

uint64_t
DNBArchMachARM::GetSP(uint64_t failValue)
{
    // Get stack pointer
    if (GetGPRState(false) == KERN_SUCCESS)
        return m_state.context.gpr.__sp;
    return failValue;
}

kern_return_t
DNBArchMachARM::GetGPRState(bool force)
{
    int set = e_regSetGPR;
    // Check if we have valid cached registers
    if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
        return KERN_SUCCESS;

    // Read the registers from our thread
    mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT;
    kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count);
    uint32_t *r = &m_state.context.gpr.__r[0];
    DNBLogThreadedIf(LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs r0=%8.8x r1=%8.8x r2=%8.8x r3=%8.8x r4=%8.8x r5=%8.8x r6=%8.8x r7=%8.8x r8=%8.8x r9=%8.8x r10=%8.8x r11=%8.8x r12=%8.8x sp=%8.8x lr=%8.8x pc=%8.8x cpsr=%8.8x",
                     m_thread->MachPortNumber(),
                     ARM_THREAD_STATE,
                     ARM_THREAD_STATE_COUNT,
                     kret,
                     count,
                     r[0],
                     r[1],
                     r[2],
                     r[3],
                     r[4],
                     r[5],
                     r[6],
                     r[7],
                     r[8],
                     r[9],
                     r[10],
                     r[11],
                     r[12],
                     r[13],
                     r[14],
                     r[15],
r[16]); 207 m_state.SetError(set, Read, kret); 208 return kret; 209 } 210 211 kern_return_t 212 DNBArchMachARM::GetVFPState(bool force) 213 { 214 int set = e_regSetVFP; 215 // Check if we have valid cached registers 216 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 217 return KERN_SUCCESS; 218 219 // Read the registers from our thread 220 mach_msg_type_number_t count = ARM_VFP_STATE_COUNT; 221 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, &count); 222 if (DNBLogEnabledForAny (LOG_THREAD)) 223 { 224 uint32_t *r = &m_state.context.vfp.__r[0]; 225 DNBLogThreaded ("thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count => %u)", 226 m_thread->MachPortNumber(), 227 ARM_THREAD_STATE, 228 ARM_THREAD_STATE_COUNT, 229 kret, 230 count); 231 DNBLogThreaded(" s0=%8.8x s1=%8.8x s2=%8.8x s3=%8.8x s4=%8.8x s5=%8.8x s6=%8.8x s7=%8.8x",r[ 0],r[ 1],r[ 2],r[ 3],r[ 4],r[ 5],r[ 6],r[ 7]); 232 DNBLogThreaded(" s8=%8.8x s9=%8.8x s10=%8.8x s11=%8.8x s12=%8.8x s13=%8.8x s14=%8.8x s15=%8.8x",r[ 8],r[ 9],r[10],r[11],r[12],r[13],r[14],r[15]); 233 DNBLogThreaded(" s16=%8.8x s17=%8.8x s18=%8.8x s19=%8.8x s20=%8.8x s21=%8.8x s22=%8.8x s23=%8.8x",r[16],r[17],r[18],r[19],r[20],r[21],r[22],r[23]); 234 DNBLogThreaded(" s24=%8.8x s25=%8.8x s26=%8.8x s27=%8.8x s28=%8.8x s29=%8.8x s30=%8.8x s31=%8.8x",r[24],r[25],r[26],r[27],r[28],r[29],r[30],r[31]); 235 DNBLogThreaded(" s32=%8.8x s33=%8.8x s34=%8.8x s35=%8.8x s36=%8.8x s37=%8.8x s38=%8.8x s39=%8.8x",r[32],r[33],r[34],r[35],r[36],r[37],r[38],r[39]); 236 DNBLogThreaded(" s40=%8.8x s41=%8.8x s42=%8.8x s43=%8.8x s44=%8.8x s45=%8.8x s46=%8.8x s47=%8.8x",r[40],r[41],r[42],r[43],r[44],r[45],r[46],r[47]); 237 DNBLogThreaded(" s48=%8.8x s49=%8.8x s50=%8.8x s51=%8.8x s52=%8.8x s53=%8.8x s54=%8.8x s55=%8.8x",r[48],r[49],r[50],r[51],r[52],r[53],r[54],r[55]); 238 DNBLogThreaded(" s56=%8.8x s57=%8.8x s58=%8.8x s59=%8.8x s60=%8.8x s61=%8.8x s62=%8.8x s63=%8.8x fpscr=%8.8x",r[56],r[57],r[58],r[59],r[60],r[61],r[62],r[63],r[64]); 239 } 240 m_state.SetError(set, Read, kret); 241 return kret; 242 } 243 244 kern_return_t 245 DNBArchMachARM::GetEXCState(bool force) 246 { 247 int set = e_regSetEXC; 248 // Check if we have valid cached registers 249 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 250 return KERN_SUCCESS; 251 252 // Read the registers from our thread 253 mach_msg_type_number_t count = ARM_EXCEPTION_STATE_COUNT; 254 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count); 255 m_state.SetError(set, Read, kret); 256 return kret; 257 } 258 259 static void 260 DumpDBGState(const DNBArchMachARM::DBG& dbg) 261 { 262 uint32_t i = 0; 263 for (i=0; i<16; i++) { 264 DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }", 265 i, i, dbg.__bvr[i], dbg.__bcr[i], 266 i, i, dbg.__wvr[i], dbg.__wcr[i]); 267 } 268 } 269 270 kern_return_t 271 DNBArchMachARM::GetDBGState(bool force) 272 { 273 int set = e_regSetDBG; 274 275 // Check if we have valid cached registers 276 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 277 return KERN_SUCCESS; 278 279 // Read the registers from our thread 280 mach_msg_type_number_t count = ARM_DEBUG_STATE_COUNT; 281 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, &count); 282 m_state.SetError(set, Read, kret); 283 return kret; 284 } 285 286 kern_return_t 287 
DNBArchMachARM::SetGPRState()
{
    int set = e_regSetGPR;
    kern_return_t kret = ::thread_set_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, ARM_THREAD_STATE_COUNT);
    m_state.SetError(set, Write, kret);         // Set the current write error for this register set
    m_state.InvalidateRegisterSetState(set);    // Invalidate the current register state in case registers are read back differently
    return kret;                                // Return the error code
}

kern_return_t
DNBArchMachARM::SetVFPState()
{
    int set = e_regSetVFP;
    kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, ARM_VFP_STATE_COUNT);
    m_state.SetError(set, Write, kret);         // Set the current write error for this register set
    m_state.InvalidateRegisterSetState(set);    // Invalidate the current register state in case registers are read back differently
    return kret;                                // Return the error code
}

kern_return_t
DNBArchMachARM::SetEXCState()
{
    int set = e_regSetEXC;
    kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, ARM_EXCEPTION_STATE_COUNT);
    m_state.SetError(set, Write, kret);         // Set the current write error for this register set
    m_state.InvalidateRegisterSetState(set);    // Invalidate the current register state in case registers are read back differently
    return kret;                                // Return the error code
}

kern_return_t
DNBArchMachARM::SetDBGState(bool also_set_on_task)
{
    int set = e_regSetDBG;
    kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT);
    if (also_set_on_task)
    {
        kern_return_t task_kret = ::task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT);
        if (task_kret != KERN_SUCCESS)
            DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::SetDBGState failed to set debug control register state: 0x%8.8x.", task_kret);
    }

    m_state.SetError(set, Write, kret);         // Set the current write error for this register set
    m_state.InvalidateRegisterSetState(set);    // Invalidate the current register state in case registers are read back differently
    return kret;                                // Return the error code
}

void
DNBArchMachARM::ThreadWillResume()
{
    // Do we need to step this thread? If so, let the mach thread tell us so.
    if (m_thread->IsStepping())
    {
        // This is the primary thread, let the arch do anything it needs
        if (NumSupportedHardwareBreakpoints() > 0)
        {
            if (EnableHardwareSingleStep(true) != KERN_SUCCESS)
            {
                DNBLogThreaded("DNBArchMachARM::ThreadWillResume() failed to enable hardware single step");
            }
        }
    }

    // Disable the triggered watchpoint temporarily before we resume.
    // Plus, we try to enable hardware single step to execute past the instruction which triggered our watchpoint.
    if (m_watchpoint_did_occur)
    {
        if (m_watchpoint_hw_index >= 0)
        {
            kern_return_t kret = GetDBGState(false);
            if (kret == KERN_SUCCESS && !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
                // The watchpoint might have been disabled by the user. We don't need to do anything at all
                // to enable hardware single stepping.
359 m_watchpoint_did_occur = false; 360 m_watchpoint_hw_index = -1; 361 return; 362 } 363 364 DisableHardwareWatchpoint0(m_watchpoint_hw_index, true, false); 365 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() DisableHardwareWatchpoint(%d) called", 366 m_watchpoint_hw_index); 367 368 // Enable hardware single step to move past the watchpoint-triggering instruction. 369 m_watchpoint_resume_single_step_enabled = (EnableHardwareSingleStep(true) == KERN_SUCCESS); 370 371 // If we are not able to enable single step to move past the watchpoint-triggering instruction, 372 // at least we should reset the two watchpoint member variables so that the next time around 373 // this callback function is invoked, the enclosing logical branch is skipped. 374 if (!m_watchpoint_resume_single_step_enabled) { 375 // Reset the two watchpoint member variables. 376 m_watchpoint_did_occur = false; 377 m_watchpoint_hw_index = -1; 378 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() failed to enable single step"); 379 } 380 else 381 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() succeeded to enable single step"); 382 } 383 } 384 } 385 386 bool 387 DNBArchMachARM::ThreadDidStop() 388 { 389 bool success = true; 390 391 m_state.InvalidateRegisterSetState (e_regSetALL); 392 393 if (m_watchpoint_resume_single_step_enabled) 394 { 395 // Great! We now disable the hardware single step as well as re-enable the hardware watchpoint. 396 // See also ThreadWillResume(). 397 if (EnableHardwareSingleStep(false) == KERN_SUCCESS) 398 { 399 if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) 400 { 401 EnableHardwareWatchpoint0(m_watchpoint_hw_index, true, false); 402 m_watchpoint_resume_single_step_enabled = false; 403 m_watchpoint_did_occur = false; 404 m_watchpoint_hw_index = -1; 405 } 406 else 407 { 408 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) does not hold!"); 409 } 410 } 411 else 412 { 413 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but unable to disable single step!"); 414 } 415 } 416 417 // Are we stepping a single instruction? 418 if (GetGPRState(true) == KERN_SUCCESS) 419 { 420 // We are single stepping, was this the primary thread? 421 if (m_thread->IsStepping()) 422 { 423 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 424 } 425 else 426 { 427 // The MachThread will automatically restore the suspend count 428 // in ThreadDidStop(), so we don't need to do anything here if 429 // we weren't the primary thread the last time 430 } 431 } 432 return success; 433 } 434 435 bool 436 DNBArchMachARM::NotifyException(MachException::Data& exc) 437 { 438 switch (exc.exc_type) 439 { 440 default: 441 break; 442 case EXC_BREAKPOINT: 443 if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) 444 { 445 // exc_code = EXC_ARM_DA_DEBUG 446 // 447 // Check whether this corresponds to a watchpoint hit event. 448 // If yes, retrieve the exc_sub_code as the data break address. 449 if (!HasWatchpointOccurred()) 450 break; 451 452 // The data break address is passed as exc_data[1]. 453 nub_addr_t addr = exc.exc_data[1]; 454 // Find the hardware index with the side effect of possibly massaging the 455 // addr to return the starting address as seen from the debugger side. 
456 uint32_t hw_index = GetHardwareWatchpointHit(addr); 457 if (hw_index != INVALID_NUB_HW_INDEX) 458 { 459 m_watchpoint_did_occur = true; 460 m_watchpoint_hw_index = hw_index; 461 exc.exc_data[1] = addr; 462 // Piggyback the hw_index in the exc.data. 463 exc.exc_data.push_back(hw_index); 464 } 465 466 return true; 467 } 468 break; 469 } 470 return false; 471 } 472 473 bool 474 DNBArchMachARM::StepNotComplete () 475 { 476 if (m_hw_single_chained_step_addr != INVALID_NUB_ADDRESS) 477 { 478 kern_return_t kret = KERN_INVALID_ARGUMENT; 479 kret = GetGPRState(false); 480 if (kret == KERN_SUCCESS) 481 { 482 if (m_state.context.gpr.__pc == m_hw_single_chained_step_addr) 483 { 484 DNBLogThreadedIf(LOG_STEP, "Need to step some more at 0x%8.8x", m_hw_single_chained_step_addr); 485 return true; 486 } 487 } 488 } 489 490 m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS; 491 return false; 492 } 493 494 495 // Set the single step bit in the processor status register. 496 kern_return_t 497 DNBArchMachARM::EnableHardwareSingleStep (bool enable) 498 { 499 DNBError err; 500 DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable); 501 502 err = GetGPRState(false); 503 504 if (err.Fail()) 505 { 506 err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__); 507 return err.Error(); 508 } 509 510 err = GetDBGState(false); 511 512 if (err.Fail()) 513 { 514 err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__); 515 return err.Error(); 516 } 517 518 const uint32_t i = 0; 519 if (enable) 520 { 521 m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS; 522 523 // Save our previous state 524 m_dbg_save = m_state.dbg; 525 // Set a breakpoint that will stop when the PC doesn't match the current one! 526 m_state.dbg.__bvr[i] = m_state.context.gpr.__pc & 0xFFFFFFFCu; // Set the current PC as the breakpoint address 527 m_state.dbg.__bcr[i] = BCR_M_IMVA_MISMATCH | // Stop on address mismatch 528 S_USER | // Stop only in user mode 529 BCR_ENABLE; // Enable this breakpoint 530 if (m_state.context.gpr.__cpsr & 0x20) 531 { 532 // Thumb breakpoint 533 if (m_state.context.gpr.__pc & 2) 534 m_state.dbg.__bcr[i] |= BAS_IMVA_2_3; 535 else 536 m_state.dbg.__bcr[i] |= BAS_IMVA_0_1; 537 538 uint16_t opcode; 539 if (sizeof(opcode) == m_thread->Process()->Task().ReadMemory(m_state.context.gpr.__pc, sizeof(opcode), &opcode)) 540 { 541 if (((opcode & 0xE000) == 0xE000) && opcode & 0x1800) 542 { 543 // 32 bit thumb opcode... 544 if (m_state.context.gpr.__pc & 2) 545 { 546 // We can't take care of a 32 bit thumb instruction single step 547 // with just IVA mismatching. We will need to chain an extra 548 // hardware single step in order to complete this single step... 
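                        // For instance (addresses illustrative): with PC = 0x2002 and a
                        // 32-bit Thumb-2 opcode there, BAS_IMVA_2_3 only covers bytes 2-3
                        // of the word, so the mismatch breakpoint can fire again at 0x2004,
                        // still inside the same instruction.  Recording
                        // m_hw_single_chained_step_addr = 0x2004 lets StepNotComplete()
                        // recognize that case and request one more hardware step.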
549 m_hw_single_chained_step_addr = m_state.context.gpr.__pc + 2; 550 } 551 else 552 { 553 // Extend the number of bits to ignore for the mismatch 554 m_state.dbg.__bcr[i] |= BAS_IMVA_ALL; 555 } 556 } 557 } 558 } 559 else 560 { 561 // ARM breakpoint 562 m_state.dbg.__bcr[i] |= BAS_IMVA_ALL; // Stop when any address bits change 563 } 564 565 DNBLogThreadedIf(LOG_STEP, "%s: BVR%u=0x%8.8x BCR%u=0x%8.8x", __FUNCTION__, i, m_state.dbg.__bvr[i], i, m_state.dbg.__bcr[i]); 566 567 for (uint32_t j=i+1; j<16; ++j) 568 { 569 // Disable all others 570 m_state.dbg.__bvr[j] = 0; 571 m_state.dbg.__bcr[j] = 0; 572 } 573 } 574 else 575 { 576 // Just restore the state we had before we did single stepping 577 m_state.dbg = m_dbg_save; 578 } 579 580 return SetDBGState(false); 581 } 582 583 // return 1 if bit "BIT" is set in "value" 584 static inline uint32_t bit(uint32_t value, uint32_t bit) 585 { 586 return (value >> bit) & 1u; 587 } 588 589 // return the bitfield "value[msbit:lsbit]". 590 static inline uint32_t bits(uint32_t value, uint32_t msbit, uint32_t lsbit) 591 { 592 assert(msbit >= lsbit); 593 uint32_t shift_left = sizeof(value) * 8 - 1 - msbit; 594 value <<= shift_left; // shift anything above the msbit off of the unsigned edge 595 value >>= (shift_left + lsbit); // shift it back again down to the lsbit (including undoing any shift from above) 596 return value; // return our result 597 } 598 599 bool 600 DNBArchMachARM::ConditionPassed(uint8_t condition, uint32_t cpsr) 601 { 602 uint32_t cpsr_n = bit(cpsr, 31); // Negative condition code flag 603 uint32_t cpsr_z = bit(cpsr, 30); // Zero condition code flag 604 uint32_t cpsr_c = bit(cpsr, 29); // Carry condition code flag 605 uint32_t cpsr_v = bit(cpsr, 28); // Overflow condition code flag 606 607 switch (condition) { 608 case COND_EQ: // (0x0) 609 if (cpsr_z == 1) return true; 610 break; 611 case COND_NE: // (0x1) 612 if (cpsr_z == 0) return true; 613 break; 614 case COND_CS: // (0x2) 615 if (cpsr_c == 1) return true; 616 break; 617 case COND_CC: // (0x3) 618 if (cpsr_c == 0) return true; 619 break; 620 case COND_MI: // (0x4) 621 if (cpsr_n == 1) return true; 622 break; 623 case COND_PL: // (0x5) 624 if (cpsr_n == 0) return true; 625 break; 626 case COND_VS: // (0x6) 627 if (cpsr_v == 1) return true; 628 break; 629 case COND_VC: // (0x7) 630 if (cpsr_v == 0) return true; 631 break; 632 case COND_HI: // (0x8) 633 if ((cpsr_c == 1) && (cpsr_z == 0)) return true; 634 break; 635 case COND_LS: // (0x9) 636 if ((cpsr_c == 0) || (cpsr_z == 1)) return true; 637 break; 638 case COND_GE: // (0xA) 639 if (cpsr_n == cpsr_v) return true; 640 break; 641 case COND_LT: // (0xB) 642 if (cpsr_n != cpsr_v) return true; 643 break; 644 case COND_GT: // (0xC) 645 if ((cpsr_z == 0) && (cpsr_n == cpsr_v)) return true; 646 break; 647 case COND_LE: // (0xD) 648 if ((cpsr_z == 1) || (cpsr_n != cpsr_v)) return true; 649 break; 650 default: 651 return true; 652 break; 653 } 654 655 return false; 656 } 657 658 uint32_t 659 DNBArchMachARM::NumSupportedHardwareBreakpoints() 660 { 661 // Set the init value to something that will let us know that we need to 662 // autodetect how many breakpoints are supported dynamically... 
663 static uint32_t g_num_supported_hw_breakpoints = UINT_MAX; 664 if (g_num_supported_hw_breakpoints == UINT_MAX) 665 { 666 // Set this to zero in case we can't tell if there are any HW breakpoints 667 g_num_supported_hw_breakpoints = 0; 668 669 size_t len; 670 uint32_t n = 0; 671 len = sizeof (n); 672 if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) 673 { 674 g_num_supported_hw_breakpoints = n; 675 DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n); 676 } 677 else 678 { 679 // Read the DBGDIDR to get the number of available hardware breakpoints 680 // However, in some of our current armv7 processors, hardware 681 // breakpoints/watchpoints were not properly connected. So detect those 682 // cases using a field in a sysctl. For now we are using "hw.cpusubtype" 683 // field to distinguish CPU architectures. This is a hack until we can 684 // get <rdar://problem/6372672> fixed, at which point we will switch to 685 // using a different sysctl string that will tell us how many BRPs 686 // are available to us directly without having to read DBGDIDR. 687 uint32_t register_DBGDIDR; 688 689 asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR)); 690 uint32_t numBRPs = bits(register_DBGDIDR, 27, 24); 691 // Zero is reserved for the BRP count, so don't increment it if it is zero 692 if (numBRPs > 0) 693 numBRPs++; 694 DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number BRP pairs = %u)", register_DBGDIDR, numBRPs); 695 696 if (numBRPs > 0) 697 { 698 uint32_t cpusubtype; 699 len = sizeof(cpusubtype); 700 // TODO: remove this hack and change to using hw.optional.xx when implmented 701 if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0) 702 { 703 DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=%d", cpusubtype); 704 if (cpusubtype == CPU_SUBTYPE_ARM_V7) 705 DNBLogThreadedIf(LOG_THREAD, "Hardware breakpoints disabled for armv7 (rdar://problem/6372672)"); 706 else 707 g_num_supported_hw_breakpoints = numBRPs; 708 } 709 } 710 } 711 } 712 return g_num_supported_hw_breakpoints; 713 } 714 715 716 uint32_t 717 DNBArchMachARM::NumSupportedHardwareWatchpoints() 718 { 719 // Set the init value to something that will let us know that we need to 720 // autodetect how many watchpoints are supported dynamically... 721 static uint32_t g_num_supported_hw_watchpoints = UINT_MAX; 722 if (g_num_supported_hw_watchpoints == UINT_MAX) 723 { 724 // Set this to zero in case we can't tell if there are any HW breakpoints 725 g_num_supported_hw_watchpoints = 0; 726 727 728 size_t len; 729 uint32_t n = 0; 730 len = sizeof (n); 731 if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) 732 { 733 g_num_supported_hw_watchpoints = n; 734 DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n); 735 } 736 else 737 { 738 // Read the DBGDIDR to get the number of available hardware breakpoints 739 // However, in some of our current armv7 processors, hardware 740 // breakpoints/watchpoints were not properly connected. So detect those 741 // cases using a field in a sysctl. For now we are using "hw.cpusubtype" 742 // field to distinguish CPU architectures. This is a hack until we can 743 // get <rdar://problem/6372672> fixed, at which point we will switch to 744 // using a different sysctl string that will tell us how many WRPs 745 // are available to us directly without having to read DBGDIDR. 
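            // Worked example (the register value is made up for illustration):
            // if DBGDIDR reads back as 0x35141022, then
            //   bits(0x35141022, 31, 28) == 0x3  =>  3 + 1 = 4 WRP pairs
            //   bits(0x35141022, 27, 24) == 0x5  =>  5 + 1 = 6 BRP pairs
            // which is how numWRPs below (and numBRPs in the breakpoint routine
            // above) are derived.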

            uint32_t register_DBGDIDR;
            asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR));
            uint32_t numWRPs = bits(register_DBGDIDR, 31, 28) + 1;
            DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number WRP pairs = %u)", register_DBGDIDR, numWRPs);

            if (numWRPs > 0)
            {
                uint32_t cpusubtype;
                size_t len;
                len = sizeof(cpusubtype);
                // TODO: remove this hack and change to using hw.optional.xx when implemented
                if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0)
                {
                    DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=%d", cpusubtype);

                    if (cpusubtype == CPU_SUBTYPE_ARM_V7)
                        DNBLogThreadedIf(LOG_THREAD, "Hardware watchpoints disabled for armv7 (rdar://problem/6372672)");
                    else
                        g_num_supported_hw_watchpoints = numWRPs;
                }
            }
        }
    }
    return g_num_supported_hw_watchpoints;
}


uint32_t
DNBArchMachARM::EnableHardwareBreakpoint (nub_addr_t addr, nub_size_t size)
{
    // Make sure our address isn't bogus
    if (addr & 1)
        return INVALID_NUB_HW_INDEX;

    kern_return_t kret = GetDBGState(false);

    if (kret == KERN_SUCCESS)
    {
        const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
        uint32_t i;
        for (i=0; i<num_hw_breakpoints; ++i)
        {
            if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
                break; // We found an available hw breakpoint slot (in i)
        }

        // See if we found an available hw breakpoint slot above
        if (i < num_hw_breakpoints)
        {
            // Make sure bits 1:0 are clear in our address
            m_state.dbg.__bvr[i] = addr & ~((nub_addr_t)3);

            if (size == 2 || addr & 2)
            {
                uint32_t byte_addr_select = (addr & 2) ? BAS_IMVA_2_3 : BAS_IMVA_0_1;

                // We have a thumb breakpoint
                m_state.dbg.__bcr[i] =  BCR_M_IMVA_MATCH |  // Stop on address match
                                        byte_addr_select |  // Set the correct byte address select so we only trigger on the correct opcode
                                        S_USER |            // Which modes should this breakpoint stop in?
                                        BCR_ENABLE;         // Enable this hardware breakpoint
                DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (Thumb)",
                                  (uint64_t)addr,
                                  (uint64_t)size,
                                  i,
                                  i,
                                  m_state.dbg.__bvr[i],
                                  m_state.dbg.__bcr[i]);
            }
            else if (size == 4)
            {
                // We have an ARM breakpoint
                m_state.dbg.__bcr[i] =  BCR_M_IMVA_MATCH |  // Stop on address match
                                        BAS_IMVA_ALL |      // Stop on any of the four bytes following the IMVA
                                        S_USER |            // Which modes should this breakpoint stop in?
                                        BCR_ENABLE;         // Enable this hardware breakpoint
                DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (ARM)",
                                  (uint64_t)addr,
                                  (uint64_t)size,
                                  i,
                                  i,
                                  m_state.dbg.__bvr[i],
                                  m_state.dbg.__bcr[i]);
            }

            kret = SetDBGState(false);
            DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint() SetDBGState() => 0x%8.8x.", kret);

            if (kret == KERN_SUCCESS)
                return i;
        }
        else
        {
            DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint(addr = 0x%8.8llx, size = %llu) => all hardware breakpoint resources are being used.", (uint64_t)addr, (uint64_t)size);
        }
    }

    return INVALID_NUB_HW_INDEX;
}

bool
DNBArchMachARM::DisableHardwareBreakpoint (uint32_t hw_index)
{
    kern_return_t kret = GetDBGState(false);

    const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
    if (kret == KERN_SUCCESS)
    {
        if (hw_index < num_hw_points)
        {
            m_state.dbg.__bcr[hw_index] = 0;
            DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::DisableHardwareBreakpoint( %u ) - BVR%u = 0x%8.8x  BCR%u = 0x%8.8x",
                             hw_index,
                             hw_index,
                             m_state.dbg.__bvr[hw_index],
                             hw_index,
                             m_state.dbg.__bcr[hw_index]);

            kret = SetDBGState(false);

            if (kret == KERN_SUCCESS)
                return true;
        }
    }
    return false;
}

// This stores the lo->hi mappings.  It's safe to initialize to all 0's
// since hi > lo and therefore LoHi[i] cannot be 0.
static uint32_t LoHi[16] = { 0 };

uint32_t
DNBArchMachARM::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task)
{
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %llu, read = %u, write = %u)", (uint64_t)addr, (uint64_t)size, read, write);

    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

    // Can't watch zero bytes
    if (size == 0)
        return INVALID_NUB_HW_INDEX;

    // We must watch for either read or write
    if (read == false && write == false)
        return INVALID_NUB_HW_INDEX;

    // Divide-and-conquer for size == 8.
    if (size == 8)
    {
        uint32_t lo = EnableHardwareWatchpoint(addr, 4, read, write, also_set_on_task);
        if (lo == INVALID_NUB_HW_INDEX)
            return INVALID_NUB_HW_INDEX;
        uint32_t hi = EnableHardwareWatchpoint(addr+4, 4, read, write, also_set_on_task);
        if (hi == INVALID_NUB_HW_INDEX)
        {
            DisableHardwareWatchpoint(lo, also_set_on_task);
            return INVALID_NUB_HW_INDEX;
        }
        // Tag this lo->hi mapping in our database.
        LoHi[lo] = hi;
        return lo;
    }

    // Otherwise, can't watch more than 4 bytes per WVR/WCR pair
    if (size > 4)
        return INVALID_NUB_HW_INDEX;

    // We can only watch up to four bytes that follow a 4 byte aligned address
    // per watchpoint register pair. Since we can only watch until the next 4
    // byte boundary, we need to make sure we can properly encode this.

    // addr_word_offset = addr % 4, i.e, is in set([0, 1, 2, 3])
    //
    //     +---+---+---+---+
    //     | 0 | 1 | 2 | 3 |
    //     +---+---+---+---+
    //     ^
    //     |
    // word address (4-byte aligned) = addr & 0xFFFFFFFC => goes into WVR
    //
    // examples:
    // 1. addr_word_offset = 1, size = 1 to watch a uint_8  => byte_mask = (0b0001 << 1) = 0b0010
    // 2.
addr_word_offset = 2, size = 2 to watch a uint_16 => byte_mask = (0b0011 << 2) = 0b1100 931 // 932 // where byte_mask goes into WCR[8:5] 933 934 uint32_t addr_word_offset = addr % 4; 935 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() - addr_word_offset = 0x%8.8x", addr_word_offset); 936 937 uint32_t byte_mask = ((1u << size) - 1u) << addr_word_offset; 938 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() - byte_mask = 0x%8.8x", byte_mask); 939 if (byte_mask > 0xfu) 940 return INVALID_NUB_HW_INDEX; 941 942 // Read the debug state 943 kern_return_t kret = GetDBGState(true); 944 945 if (kret == KERN_SUCCESS) 946 { 947 // Check to make sure we have the needed hardware support 948 uint32_t i = 0; 949 950 for (i=0; i<num_hw_watchpoints; ++i) 951 { 952 if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0) 953 break; // We found an available hw watchpoint slot (in i) 954 } 955 956 // See if we found an available hw watchpoint slot above 957 if (i < num_hw_watchpoints) 958 { 959 //DumpDBGState(m_state.dbg); 960 961 // Make the byte_mask into a valid Byte Address Select mask 962 uint32_t byte_address_select = byte_mask << 5; 963 // Make sure bits 1:0 are clear in our address 964 m_state.dbg.__wvr[i] = addr & ~((nub_addr_t)3); // DVA (Data Virtual Address) 965 m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow the DVA that we will watch 966 S_USER | // Stop only in user mode 967 (read ? WCR_LOAD : 0) | // Stop on read access? 968 (write ? WCR_STORE : 0) | // Stop on write access? 969 WCR_ENABLE; // Enable this watchpoint; 970 971 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() adding watchpoint on address 0x%llx with control register value 0x%x", (uint64_t) m_state.dbg.__wvr[i], (uint32_t) m_state.dbg.__wcr[i]); 972 973 kret = SetDBGState(also_set_on_task); 974 //DumpDBGState(m_state.dbg); 975 976 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret); 977 978 if (kret == KERN_SUCCESS) 979 return i; 980 } 981 else 982 { 983 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints); 984 } 985 } 986 return INVALID_NUB_HW_INDEX; 987 } 988 989 bool 990 DNBArchMachARM::EnableHardwareWatchpoint0 (uint32_t hw_index, bool Delegate, bool also_set_on_task) 991 { 992 kern_return_t kret = GetDBGState(false); 993 if (kret != KERN_SUCCESS) 994 return false; 995 996 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 997 if (hw_index >= num_hw_points) 998 return false; 999 1000 if (Delegate && LoHi[hw_index]) { 1001 // Enable lo and hi watchpoint hardware indexes. 
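        // Example (slot numbers are illustrative): an 8-byte watchpoint at
        // 0x1000 was split by EnableHardwareWatchpoint() into slot 1 covering
        // 0x1000-0x1003 and slot 2 covering 0x1004-0x1007, with LoHi[1] = 2.
        // Enabling slot 1 through this path therefore re-enables slot 2 as
        // well, so the pair always toggles together.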
1002 return EnableHardwareWatchpoint0(hw_index, false, also_set_on_task) && 1003 EnableHardwareWatchpoint0(LoHi[hw_index], false, also_set_on_task); 1004 } 1005 1006 m_state.dbg.__wcr[hw_index] |= (nub_addr_t)WCR_ENABLE; 1007 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8x WCR%u = 0x%8.8x", 1008 hw_index, 1009 hw_index, 1010 m_state.dbg.__wvr[hw_index], 1011 hw_index, 1012 m_state.dbg.__wcr[hw_index]); 1013 1014 kret = SetDBGState(false); 1015 1016 return (kret == KERN_SUCCESS); 1017 } 1018 1019 bool 1020 DNBArchMachARM::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task) 1021 { 1022 return DisableHardwareWatchpoint0(hw_index, true, also_set_on_task); 1023 } 1024 bool 1025 DNBArchMachARM::DisableHardwareWatchpoint0 (uint32_t hw_index, bool Delegate, bool also_set_on_task) 1026 { 1027 kern_return_t kret = GetDBGState(false); 1028 if (kret != KERN_SUCCESS) 1029 return false; 1030 1031 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 1032 if (hw_index >= num_hw_points) 1033 return false; 1034 1035 if (Delegate && LoHi[hw_index]) { 1036 // Disable lo and hi watchpoint hardware indexes. 1037 return DisableHardwareWatchpoint0(hw_index, false, also_set_on_task) && 1038 DisableHardwareWatchpoint0(LoHi[hw_index], false, also_set_on_task); 1039 } 1040 1041 m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE); 1042 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::DisableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8x WCR%u = 0x%8.8x", 1043 hw_index, 1044 hw_index, 1045 m_state.dbg.__wvr[hw_index], 1046 hw_index, 1047 m_state.dbg.__wcr[hw_index]); 1048 1049 kret = SetDBGState(also_set_on_task); 1050 1051 return (kret == KERN_SUCCESS); 1052 } 1053 1054 // Returns -1 if the trailing bit patterns are not one of: 1055 // { 0b???1, 0b??10, 0b?100, 0b1000 }. 1056 static inline 1057 int32_t 1058 LowestBitSet(uint32_t val) 1059 { 1060 for (unsigned i = 0; i < 4; ++i) { 1061 if (bit(val, i)) 1062 return i; 1063 } 1064 return -1; 1065 } 1066 1067 // Iterate through the debug registers; return the index of the first watchpoint whose address matches. 1068 // As a side effect, the starting address as understood by the debugger is returned which could be 1069 // different from 'addr' passed as an in/out argument. 1070 uint32_t 1071 DNBArchMachARM::GetHardwareWatchpointHit(nub_addr_t &addr) 1072 { 1073 // Read the debug state 1074 kern_return_t kret = GetDBGState(true); 1075 //DumpDBGState(m_state.dbg); 1076 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret); 1077 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() addr = 0x%llx", (uint64_t)addr); 1078 1079 // This is the watchpoint value to match against, i.e., word address. 1080 nub_addr_t wp_val = addr & ~((nub_addr_t)3); 1081 if (kret == KERN_SUCCESS) 1082 { 1083 DBG &debug_state = m_state.dbg; 1084 uint32_t i, num = NumSupportedHardwareWatchpoints(); 1085 for (i = 0; i < num; ++i) 1086 { 1087 nub_addr_t wp_addr = GetWatchAddress(debug_state, i); 1088 DNBLogThreadedIf(LOG_WATCHPOINTS, 1089 "DNBArchMachARM::GetHardwareWatchpointHit() slot: %u (addr = 0x%llx).", 1090 i, (uint64_t)wp_addr); 1091 if (wp_val == wp_addr) { 1092 uint32_t byte_mask = bits(debug_state.__wcr[i], 8, 5); 1093 1094 // Sanity check the byte_mask, first. 1095 if (LowestBitSet(byte_mask) < 0) 1096 continue; 1097 1098 // Compute the starting address (from the point of view of the debugger). 
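                // e.g. (illustrative values) WVR = 0x1000 with WCR[8:5] = 0b1100:
                // LowestBitSet(0b1100) == 2, so the address reported back to the
                // debugger is 0x1000 + 2 = 0x1002, matching the original request.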
                addr = wp_addr + LowestBitSet(byte_mask);
                return i;
            }
        }
    }
    return INVALID_NUB_HW_INDEX;
}

// ThreadWillResume() calls this to clear bits[5:2] (Method of entry bits) of
// the Debug Status and Control Register (DSCR).
//
// The ARM ARM documents b0010 for a watchpoint debug event, but the value
// observed here is b1010 (see WATCHPOINT_OCCURRED above).
// b0000 is the reset value
void
DNBArchMachARM::ClearWatchpointOccurred()
{
    uint32_t register_DBGDSCR;
    asm("mrc p14, 0, %0, c0, c1, 0" : "=r" (register_DBGDSCR));
    if (bits(register_DBGDSCR, 5, 2) == WATCHPOINT_OCCURRED)
    {
        uint32_t mask = ~(0xF << 2);
        register_DBGDSCR &= mask;
        // register_DBGDSCR is an input here: we are writing it back with MCR.
        asm("mcr p14, 0, %0, c0, c1, 0" : : "r" (register_DBGDSCR));
    }
    return;
}

// NotifyException() calls this to double check that a watchpoint has occurred
// by inspecting the bits[5:2] field of the Debug Status and Control Register
// (DSCR).
//
// A match against WATCHPOINT_OCCURRED (b1010 here, see above) means a
// watchpoint occurred.
bool
DNBArchMachARM::HasWatchpointOccurred()
{
    uint32_t register_DBGDSCR;
    asm("mrc p14, 0, %0, c0, c1, 0" : "=r" (register_DBGDSCR));
    return (bits(register_DBGDSCR, 5, 2) == WATCHPOINT_OCCURRED);
}

bool
DNBArchMachARM::IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index)
{
    // Watchpoint Control Registers, bitfield definitions
    // ...
    // Bits    Value    Description
    // [0]     0        Watchpoint disabled
    //         1        Watchpoint enabled.
    return (debug_state.__wcr[hw_index] & 1u);
}

nub_addr_t
DNBArchMachARM::GetWatchAddress(const DBG &debug_state, uint32_t hw_index)
{
    // Watchpoint Value Registers, bitfield definitions
    // Bits        Description
    // [31:2]      Watchpoint value (word address, i.e., 4-byte aligned)
    // [1:0]       RAZ/SBZP
    return bits(debug_state.__wvr[hw_index], 31, 0);
}

//----------------------------------------------------------------------
// Register information definitions for 32 bit ARMV7.
1162 //---------------------------------------------------------------------- 1163 enum gpr_regnums 1164 { 1165 gpr_r0 = 0, 1166 gpr_r1, 1167 gpr_r2, 1168 gpr_r3, 1169 gpr_r4, 1170 gpr_r5, 1171 gpr_r6, 1172 gpr_r7, 1173 gpr_r8, 1174 gpr_r9, 1175 gpr_r10, 1176 gpr_r11, 1177 gpr_r12, 1178 gpr_sp, 1179 gpr_lr, 1180 gpr_pc, 1181 gpr_cpsr 1182 }; 1183 1184 enum 1185 { 1186 vfp_s0 = 17, // match the g_gdb_register_map_arm table in RNBRemote.cpp 1187 vfp_s1, 1188 vfp_s2, 1189 vfp_s3, 1190 vfp_s4, 1191 vfp_s5, 1192 vfp_s6, 1193 vfp_s7, 1194 vfp_s8, 1195 vfp_s9, 1196 vfp_s10, 1197 vfp_s11, 1198 vfp_s12, 1199 vfp_s13, 1200 vfp_s14, 1201 vfp_s15, 1202 vfp_s16, 1203 vfp_s17, 1204 vfp_s18, 1205 vfp_s19, 1206 vfp_s20, 1207 vfp_s21, 1208 vfp_s22, 1209 vfp_s23, 1210 vfp_s24, 1211 vfp_s25, 1212 vfp_s26, 1213 vfp_s27, 1214 vfp_s28, 1215 vfp_s29, 1216 vfp_s30, 1217 vfp_s31 1218 }; 1219 1220 enum 1221 { 1222 vfp_d0 = 49, // match the g_gdb_register_map_arm table in RNBRemote.cpp 1223 vfp_d1, 1224 vfp_d2, 1225 vfp_d3, 1226 vfp_d4, 1227 vfp_d5, 1228 vfp_d6, 1229 vfp_d7, 1230 vfp_d8, 1231 vfp_d9, 1232 vfp_d10, 1233 vfp_d11, 1234 vfp_d12, 1235 vfp_d13, 1236 vfp_d14, 1237 vfp_d15, 1238 vfp_d16, 1239 vfp_d17, 1240 vfp_d18, 1241 vfp_d19, 1242 vfp_d20, 1243 vfp_d21, 1244 vfp_d22, 1245 vfp_d23, 1246 vfp_d24, 1247 vfp_d25, 1248 vfp_d26, 1249 vfp_d27, 1250 vfp_d28, 1251 vfp_d29, 1252 vfp_d30, 1253 vfp_d31 1254 }; 1255 1256 enum 1257 { 1258 vfp_q0 = 81, // match the g_gdb_register_map_arm table in RNBRemote.cpp 1259 vfp_q1, 1260 vfp_q2, 1261 vfp_q3, 1262 vfp_q4, 1263 vfp_q5, 1264 vfp_q6, 1265 vfp_q7, 1266 vfp_q8, 1267 vfp_q9, 1268 vfp_q10, 1269 vfp_q11, 1270 vfp_q12, 1271 vfp_q13, 1272 vfp_q14, 1273 vfp_q15, 1274 vfp_fpscr 1275 }; 1276 1277 enum 1278 { 1279 exc_exception, 1280 exc_fsr, 1281 exc_far, 1282 }; 1283 1284 #define GPR_OFFSET_IDX(idx) (offsetof (DNBArchMachARM::GPR, __r[idx])) 1285 #define GPR_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::GPR, __##reg)) 1286 1287 #define EXC_OFFSET(reg) (offsetof (DNBArchMachARM::EXC, __##reg) + offsetof (DNBArchMachARM::Context, exc)) 1288 1289 // These macros will auto define the register name, alt name, register size, 1290 // register offset, encoding, format and native register. This ensures that 1291 // the register state structures are defined correctly and have the correct 1292 // sizes and offsets. 1293 #define DEFINE_GPR_IDX(idx, reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_IDX(idx), gcc_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, NULL} 1294 #define DEFINE_GPR_NAME(reg, alt, gen, inval) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_NAME(reg), gcc_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, inval} 1295 1296 // In case we are debugging to a debug target that the ability to 1297 // change into the protected modes with folded registers (ABT, IRQ, 1298 // FIQ, SYS, USR, etc..), we should invalidate r8-r14 if the CPSR 1299 // gets modified. 
1300 1301 uint32_t g_invalidate_cpsr[] = { 1302 gpr_r8, 1303 gpr_r9, 1304 gpr_r10, 1305 gpr_r11, 1306 gpr_r12, 1307 gpr_sp, 1308 gpr_lr, 1309 INVALID_NUB_REGNUM }; 1310 1311 // General purpose registers 1312 const DNBRegisterInfo 1313 DNBArchMachARM::g_gpr_registers[] = 1314 { 1315 DEFINE_GPR_IDX ( 0, r0,"arg1", GENERIC_REGNUM_ARG1 ), 1316 DEFINE_GPR_IDX ( 1, r1,"arg2", GENERIC_REGNUM_ARG2 ), 1317 DEFINE_GPR_IDX ( 2, r2,"arg3", GENERIC_REGNUM_ARG3 ), 1318 DEFINE_GPR_IDX ( 3, r3,"arg4", GENERIC_REGNUM_ARG4 ), 1319 DEFINE_GPR_IDX ( 4, r4, NULL, INVALID_NUB_REGNUM ), 1320 DEFINE_GPR_IDX ( 5, r5, NULL, INVALID_NUB_REGNUM ), 1321 DEFINE_GPR_IDX ( 6, r6, NULL, INVALID_NUB_REGNUM ), 1322 DEFINE_GPR_IDX ( 7, r7, "fp", GENERIC_REGNUM_FP ), 1323 DEFINE_GPR_IDX ( 8, r8, NULL, INVALID_NUB_REGNUM ), 1324 DEFINE_GPR_IDX ( 9, r9, NULL, INVALID_NUB_REGNUM ), 1325 DEFINE_GPR_IDX (10, r10, NULL, INVALID_NUB_REGNUM ), 1326 DEFINE_GPR_IDX (11, r11, NULL, INVALID_NUB_REGNUM ), 1327 DEFINE_GPR_IDX (12, r12, NULL, INVALID_NUB_REGNUM ), 1328 DEFINE_GPR_NAME (sp, "r13", GENERIC_REGNUM_SP, NULL), 1329 DEFINE_GPR_NAME (lr, "r14", GENERIC_REGNUM_RA, NULL), 1330 DEFINE_GPR_NAME (pc, "r15", GENERIC_REGNUM_PC, NULL), 1331 DEFINE_GPR_NAME (cpsr, "flags", GENERIC_REGNUM_FLAGS, g_invalidate_cpsr) 1332 }; 1333 1334 uint32_t g_contained_q0[] {vfp_q0, INVALID_NUB_REGNUM }; 1335 uint32_t g_contained_q1[] {vfp_q1, INVALID_NUB_REGNUM }; 1336 uint32_t g_contained_q2[] {vfp_q2, INVALID_NUB_REGNUM }; 1337 uint32_t g_contained_q3[] {vfp_q3, INVALID_NUB_REGNUM }; 1338 uint32_t g_contained_q4[] {vfp_q4, INVALID_NUB_REGNUM }; 1339 uint32_t g_contained_q5[] {vfp_q5, INVALID_NUB_REGNUM }; 1340 uint32_t g_contained_q6[] {vfp_q6, INVALID_NUB_REGNUM }; 1341 uint32_t g_contained_q7[] {vfp_q7, INVALID_NUB_REGNUM }; 1342 uint32_t g_contained_q8[] {vfp_q8, INVALID_NUB_REGNUM }; 1343 uint32_t g_contained_q9[] {vfp_q9, INVALID_NUB_REGNUM }; 1344 uint32_t g_contained_q10[] {vfp_q10, INVALID_NUB_REGNUM }; 1345 uint32_t g_contained_q11[] {vfp_q11, INVALID_NUB_REGNUM }; 1346 uint32_t g_contained_q12[] {vfp_q12, INVALID_NUB_REGNUM }; 1347 uint32_t g_contained_q13[] {vfp_q13, INVALID_NUB_REGNUM }; 1348 uint32_t g_contained_q14[] {vfp_q14, INVALID_NUB_REGNUM }; 1349 uint32_t g_contained_q15[] {vfp_q15, INVALID_NUB_REGNUM }; 1350 1351 uint32_t g_invalidate_q0[] {vfp_q0, vfp_d0, vfp_d1, vfp_s0, vfp_s1, vfp_s2, vfp_s3, INVALID_NUB_REGNUM }; 1352 uint32_t g_invalidate_q1[] {vfp_q1, vfp_d2, vfp_d3, vfp_s4, vfp_s5, vfp_s6, vfp_s7, INVALID_NUB_REGNUM }; 1353 uint32_t g_invalidate_q2[] {vfp_q2, vfp_d4, vfp_d5, vfp_s8, vfp_s9, vfp_s10, vfp_s11, INVALID_NUB_REGNUM }; 1354 uint32_t g_invalidate_q3[] {vfp_q3, vfp_d6, vfp_d7, vfp_s12, vfp_s13, vfp_s14, vfp_s15, INVALID_NUB_REGNUM }; 1355 uint32_t g_invalidate_q4[] {vfp_q4, vfp_d8, vfp_d9, vfp_s16, vfp_s17, vfp_s18, vfp_s19, INVALID_NUB_REGNUM }; 1356 uint32_t g_invalidate_q5[] {vfp_q5, vfp_d10, vfp_d11, vfp_s20, vfp_s21, vfp_s22, vfp_s23, INVALID_NUB_REGNUM }; 1357 uint32_t g_invalidate_q6[] {vfp_q6, vfp_d12, vfp_d13, vfp_s24, vfp_s25, vfp_s26, vfp_s27, INVALID_NUB_REGNUM }; 1358 uint32_t g_invalidate_q7[] {vfp_q7, vfp_d14, vfp_d15, vfp_s28, vfp_s29, vfp_s30, vfp_s31, INVALID_NUB_REGNUM }; 1359 uint32_t g_invalidate_q8[] {vfp_q8, vfp_d16, vfp_d17, INVALID_NUB_REGNUM }; 1360 uint32_t g_invalidate_q9[] {vfp_q9, vfp_d18, vfp_d19, INVALID_NUB_REGNUM }; 1361 uint32_t g_invalidate_q10[] {vfp_q10, vfp_d20, vfp_d21, INVALID_NUB_REGNUM }; 1362 uint32_t g_invalidate_q11[] {vfp_q11, vfp_d22, vfp_d23, INVALID_NUB_REGNUM }; 1363 
uint32_t g_invalidate_q12[] {vfp_q12, vfp_d24, vfp_d25, INVALID_NUB_REGNUM }; 1364 uint32_t g_invalidate_q13[] {vfp_q13, vfp_d26, vfp_d27, INVALID_NUB_REGNUM }; 1365 uint32_t g_invalidate_q14[] {vfp_q14, vfp_d28, vfp_d29, INVALID_NUB_REGNUM }; 1366 uint32_t g_invalidate_q15[] {vfp_q15, vfp_d30, vfp_d31, INVALID_NUB_REGNUM }; 1367 1368 #define VFP_S_OFFSET_IDX(idx) (offsetof (DNBArchMachARM::FPU, __r[(idx)]) + offsetof (DNBArchMachARM::Context, vfp)) 1369 #define VFP_D_OFFSET_IDX(idx) (VFP_S_OFFSET_IDX ((idx) * 2)) 1370 #define VFP_Q_OFFSET_IDX(idx) (VFP_S_OFFSET_IDX ((idx) * 4)) 1371 1372 #define VFP_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::FPU, __##reg) + offsetof (DNBArchMachARM::Context, vfp)) 1373 1374 #define FLOAT_FORMAT Float 1375 1376 #define DEFINE_VFP_S_IDX(idx) e_regSetVFP, vfp_s##idx - vfp_s0, "s" #idx, NULL, IEEE754, FLOAT_FORMAT, 4, VFP_S_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_s##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1377 #define DEFINE_VFP_D_IDX(idx) e_regSetVFP, vfp_d##idx - vfp_s0, "d" #idx, NULL, IEEE754, FLOAT_FORMAT, 8, VFP_D_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_d##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1378 #define DEFINE_VFP_Q_IDX(idx) e_regSetVFP, vfp_q##idx - vfp_s0, "q" #idx, NULL, Vector, VectorOfUInt8, 16, VFP_Q_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_q##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1379 1380 // Floating point registers 1381 const DNBRegisterInfo 1382 DNBArchMachARM::g_vfp_registers[] = 1383 { 1384 { DEFINE_VFP_S_IDX ( 0), g_contained_q0, g_invalidate_q0 }, 1385 { DEFINE_VFP_S_IDX ( 1), g_contained_q0, g_invalidate_q0 }, 1386 { DEFINE_VFP_S_IDX ( 2), g_contained_q0, g_invalidate_q0 }, 1387 { DEFINE_VFP_S_IDX ( 3), g_contained_q0, g_invalidate_q0 }, 1388 { DEFINE_VFP_S_IDX ( 4), g_contained_q1, g_invalidate_q1 }, 1389 { DEFINE_VFP_S_IDX ( 5), g_contained_q1, g_invalidate_q1 }, 1390 { DEFINE_VFP_S_IDX ( 6), g_contained_q1, g_invalidate_q1 }, 1391 { DEFINE_VFP_S_IDX ( 7), g_contained_q1, g_invalidate_q1 }, 1392 { DEFINE_VFP_S_IDX ( 8), g_contained_q2, g_invalidate_q2 }, 1393 { DEFINE_VFP_S_IDX ( 9), g_contained_q2, g_invalidate_q2 }, 1394 { DEFINE_VFP_S_IDX (10), g_contained_q2, g_invalidate_q2 }, 1395 { DEFINE_VFP_S_IDX (11), g_contained_q2, g_invalidate_q2 }, 1396 { DEFINE_VFP_S_IDX (12), g_contained_q3, g_invalidate_q3 }, 1397 { DEFINE_VFP_S_IDX (13), g_contained_q3, g_invalidate_q3 }, 1398 { DEFINE_VFP_S_IDX (14), g_contained_q3, g_invalidate_q3 }, 1399 { DEFINE_VFP_S_IDX (15), g_contained_q3, g_invalidate_q3 }, 1400 { DEFINE_VFP_S_IDX (16), g_contained_q4, g_invalidate_q4 }, 1401 { DEFINE_VFP_S_IDX (17), g_contained_q4, g_invalidate_q4 }, 1402 { DEFINE_VFP_S_IDX (18), g_contained_q4, g_invalidate_q4 }, 1403 { DEFINE_VFP_S_IDX (19), g_contained_q4, g_invalidate_q4 }, 1404 { DEFINE_VFP_S_IDX (20), g_contained_q5, g_invalidate_q5 }, 1405 { DEFINE_VFP_S_IDX (21), g_contained_q5, g_invalidate_q5 }, 1406 { DEFINE_VFP_S_IDX (22), g_contained_q5, g_invalidate_q5 }, 1407 { DEFINE_VFP_S_IDX (23), g_contained_q5, g_invalidate_q5 }, 1408 { DEFINE_VFP_S_IDX (24), g_contained_q6, g_invalidate_q6 }, 1409 { DEFINE_VFP_S_IDX (25), g_contained_q6, g_invalidate_q6 }, 1410 { DEFINE_VFP_S_IDX (26), g_contained_q6, g_invalidate_q6 }, 1411 { DEFINE_VFP_S_IDX (27), g_contained_q6, g_invalidate_q6 }, 1412 { DEFINE_VFP_S_IDX (28), g_contained_q7, g_invalidate_q7 }, 1413 { DEFINE_VFP_S_IDX (29), g_contained_q7, g_invalidate_q7 }, 1414 { DEFINE_VFP_S_IDX (30), g_contained_q7, g_invalidate_q7 }, 1415 { DEFINE_VFP_S_IDX (31), g_contained_q7, 
g_invalidate_q7 }, 1416 1417 { DEFINE_VFP_D_IDX (0), g_contained_q0, g_invalidate_q0 }, 1418 { DEFINE_VFP_D_IDX (1), g_contained_q0, g_invalidate_q0 }, 1419 { DEFINE_VFP_D_IDX (2), g_contained_q1, g_invalidate_q1 }, 1420 { DEFINE_VFP_D_IDX (3), g_contained_q1, g_invalidate_q1 }, 1421 { DEFINE_VFP_D_IDX (4), g_contained_q2, g_invalidate_q2 }, 1422 { DEFINE_VFP_D_IDX (5), g_contained_q2, g_invalidate_q2 }, 1423 { DEFINE_VFP_D_IDX (6), g_contained_q3, g_invalidate_q3 }, 1424 { DEFINE_VFP_D_IDX (7), g_contained_q3, g_invalidate_q3 }, 1425 { DEFINE_VFP_D_IDX (8), g_contained_q4, g_invalidate_q4 }, 1426 { DEFINE_VFP_D_IDX (9), g_contained_q4, g_invalidate_q4 }, 1427 { DEFINE_VFP_D_IDX (10), g_contained_q5, g_invalidate_q5 }, 1428 { DEFINE_VFP_D_IDX (11), g_contained_q5, g_invalidate_q5 }, 1429 { DEFINE_VFP_D_IDX (12), g_contained_q6, g_invalidate_q6 }, 1430 { DEFINE_VFP_D_IDX (13), g_contained_q6, g_invalidate_q6 }, 1431 { DEFINE_VFP_D_IDX (14), g_contained_q7, g_invalidate_q7 }, 1432 { DEFINE_VFP_D_IDX (15), g_contained_q7, g_invalidate_q7 }, 1433 { DEFINE_VFP_D_IDX (16), g_contained_q8, g_invalidate_q8 }, 1434 { DEFINE_VFP_D_IDX (17), g_contained_q8, g_invalidate_q8 }, 1435 { DEFINE_VFP_D_IDX (18), g_contained_q9, g_invalidate_q9 }, 1436 { DEFINE_VFP_D_IDX (19), g_contained_q9, g_invalidate_q9 }, 1437 { DEFINE_VFP_D_IDX (20), g_contained_q10, g_invalidate_q10 }, 1438 { DEFINE_VFP_D_IDX (21), g_contained_q10, g_invalidate_q10 }, 1439 { DEFINE_VFP_D_IDX (22), g_contained_q11, g_invalidate_q11 }, 1440 { DEFINE_VFP_D_IDX (23), g_contained_q11, g_invalidate_q11 }, 1441 { DEFINE_VFP_D_IDX (24), g_contained_q12, g_invalidate_q12 }, 1442 { DEFINE_VFP_D_IDX (25), g_contained_q12, g_invalidate_q12 }, 1443 { DEFINE_VFP_D_IDX (26), g_contained_q13, g_invalidate_q13 }, 1444 { DEFINE_VFP_D_IDX (27), g_contained_q13, g_invalidate_q13 }, 1445 { DEFINE_VFP_D_IDX (28), g_contained_q14, g_invalidate_q14 }, 1446 { DEFINE_VFP_D_IDX (29), g_contained_q14, g_invalidate_q14 }, 1447 { DEFINE_VFP_D_IDX (30), g_contained_q15, g_invalidate_q15 }, 1448 { DEFINE_VFP_D_IDX (31), g_contained_q15, g_invalidate_q15 }, 1449 1450 { DEFINE_VFP_Q_IDX (0), NULL, g_invalidate_q0 }, 1451 { DEFINE_VFP_Q_IDX (1), NULL, g_invalidate_q1 }, 1452 { DEFINE_VFP_Q_IDX (2), NULL, g_invalidate_q2 }, 1453 { DEFINE_VFP_Q_IDX (3), NULL, g_invalidate_q3 }, 1454 { DEFINE_VFP_Q_IDX (4), NULL, g_invalidate_q4 }, 1455 { DEFINE_VFP_Q_IDX (5), NULL, g_invalidate_q5 }, 1456 { DEFINE_VFP_Q_IDX (6), NULL, g_invalidate_q6 }, 1457 { DEFINE_VFP_Q_IDX (7), NULL, g_invalidate_q7 }, 1458 { DEFINE_VFP_Q_IDX (8), NULL, g_invalidate_q8 }, 1459 { DEFINE_VFP_Q_IDX (9), NULL, g_invalidate_q9 }, 1460 { DEFINE_VFP_Q_IDX (10), NULL, g_invalidate_q10 }, 1461 { DEFINE_VFP_Q_IDX (11), NULL, g_invalidate_q11 }, 1462 { DEFINE_VFP_Q_IDX (12), NULL, g_invalidate_q12 }, 1463 { DEFINE_VFP_Q_IDX (13), NULL, g_invalidate_q13 }, 1464 { DEFINE_VFP_Q_IDX (14), NULL, g_invalidate_q14 }, 1465 { DEFINE_VFP_Q_IDX (15), NULL, g_invalidate_q15 }, 1466 1467 { e_regSetVFP, vfp_fpscr, "fpscr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpscr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL } 1468 }; 1469 1470 // Exception registers 1471 1472 const DNBRegisterInfo 1473 DNBArchMachARM::g_exc_registers[] = 1474 { 1475 { e_regSetVFP, exc_exception , "exception" , NULL, Uint, Hex, 4, EXC_OFFSET(exception) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM }, 1476 { e_regSetVFP, exc_fsr , "fsr" , NULL, Uint, Hex, 4, EXC_OFFSET(fsr) , 
INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM }, 1477 { e_regSetVFP, exc_far , "far" , NULL, Uint, Hex, 4, EXC_OFFSET(far) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM } 1478 }; 1479 1480 // Number of registers in each register set 1481 const size_t DNBArchMachARM::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo); 1482 const size_t DNBArchMachARM::k_num_vfp_registers = sizeof(g_vfp_registers)/sizeof(DNBRegisterInfo); 1483 const size_t DNBArchMachARM::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo); 1484 const size_t DNBArchMachARM::k_num_all_registers = k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers; 1485 1486 //---------------------------------------------------------------------- 1487 // Register set definitions. The first definitions at register set index 1488 // of zero is for all registers, followed by other registers sets. The 1489 // register information for the all register set need not be filled in. 1490 //---------------------------------------------------------------------- 1491 const DNBRegisterSetInfo 1492 DNBArchMachARM::g_reg_sets[] = 1493 { 1494 { "ARM Registers", NULL, k_num_all_registers }, 1495 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 1496 { "Floating Point Registers", g_vfp_registers, k_num_vfp_registers }, 1497 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 1498 }; 1499 // Total number of register sets for this architecture 1500 const size_t DNBArchMachARM::k_num_register_sets = sizeof(g_reg_sets)/sizeof(DNBRegisterSetInfo); 1501 1502 1503 const DNBRegisterSetInfo * 1504 DNBArchMachARM::GetRegisterSetInfo(nub_size_t *num_reg_sets) 1505 { 1506 *num_reg_sets = k_num_register_sets; 1507 return g_reg_sets; 1508 } 1509 1510 bool 1511 DNBArchMachARM::GetRegisterValue(int set, int reg, DNBRegisterValue *value) 1512 { 1513 if (set == REGISTER_SET_GENERIC) 1514 { 1515 switch (reg) 1516 { 1517 case GENERIC_REGNUM_PC: // Program Counter 1518 set = e_regSetGPR; 1519 reg = gpr_pc; 1520 break; 1521 1522 case GENERIC_REGNUM_SP: // Stack Pointer 1523 set = e_regSetGPR; 1524 reg = gpr_sp; 1525 break; 1526 1527 case GENERIC_REGNUM_FP: // Frame Pointer 1528 set = e_regSetGPR; 1529 reg = gpr_r7; // is this the right reg? 1530 break; 1531 1532 case GENERIC_REGNUM_RA: // Return Address 1533 set = e_regSetGPR; 1534 reg = gpr_lr; 1535 break; 1536 1537 case GENERIC_REGNUM_FLAGS: // Processor flags register 1538 set = e_regSetGPR; 1539 reg = gpr_cpsr; 1540 break; 1541 1542 default: 1543 return false; 1544 } 1545 } 1546 1547 if (GetRegisterState(set, false) != KERN_SUCCESS) 1548 return false; 1549 1550 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1551 if (regInfo) 1552 { 1553 value->info = *regInfo; 1554 switch (set) 1555 { 1556 case e_regSetGPR: 1557 if (reg < k_num_gpr_registers) 1558 { 1559 value->value.uint32 = m_state.context.gpr.__r[reg]; 1560 return true; 1561 } 1562 break; 1563 1564 case e_regSetVFP: 1565 // "reg" is an index into the floating point register set at this point. 1566 // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0 1567 // in the enumerated values for case statement below. 
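            // Worked example: with vfp_s0 == 17, vfp_d0 == 49 and vfp_q0 == 81
            // (see the enums above), an incoming fp-set index of 32 becomes
            // 32 + 17 == 49 == vfp_d0, an index of 64 becomes 81 == vfp_q0,
            // and an index of 80 becomes 97 == vfp_fpscr.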
1568 reg += vfp_s0; 1569 if (reg >= vfp_s0 && reg <= vfp_s31) 1570 { 1571 value->value.uint32 = m_state.context.vfp.__r[reg - vfp_s0]; 1572 return true; 1573 } 1574 else if (reg >= vfp_d0 && reg <= vfp_d31) 1575 { 1576 uint32_t d_reg_idx = reg - vfp_d0; 1577 uint32_t s_reg_idx = d_reg_idx * 2; 1578 value->value.v_sint32[0] = m_state.context.vfp.__r[s_reg_idx + 0]; 1579 value->value.v_sint32[1] = m_state.context.vfp.__r[s_reg_idx + 1]; 1580 return true; 1581 } 1582 else if (reg >= vfp_q0 && reg <= vfp_q15) 1583 { 1584 uint32_t s_reg_idx = (reg - vfp_q0) * 4; 1585 memcpy (&value->value.v_uint8, (uint8_t *) &m_state.context.vfp.__r[s_reg_idx], 16); 1586 return true; 1587 } 1588 1589 else if (reg == vfp_fpscr) 1590 { 1591 value->value.uint32 = m_state.context.vfp.__fpscr; 1592 return true; 1593 } 1594 break; 1595 1596 case e_regSetEXC: 1597 if (reg < k_num_exc_registers) 1598 { 1599 value->value.uint32 = (&m_state.context.exc.__exception)[reg]; 1600 return true; 1601 } 1602 break; 1603 } 1604 } 1605 return false; 1606 } 1607 1608 bool 1609 DNBArchMachARM::SetRegisterValue(int set, int reg, const DNBRegisterValue *value) 1610 { 1611 if (set == REGISTER_SET_GENERIC) 1612 { 1613 switch (reg) 1614 { 1615 case GENERIC_REGNUM_PC: // Program Counter 1616 set = e_regSetGPR; 1617 reg = gpr_pc; 1618 break; 1619 1620 case GENERIC_REGNUM_SP: // Stack Pointer 1621 set = e_regSetGPR; 1622 reg = gpr_sp; 1623 break; 1624 1625 case GENERIC_REGNUM_FP: // Frame Pointer 1626 set = e_regSetGPR; 1627 reg = gpr_r7; 1628 break; 1629 1630 case GENERIC_REGNUM_RA: // Return Address 1631 set = e_regSetGPR; 1632 reg = gpr_lr; 1633 break; 1634 1635 case GENERIC_REGNUM_FLAGS: // Processor flags register 1636 set = e_regSetGPR; 1637 reg = gpr_cpsr; 1638 break; 1639 1640 default: 1641 return false; 1642 } 1643 } 1644 1645 if (GetRegisterState(set, false) != KERN_SUCCESS) 1646 return false; 1647 1648 bool success = false; 1649 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1650 if (regInfo) 1651 { 1652 switch (set) 1653 { 1654 case e_regSetGPR: 1655 if (reg < k_num_gpr_registers) 1656 { 1657 m_state.context.gpr.__r[reg] = value->value.uint32; 1658 success = true; 1659 } 1660 break; 1661 1662 case e_regSetVFP: 1663 // "reg" is an index into the floating point register set at this point. 1664 // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0 1665 // in the enumerated values for case statement below. 
1666 reg += vfp_s0; 1667 1668 if (reg >= vfp_s0 && reg <= vfp_s31) 1669 { 1670 m_state.context.vfp.__r[reg - vfp_s0] = value->value.uint32; 1671 success = true; 1672 } 1673 else if (reg >= vfp_d0 && reg <= vfp_d31) 1674 { 1675 uint32_t d_reg_idx = reg - vfp_d0; 1676 uint32_t s_reg_idx = d_reg_idx * 2; 1677 m_state.context.vfp.__r[s_reg_idx + 0] = value->value.v_sint32[0]; 1678 m_state.context.vfp.__r[s_reg_idx + 1] = value->value.v_sint32[1]; 1679 success = true; 1680 } 1681 else if (reg >= vfp_q0 && reg <= vfp_q15) 1682 { 1683 uint32_t s_reg_idx = (reg - vfp_q0) * 4; 1684 memcpy ((uint8_t *) &m_state.context.vfp.__r[s_reg_idx], &value->value.v_uint8, 16); 1685 return true; 1686 } 1687 else if (reg == vfp_fpscr) 1688 { 1689 m_state.context.vfp.__fpscr = value->value.uint32; 1690 success = true; 1691 } 1692 break; 1693 1694 case e_regSetEXC: 1695 if (reg < k_num_exc_registers) 1696 { 1697 (&m_state.context.exc.__exception)[reg] = value->value.uint32; 1698 success = true; 1699 } 1700 break; 1701 } 1702 1703 } 1704 if (success) 1705 return SetRegisterState(set) == KERN_SUCCESS; 1706 return false; 1707 } 1708 1709 kern_return_t 1710 DNBArchMachARM::GetRegisterState(int set, bool force) 1711 { 1712 switch (set) 1713 { 1714 case e_regSetALL: return GetGPRState(force) | 1715 GetVFPState(force) | 1716 GetEXCState(force) | 1717 GetDBGState(force); 1718 case e_regSetGPR: return GetGPRState(force); 1719 case e_regSetVFP: return GetVFPState(force); 1720 case e_regSetEXC: return GetEXCState(force); 1721 case e_regSetDBG: return GetDBGState(force); 1722 default: break; 1723 } 1724 return KERN_INVALID_ARGUMENT; 1725 } 1726 1727 kern_return_t 1728 DNBArchMachARM::SetRegisterState(int set) 1729 { 1730 // Make sure we have a valid context to set. 1731 kern_return_t err = GetRegisterState(set, false); 1732 if (err != KERN_SUCCESS) 1733 return err; 1734 1735 switch (set) 1736 { 1737 case e_regSetALL: return SetGPRState() | 1738 SetVFPState() | 1739 SetEXCState() | 1740 SetDBGState(false); 1741 case e_regSetGPR: return SetGPRState(); 1742 case e_regSetVFP: return SetVFPState(); 1743 case e_regSetEXC: return SetEXCState(); 1744 case e_regSetDBG: return SetDBGState(false); 1745 default: break; 1746 } 1747 return KERN_INVALID_ARGUMENT; 1748 } 1749 1750 bool 1751 DNBArchMachARM::RegisterSetStateIsValid (int set) const 1752 { 1753 return m_state.RegsAreValid(set); 1754 } 1755 1756 1757 nub_size_t 1758 DNBArchMachARM::GetRegisterContext (void *buf, nub_size_t buf_len) 1759 { 1760 nub_size_t size = sizeof (m_state.context); 1761 1762 if (buf && buf_len) 1763 { 1764 if (size > buf_len) 1765 size = buf_len; 1766 1767 bool force = false; 1768 if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force)) 1769 return 0; 1770 ::memcpy (buf, &m_state.context, size); 1771 } 1772 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size); 1773 // Return the size of the register context even if NULL was passed in 1774 return size; 1775 } 1776 1777 nub_size_t 1778 DNBArchMachARM::SetRegisterContext (const void *buf, nub_size_t buf_len) 1779 { 1780 nub_size_t size = sizeof (m_state.context); 1781 if (buf == NULL || buf_len == 0) 1782 size = 0; 1783 1784 if (size) 1785 { 1786 if (size > buf_len) 1787 size = buf_len; 1788 1789 ::memcpy (&m_state.context, buf, size); 1790 SetGPRState(); 1791 SetVFPState(); 1792 SetEXCState(); 1793 } 1794 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, 
(uint64_t)buf_len, (uint64_t)size); 1795 return size; 1796 } 1797 1798 1799 #endif // #if defined (__arm__) 1800 1801