//===-- RegisterContext_x86_64.cpp -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include <cstring>
#include <errno.h>
#include <stdint.h>

#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/DataExtractor.h"
#include "lldb/Core/RegisterValue.h"
#include "lldb/Core/Scalar.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Host/Endian.h"
#include "llvm/Support/Compiler.h"

#include "ProcessPOSIX.h"
#if defined(__linux__) or defined(__FreeBSD__)
#include "ProcessMonitor.h"
#endif
#include "RegisterContext_i386.h"
#include "RegisterContext_x86.h"
#include "RegisterContext_x86_64.h"
#include "Plugins/Process/elf-core/ProcessElfCore.h"

using namespace lldb_private;
using namespace lldb;

// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif

enum
{
    gcc_dwarf_gpr_rax = 0,
    gcc_dwarf_gpr_rdx,
    gcc_dwarf_gpr_rcx,
    gcc_dwarf_gpr_rbx,
    gcc_dwarf_gpr_rsi,
    gcc_dwarf_gpr_rdi,
    gcc_dwarf_gpr_rbp,
    gcc_dwarf_gpr_rsp,
    gcc_dwarf_gpr_r8,
    gcc_dwarf_gpr_r9,
    gcc_dwarf_gpr_r10,
    gcc_dwarf_gpr_r11,
    gcc_dwarf_gpr_r12,
    gcc_dwarf_gpr_r13,
    gcc_dwarf_gpr_r14,
    gcc_dwarf_gpr_r15,
    gcc_dwarf_gpr_rip,
    gcc_dwarf_fpu_xmm0,
    gcc_dwarf_fpu_xmm1,
    gcc_dwarf_fpu_xmm2,
    gcc_dwarf_fpu_xmm3,
    gcc_dwarf_fpu_xmm4,
    gcc_dwarf_fpu_xmm5,
    gcc_dwarf_fpu_xmm6,
    gcc_dwarf_fpu_xmm7,
    gcc_dwarf_fpu_xmm8,
    gcc_dwarf_fpu_xmm9,
    gcc_dwarf_fpu_xmm10,
    gcc_dwarf_fpu_xmm11,
    gcc_dwarf_fpu_xmm12,
    gcc_dwarf_fpu_xmm13,
    gcc_dwarf_fpu_xmm14,
    gcc_dwarf_fpu_xmm15,
    gcc_dwarf_fpu_stmm0,
    gcc_dwarf_fpu_stmm1,
    gcc_dwarf_fpu_stmm2,
    gcc_dwarf_fpu_stmm3,
    gcc_dwarf_fpu_stmm4,
    gcc_dwarf_fpu_stmm5,
    gcc_dwarf_fpu_stmm6,
    gcc_dwarf_fpu_stmm7,
    gcc_dwarf_fpu_ymm0,
    gcc_dwarf_fpu_ymm1,
    gcc_dwarf_fpu_ymm2,
    gcc_dwarf_fpu_ymm3,
    gcc_dwarf_fpu_ymm4,
    gcc_dwarf_fpu_ymm5,
    gcc_dwarf_fpu_ymm6,
    gcc_dwarf_fpu_ymm7,
    gcc_dwarf_fpu_ymm8,
    gcc_dwarf_fpu_ymm9,
    gcc_dwarf_fpu_ymm10,
    gcc_dwarf_fpu_ymm11,
    gcc_dwarf_fpu_ymm12,
    gcc_dwarf_fpu_ymm13,
    gcc_dwarf_fpu_ymm14,
    gcc_dwarf_fpu_ymm15
};

enum
{
    gdb_gpr_rax = 0,
    gdb_gpr_rbx = 1,
    gdb_gpr_rcx = 2,
    gdb_gpr_rdx = 3,
    gdb_gpr_rsi = 4,
    gdb_gpr_rdi = 5,
    gdb_gpr_rbp = 6,
    gdb_gpr_rsp = 7,
    gdb_gpr_r8 = 8,
    gdb_gpr_r9 = 9,
    gdb_gpr_r10 = 10,
    gdb_gpr_r11 = 11,
    gdb_gpr_r12 = 12,
    gdb_gpr_r13 = 13,
    gdb_gpr_r14 = 14,
    gdb_gpr_r15 = 15,
    gdb_gpr_rip = 16,
    gdb_gpr_rflags = 17,
    gdb_gpr_cs = 18,
    gdb_gpr_ss = 19,
    gdb_gpr_ds = 20,
    gdb_gpr_es = 21,
    gdb_gpr_fs = 22,
    gdb_gpr_gs = 23,
    gdb_fpu_stmm0 = 24,
    gdb_fpu_stmm1 = 25,
    gdb_fpu_stmm2 = 26,
    gdb_fpu_stmm3 = 27,
    gdb_fpu_stmm4 = 28,
    gdb_fpu_stmm5 = 29,
    gdb_fpu_stmm6 = 30,
    gdb_fpu_stmm7 = 31,
    gdb_fpu_fcw = 32,
    gdb_fpu_fsw = 33,
    gdb_fpu_ftw = 34,
    gdb_fpu_cs_64 = 35,
    gdb_fpu_ip = 36,
    gdb_fpu_ds_64 = 37,
    gdb_fpu_dp = 38,
    gdb_fpu_fop = 39,
    gdb_fpu_xmm0 = 40,
    gdb_fpu_xmm1 = 41,
    gdb_fpu_xmm2 = 42,
    gdb_fpu_xmm3 = 43,
    gdb_fpu_xmm4 = 44,
    gdb_fpu_xmm5 = 45,
    gdb_fpu_xmm6 = 46,
    gdb_fpu_xmm7 = 47,
    gdb_fpu_xmm8 = 48,
    gdb_fpu_xmm9 = 49,
    gdb_fpu_xmm10 = 50,
    gdb_fpu_xmm11 = 51,
    gdb_fpu_xmm12 = 52,
    gdb_fpu_xmm13 = 53,
    gdb_fpu_xmm14 = 54,
    gdb_fpu_xmm15 = 55,
    gdb_fpu_mxcsr = 56,
    gdb_fpu_ymm0 = 57,
    gdb_fpu_ymm1 = 58,
    gdb_fpu_ymm2 = 59,
    gdb_fpu_ymm3 = 60,
    gdb_fpu_ymm4 = 61,
    gdb_fpu_ymm5 = 62,
    gdb_fpu_ymm6 = 63,
    gdb_fpu_ymm7 = 64,
    gdb_fpu_ymm8 = 65,
    gdb_fpu_ymm9 = 66,
    gdb_fpu_ymm10 = 67,
    gdb_fpu_ymm11 = 68,
    gdb_fpu_ymm12 = 69,
    gdb_fpu_ymm13 = 70,
    gdb_fpu_ymm14 = 71,
    gdb_fpu_ymm15 = 72
};

static const uint32_t
g_gpr_regnums[k_num_gpr_registers] =
{
    gpr_rax,
    gpr_rbx,
    gpr_rcx,
    gpr_rdx,
    gpr_rdi,
    gpr_rsi,
    gpr_rbp,
    gpr_rsp,
    gpr_r8,
    gpr_r9,
    gpr_r10,
    gpr_r11,
    gpr_r12,
    gpr_r13,
    gpr_r14,
    gpr_r15,
    gpr_rip,
    gpr_rflags,
    gpr_cs,
    gpr_fs,
    gpr_gs,
    gpr_ss,
    gpr_ds,
    gpr_es,
    gpr_eax,
    gpr_ebx,
    gpr_ecx,
    gpr_edx,
    gpr_edi,
    gpr_esi,
    gpr_ebp,
    gpr_esp,
    gpr_eip,
    gpr_eflags
};

static const uint32_t
g_fpu_regnums[k_num_fpr_registers] =
{
    fpu_fcw,
    fpu_fsw,
    fpu_ftw,
    fpu_fop,
    fpu_ip,
    fpu_cs,
    fpu_dp,
    fpu_ds,
    fpu_mxcsr,
    fpu_mxcsrmask,
    fpu_stmm0,
    fpu_stmm1,
    fpu_stmm2,
    fpu_stmm3,
    fpu_stmm4,
    fpu_stmm5,
    fpu_stmm6,
    fpu_stmm7,
    fpu_xmm0,
    fpu_xmm1,
    fpu_xmm2,
    fpu_xmm3,
    fpu_xmm4,
    fpu_xmm5,
    fpu_xmm6,
    fpu_xmm7,
    fpu_xmm8,
    fpu_xmm9,
    fpu_xmm10,
    fpu_xmm11,
    fpu_xmm12,
    fpu_xmm13,
    fpu_xmm14,
    fpu_xmm15
};

static const uint32_t
g_avx_regnums[k_num_avx_registers] =
{
    fpu_ymm0,
    fpu_ymm1,
    fpu_ymm2,
    fpu_ymm3,
    fpu_ymm4,
    fpu_ymm5,
    fpu_ymm6,
    fpu_ymm7,
    fpu_ymm8,
    fpu_ymm9,
    fpu_ymm10,
    fpu_ymm11,
    fpu_ymm12,
    fpu_ymm13,
    fpu_ymm14,
    fpu_ymm15
};

// Number of register sets provided by this context.
enum
{
    k_num_extended_register_sets = 1,
    k_num_register_sets = 3
};

static const RegisterSet
g_reg_sets[k_num_register_sets] =
{
    { "General Purpose Registers",  "gpr", k_num_gpr_registers, g_gpr_regnums },
    { "Floating Point Registers",   "fpu", k_num_fpr_registers, g_fpu_regnums },
    { "Advanced Vector Extensions", "avx", k_num_avx_registers, g_avx_regnums }
};

// Computes the offset of the given FPR in the extended data area.
#define FPR_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::FPR, xstate) + \
     offsetof(RegisterContext_x86_64::FXSAVE, regname))

// Computes the offset of the YMM register assembled from register halves.
#define YMM_OFFSET(regname) \
    (offsetof(RegisterContext_x86_64::YMM, regname))

// Number of bytes needed to represent an i386 GPR.
#define GPR_i386_SIZE(reg) sizeof(((RegisterContext_i386::GPR*)NULL)->reg)

// Number of bytes needed to represent a FPR.
#define FPR_SIZE(reg) sizeof(((RegisterContext_x86_64::FXSAVE*)NULL)->reg)

// Number of bytes needed to represent the i'th FP register.
#define FP_SIZE sizeof(((RegisterContext_x86_64::MMSReg*)NULL)->bytes)

// Number of bytes needed to represent an XMM register.
#define XMM_SIZE sizeof(RegisterContext_x86_64::XMMReg)

// Number of bytes needed to represent a YMM register.
#define YMM_SIZE sizeof(RegisterContext_x86_64::YMMReg)

// Note that the size and offset will be updated by platform-specific classes.
#define DEFINE_GPR(reg, alt, kind1, kind2, kind3, kind4) \
    { #reg, alt, 0, 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg }, NULL, NULL }

// Dummy data for RegisterInfo::value_regs as expected by DumpRegisterSet.
static uint32_t value_regs = LLDB_INVALID_REGNUM;

#define DEFINE_GPR_i386(reg_i386, reg_x86_64, alt, kind1, kind2, kind3, kind4) \
    { #reg_i386, alt, GPR_i386_SIZE(reg_i386), 0, eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg_i386 }, &value_regs, NULL }

#define DEFINE_FPR(reg, kind1, kind2, kind3, kind4) \
    { #reg, NULL, FPR_SIZE(reg), FPR_OFFSET(reg), eEncodingUint, \
      eFormatHex, { kind1, kind2, kind3, kind4, fpu_##reg }, NULL, NULL }

#define DEFINE_FP(reg, i) \
    { #reg#i, NULL, FP_SIZE, LLVM_EXTENSION FPR_OFFSET(reg[i]), \
      eEncodingVector, eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

#define DEFINE_XMM(reg, i) \
    { #reg#i, NULL, XMM_SIZE, LLVM_EXTENSION FPR_OFFSET(reg[i]), \
      eEncodingVector, eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

#define DEFINE_YMM(reg, i) \
    { #reg#i, NULL, YMM_SIZE, LLVM_EXTENSION YMM_OFFSET(reg[i]), \
      eEncodingVector, eFormatVectorOfUInt8, \
      { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
        LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }

#define DEFINE_DR(reg, i) \
    { #reg#i, NULL, 0, 0, eEncodingUint, eFormatHex, \
      { LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, \
        LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM }, NULL, NULL }

#define REG_CONTEXT_SIZE (GetGPRSize() + sizeof(RegisterContext_x86_64::FPR))

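// For illustration: with the macros above, an entry such as
// DEFINE_GPR(rax, NULL, k1, k2, k3, k4) expands roughly to
//     { "rax", NULL, 0, 0, eEncodingUint, eFormatHex,
//       { k1, k2, k3, k4, gpr_rax }, NULL, NULL }
// i.e. name, alternate name, byte size, byte offset, encoding, format, the
// five register-number kinds, and the trailing value_regs/invalidate_regs
// pointers. The zero size and offset in the GPR case are the fields the
// platform-specific subclasses fill in later, as noted above.
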
static RegisterInfo
g_register_infos[k_num_registers] =
{
    // General purpose registers.
    DEFINE_GPR(rax, NULL, gcc_dwarf_gpr_rax, gcc_dwarf_gpr_rax, LLDB_INVALID_REGNUM, gdb_gpr_rax),
    DEFINE_GPR(rbx, NULL, gcc_dwarf_gpr_rbx, gcc_dwarf_gpr_rbx, LLDB_INVALID_REGNUM, gdb_gpr_rbx),
    DEFINE_GPR(rcx, NULL, gcc_dwarf_gpr_rcx, gcc_dwarf_gpr_rcx, LLDB_INVALID_REGNUM, gdb_gpr_rcx),
    DEFINE_GPR(rdx, NULL, gcc_dwarf_gpr_rdx, gcc_dwarf_gpr_rdx, LLDB_INVALID_REGNUM, gdb_gpr_rdx),
    DEFINE_GPR(rdi, NULL, gcc_dwarf_gpr_rdi, gcc_dwarf_gpr_rdi, LLDB_INVALID_REGNUM, gdb_gpr_rdi),
    DEFINE_GPR(rsi, NULL, gcc_dwarf_gpr_rsi, gcc_dwarf_gpr_rsi, LLDB_INVALID_REGNUM, gdb_gpr_rsi),
    DEFINE_GPR(rbp, "fp", gcc_dwarf_gpr_rbp, gcc_dwarf_gpr_rbp, LLDB_REGNUM_GENERIC_FP, gdb_gpr_rbp),
    DEFINE_GPR(rsp, "sp", gcc_dwarf_gpr_rsp, gcc_dwarf_gpr_rsp, LLDB_REGNUM_GENERIC_SP, gdb_gpr_rsp),
    DEFINE_GPR(r8, NULL, gcc_dwarf_gpr_r8, gcc_dwarf_gpr_r8, LLDB_INVALID_REGNUM, gdb_gpr_r8),
    DEFINE_GPR(r9, NULL, gcc_dwarf_gpr_r9, gcc_dwarf_gpr_r9, LLDB_INVALID_REGNUM, gdb_gpr_r9),
    DEFINE_GPR(r10, NULL, gcc_dwarf_gpr_r10, gcc_dwarf_gpr_r10, LLDB_INVALID_REGNUM, gdb_gpr_r10),
    DEFINE_GPR(r11, NULL, gcc_dwarf_gpr_r11, gcc_dwarf_gpr_r11, LLDB_INVALID_REGNUM, gdb_gpr_r11),
    DEFINE_GPR(r12, NULL, gcc_dwarf_gpr_r12, gcc_dwarf_gpr_r12, LLDB_INVALID_REGNUM, gdb_gpr_r12),
    DEFINE_GPR(r13, NULL, gcc_dwarf_gpr_r13, gcc_dwarf_gpr_r13, LLDB_INVALID_REGNUM, gdb_gpr_r13),
    DEFINE_GPR(r14, NULL, gcc_dwarf_gpr_r14, gcc_dwarf_gpr_r14, LLDB_INVALID_REGNUM, gdb_gpr_r14),
    DEFINE_GPR(r15, NULL, gcc_dwarf_gpr_r15, gcc_dwarf_gpr_r15, LLDB_INVALID_REGNUM, gdb_gpr_r15),
    DEFINE_GPR(rip, "pc", gcc_dwarf_gpr_rip, gcc_dwarf_gpr_rip, LLDB_REGNUM_GENERIC_PC, gdb_gpr_rip),
    DEFINE_GPR(rflags, "flags", LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_REGNUM_GENERIC_FLAGS, gdb_gpr_rflags),
    DEFINE_GPR(cs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_cs),
    DEFINE_GPR(fs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_fs),
    DEFINE_GPR(gs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_gs),
    DEFINE_GPR(ss, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_ss),
    DEFINE_GPR(ds, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_ds),
    DEFINE_GPR(es, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_es),
    // i386 registers
    DEFINE_GPR_i386(eax, rax, NULL, gcc_eax, dwarf_eax, LLDB_INVALID_REGNUM, gdb_eax),
    DEFINE_GPR_i386(ebx, rbx, NULL, gcc_ebx, dwarf_ebx, LLDB_INVALID_REGNUM, gdb_ebx),
    DEFINE_GPR_i386(ecx, rcx, NULL, gcc_ecx, dwarf_ecx, LLDB_INVALID_REGNUM, gdb_ecx),
    DEFINE_GPR_i386(edx, rdx, NULL, gcc_edx, dwarf_edx, LLDB_INVALID_REGNUM, gdb_edx),
    DEFINE_GPR_i386(edi, rdi, NULL, gcc_edi, dwarf_edi, LLDB_INVALID_REGNUM, gdb_edi),
    DEFINE_GPR_i386(esi, rsi, NULL, gcc_esi, dwarf_esi, LLDB_INVALID_REGNUM, gdb_esi),
    DEFINE_GPR_i386(ebp, rbp, "fp", gcc_ebp, dwarf_ebp, LLDB_REGNUM_GENERIC_FP, gdb_ebp),
    DEFINE_GPR_i386(esp, rsp, "sp", gcc_esp, dwarf_esp, LLDB_REGNUM_GENERIC_SP, gdb_esp),
    DEFINE_GPR_i386(eip, rip, "pc", gcc_eip, dwarf_eip, LLDB_REGNUM_GENERIC_PC, gdb_eip),
    DEFINE_GPR_i386(eflags, rflags, "flags", gcc_eflags, dwarf_eflags, LLDB_REGNUM_GENERIC_FLAGS, gdb_eflags),
    // i387 Floating point registers.
    DEFINE_FPR(fcw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fcw),
    DEFINE_FPR(fsw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fsw),
    DEFINE_FPR(ftw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ftw),
    DEFINE_FPR(fop, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fop),
    DEFINE_FPR(ip, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ip),
    // FIXME: Extract segment from ip.
    DEFINE_FPR(ip, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_cs_64),
    DEFINE_FPR(dp, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_dp),
    // FIXME: Extract segment from dp.
    DEFINE_FPR(dp, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ds_64),
    DEFINE_FPR(mxcsr, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_mxcsr),
    DEFINE_FPR(mxcsrmask, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM),

    // FP registers.
    DEFINE_FP(stmm, 0),
    DEFINE_FP(stmm, 1),
    DEFINE_FP(stmm, 2),
    DEFINE_FP(stmm, 3),
    DEFINE_FP(stmm, 4),
    DEFINE_FP(stmm, 5),
    DEFINE_FP(stmm, 6),
    DEFINE_FP(stmm, 7),

    // XMM registers
    DEFINE_XMM(xmm, 0),
    DEFINE_XMM(xmm, 1),
    DEFINE_XMM(xmm, 2),
    DEFINE_XMM(xmm, 3),
    DEFINE_XMM(xmm, 4),
    DEFINE_XMM(xmm, 5),
    DEFINE_XMM(xmm, 6),
    DEFINE_XMM(xmm, 7),
    DEFINE_XMM(xmm, 8),
    DEFINE_XMM(xmm, 9),
    DEFINE_XMM(xmm, 10),
    DEFINE_XMM(xmm, 11),
    DEFINE_XMM(xmm, 12),
    DEFINE_XMM(xmm, 13),
    DEFINE_XMM(xmm, 14),
    DEFINE_XMM(xmm, 15),

    // Copy of YMM registers assembled from xmm and ymmh
    DEFINE_YMM(ymm, 0),
    DEFINE_YMM(ymm, 1),
    DEFINE_YMM(ymm, 2),
    DEFINE_YMM(ymm, 3),
    DEFINE_YMM(ymm, 4),
    DEFINE_YMM(ymm, 5),
    DEFINE_YMM(ymm, 6),
    DEFINE_YMM(ymm, 7),
    DEFINE_YMM(ymm, 8),
    DEFINE_YMM(ymm, 9),
    DEFINE_YMM(ymm, 10),
    DEFINE_YMM(ymm, 11),
    DEFINE_YMM(ymm, 12),
    DEFINE_YMM(ymm, 13),
    DEFINE_YMM(ymm, 14),
    DEFINE_YMM(ymm, 15),

    // Debug registers for lldb internal use
    DEFINE_DR(dr, 0),
    DEFINE_DR(dr, 1),
    DEFINE_DR(dr, 2),
    DEFINE_DR(dr, 3),
    DEFINE_DR(dr, 4),
    DEFINE_DR(dr, 5),
    DEFINE_DR(dr, 6),
    DEFINE_DR(dr, 7)
};

static bool IsGPR(unsigned reg)
{
    return reg <= k_last_gpr; // GPRs come first.
}

static bool IsAVX(unsigned reg)
{
    return (k_first_avx <= reg && reg <= k_last_avx);
}

static bool IsFPR(unsigned reg)
{
    return (k_first_fpr <= reg && reg <= k_last_fpr);
}

bool RegisterContext_x86_64::IsFPR(unsigned reg, FPRType fpr_type)
{
    bool generic_fpr = ::IsFPR(reg);
    if (fpr_type == eXSAVE)
        return generic_fpr || IsAVX(reg);

    return generic_fpr;
}

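// Layout note (illustrative): these predicates rely on the LLDB register
// numbers being grouped as [GPRs][FXSAVE FPRs][AVX/ymm], with the gpr_*,
// fpu_* and fpu_ymm* enumerators and the k_first_*/k_last_* boundary
// constants presumably coming from the included RegisterContext_x86_64.h
// header.
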
RegisterContext_x86_64::RegisterContext_x86_64(Thread &thread,
                                               uint32_t concrete_frame_idx)
    : RegisterContextPOSIX(thread, concrete_frame_idx)
{
    // Initialize m_iovec to point to the buffer and buffer size
    // using the conventions of Berkeley style UIO structures, as required
    // by PTRACE extensions.
    m_iovec.iov_base = &m_fpr.xstate.xsave;
    m_iovec.iov_len = sizeof(m_fpr.xstate.xsave);

    ::memset(&m_fpr, 0, sizeof(RegisterContext_x86_64::FPR));

    // elf-core does not yet support ReadFPR().
    ProcessSP base = CalculateProcess();
    if (base.get()->GetPluginName() == ProcessElfCore::GetPluginNameStatic())
        return;

    // TODO: Use assembly to call cpuid on the inferior and query ebx or ecx
    m_fpr_type = eXSAVE; // extended floating-point registers, if available
    if (false == ReadFPR())
        m_fpr_type = eFXSAVE; // assume generic floating-point registers
}

RegisterContext_x86_64::~RegisterContext_x86_64()
{
}

void
RegisterContext_x86_64::Invalidate()
{
}

void
RegisterContext_x86_64::InvalidateAllRegisters()
{
}

unsigned
RegisterContext_x86_64::GetRegisterOffset(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_offset;
}

unsigned
RegisterContext_x86_64::GetRegisterSize(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].byte_size;
}

size_t
RegisterContext_x86_64::GetRegisterCount()
{
    size_t num_registers = k_num_gpr_registers + k_num_fpr_registers;
    if (m_fpr_type == eXSAVE)
        return num_registers + k_num_avx_registers;
    return num_registers;
}

const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfo()
{
    // Commonly, this method is overridden and g_register_infos is copied and
    // specialized. So, use GetRegisterInfo() rather than g_register_infos in
    // this scope.
    return g_register_infos;
}

const RegisterInfo *
RegisterContext_x86_64::GetRegisterInfoAtIndex(size_t reg)
{
    if (reg < k_num_registers)
        return &GetRegisterInfo()[reg];
    else
        return NULL;
}

size_t
RegisterContext_x86_64::GetRegisterSetCount()
{
    size_t sets = 0;
    for (size_t set = 0; set < k_num_register_sets; ++set)
        if (IsRegisterSetAvailable(set))
            ++sets;

    return sets;
}

const RegisterSet *
RegisterContext_x86_64::GetRegisterSet(size_t set)
{
    if (IsRegisterSetAvailable(set))
        return &g_reg_sets[set];
    else
        return NULL;
}

unsigned
RegisterContext_x86_64::GetRegisterIndexFromOffset(unsigned offset)
{
    unsigned reg;
    for (reg = 0; reg < k_num_registers; reg++)
    {
        if (GetRegisterInfo()[reg].byte_offset == offset)
            break;
    }
    assert(reg < k_num_registers && "Invalid register offset.");
    return reg;
}

const char *
RegisterContext_x86_64::GetRegisterName(unsigned reg)
{
    assert(reg < k_num_registers && "Invalid register number.");
    return GetRegisterInfo()[reg].name;
}

lldb::ByteOrder
RegisterContext_x86_64::GetByteOrder()
{
    // Get the target process whose privileged thread was used for the register read.
    lldb::ByteOrder byte_order = eByteOrderInvalid;
    Process *process = CalculateProcess().get();

    if (process)
        byte_order = process->GetByteOrder();
    return byte_order;
}

// Parse ymm registers into xmm.bytes and ymmh.bytes.
bool RegisterContext_x86_64::CopyYMMtoXSTATE(uint32_t reg, lldb::ByteOrder byte_order)
{
    if (!IsAVX(reg))
        return false;

    if (byte_order == eByteOrderLittle) {
        ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }

    if (byte_order == eByteOrderBig) {
        ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }
    return false; // unsupported or invalid byte order
}

// Concatenate xmm.bytes with ymmh.bytes
bool RegisterContext_x86_64::CopyXSTATEtoYMM(uint32_t reg, lldb::ByteOrder byte_order)
{
    if (!IsAVX(reg))
        return false;

    if (byte_order == eByteOrderLittle) {
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }

    if (byte_order == eByteOrderBig) {
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
                 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::XMMReg));
        ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
                 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
                 sizeof(RegisterContext_x86_64::YMMHReg));
        return true;
    }
    return false; // unsupported or invalid byte order
}

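// Worked illustration of the split handled above (little-endian case): for
// ymm register i, the 32-byte m_ymm_set.ymm[i].bytes buffer maps as
//     bytes[0..15]  <-> m_fpr.xstate.fxsave.xmm[i].bytes   (low half)
//     bytes[16..31] <-> m_fpr.xstate.xsave.ymmh[i].bytes   (high half)
// The big-endian branches simply swap which half of ymm[i].bytes carries the
// xmm portion.
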
bool
RegisterContext_x86_64::IsRegisterSetAvailable(size_t set_index)
{
    // Note: Extended register sets are assumed to be at the end of g_reg_sets...
    size_t num_sets = k_num_register_sets - k_num_extended_register_sets;
    if (m_fpr_type == eXSAVE) // ...and to start with AVX registers.
        ++num_sets;

    return (set_index < num_sets);
}

bool
RegisterContext_x86_64::ReadRegister(const RegisterInfo *reg_info, RegisterValue &value)
{
    if (!reg_info)
        return false;

    const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];

    if (IsFPR(reg, m_fpr_type)) {
        if (!ReadFPR())
            return false;
    }
    else {
        bool success = ReadRegister(reg, value);

        // If an i386 register should be parsed from an x86_64 register...
        if (success && reg >= k_first_i386 && reg <= k_last_i386)
            if (value.GetByteSize() > reg_info->byte_size)
                value.SetType(reg_info); // ...use the type specified by reg_info rather than the uint64_t default
        return success;
    }

    if (reg_info->encoding == eEncodingVector) {
        ByteOrder byte_order = GetByteOrder();

        if (byte_order != ByteOrder::eByteOrderInvalid) {
            if (reg >= fpu_stmm0 && reg <= fpu_stmm7) {
                value.SetBytes(m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, reg_info->byte_size, byte_order);
            }
            if (reg >= fpu_xmm0 && reg <= fpu_xmm15) {
                value.SetBytes(m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, reg_info->byte_size, byte_order);
            }
            if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
                // Concatenate ymm using the register halves in xmm.bytes and ymmh.bytes
                if (m_fpr_type == eXSAVE && CopyXSTATEtoYMM(reg, byte_order))
                    value.SetBytes(m_ymm_set.ymm[reg - fpu_ymm0].bytes, reg_info->byte_size, byte_order);
                else
                    return false;
            }
            return value.GetType() == RegisterValue::eTypeBytes;
        }
        return false;
    }

    // Note that lldb uses slightly different naming conventions from sys/user.h
    switch (reg)
    {
    default:
        return false;
    case fpu_dp:
        value = m_fpr.xstate.fxsave.dp;
        break;
    case fpu_fcw:
        value = m_fpr.xstate.fxsave.fcw;
        break;
    case fpu_fsw:
        value = m_fpr.xstate.fxsave.fsw;
        break;
    case fpu_ip:
        value = m_fpr.xstate.fxsave.ip;
        break;
    case fpu_fop:
        value = m_fpr.xstate.fxsave.fop;
        break;
    case fpu_ftw:
        value = m_fpr.xstate.fxsave.ftw;
        break;
    case fpu_mxcsr:
        value = m_fpr.xstate.fxsave.mxcsr;
        break;
    case fpu_mxcsrmask:
        value = m_fpr.xstate.fxsave.mxcsrmask;
        break;
    }
    return true;
}

bool
RegisterContext_x86_64::ReadAllRegisterValues(DataBufferSP &data_sp)
{
    bool success = false;
    data_sp.reset (new DataBufferHeap (REG_CONTEXT_SIZE, 0));
    if (data_sp && ReadGPR () && ReadFPR ())
    {
        uint8_t *dst = data_sp->GetBytes();
        success = dst != 0;

        if (success) {
            ::memcpy (dst, &m_gpr, GetGPRSize());
            dst += GetGPRSize();
        }
        if (m_fpr_type == eFXSAVE)
            ::memcpy (dst, &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));

        if (m_fpr_type == eXSAVE) {
            ByteOrder byte_order = GetByteOrder();

            // Assemble the YMM register content from the register halves.
            for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
                success = CopyXSTATEtoYMM(reg, byte_order);

            if (success) {
                // Copy the extended register state including the assembled ymm registers.
                ::memcpy (dst, &m_fpr, sizeof(m_fpr));
            }
        }
    }
    return success;
}

bool
RegisterContext_x86_64::WriteRegister(const lldb_private::RegisterInfo *reg_info,
                                      const lldb_private::RegisterValue &value)
{
    const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];
    if (IsGPR(reg)) {
        return WriteRegister(reg, value);
    }

    if (IsFPR(reg, m_fpr_type)) {
        switch (reg)
        {
        default:
            if (reg_info->encoding != eEncodingVector)
                return false;

            if (reg >= fpu_stmm0 && reg <= fpu_stmm7)
                ::memcpy (m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, value.GetBytes(), value.GetByteSize());

            if (reg >= fpu_xmm0 && reg <= fpu_xmm15)
                ::memcpy (m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, value.GetBytes(), value.GetByteSize());

            if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
                if (m_fpr_type != eXSAVE)
                    return false; // the target processor does not support AVX

                // Store ymm register content, and split into the register halves in xmm.bytes and ymmh.bytes
                ::memcpy (m_ymm_set.ymm[reg - fpu_ymm0].bytes, value.GetBytes(), value.GetByteSize());
                if (false == CopyYMMtoXSTATE(reg, GetByteOrder()))
                    return false;
            }
            break;
        case fpu_dp:
            m_fpr.xstate.fxsave.dp = value.GetAsUInt64();
            break;
        case fpu_fcw:
            m_fpr.xstate.fxsave.fcw = value.GetAsUInt16();
            break;
        case fpu_fsw:
            m_fpr.xstate.fxsave.fsw = value.GetAsUInt16();
            break;
        case fpu_ip:
            m_fpr.xstate.fxsave.ip = value.GetAsUInt64();
            break;
        case fpu_fop:
            m_fpr.xstate.fxsave.fop = value.GetAsUInt16();
            break;
        case fpu_ftw:
            m_fpr.xstate.fxsave.ftw = value.GetAsUInt16();
            break;
        case fpu_mxcsr:
            m_fpr.xstate.fxsave.mxcsr = value.GetAsUInt32();
            break;
        case fpu_mxcsrmask:
            m_fpr.xstate.fxsave.mxcsrmask = value.GetAsUInt32();
            break;
        }
        if (WriteFPR()) {
            if (IsAVX(reg))
                return CopyYMMtoXSTATE(reg, GetByteOrder());
            return true;
        }
    }
    return false;
}

bool
RegisterContext_x86_64::WriteAllRegisterValues(const DataBufferSP &data_sp)
{
    bool success = false;
    if (data_sp && data_sp->GetByteSize() == REG_CONTEXT_SIZE)
    {
        uint8_t *src = data_sp->GetBytes();
        if (src) {
            ::memcpy (&m_gpr, src, GetGPRSize());

            if (WriteGPR()) {
                src += GetGPRSize();
                if (m_fpr_type == eFXSAVE)
                    ::memcpy (&m_fpr.xstate.fxsave, src, sizeof(m_fpr.xstate.fxsave));
                if (m_fpr_type == eXSAVE)
                    ::memcpy (&m_fpr.xstate.xsave, src, sizeof(m_fpr.xstate.xsave));

                success = WriteFPR();
                if (success) {
                    success = true;

                    if (m_fpr_type == eXSAVE) {
                        ByteOrder byte_order = GetByteOrder();

                        // Parse the YMM register content from the register halves.
                        for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
                            success = CopyYMMtoXSTATE(reg, byte_order);
                    }
                }
            }
        }
    }
    return success;
}

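// Illustration of the buffer layout exchanged by ReadAllRegisterValues() and
// WriteAllRegisterValues() above: REG_CONTEXT_SIZE bytes, i.e. GetGPRSize()
// bytes of m_gpr followed by a sizeof(RegisterContext_x86_64::FPR) region for
// the floating-point state; when m_fpr_type is eFXSAVE only the FXSAVE prefix
// of that second region is populated.
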
bool
RegisterContext_x86_64::UpdateAfterBreakpoint()
{
    // PC points one byte past the int3 responsible for the breakpoint.
    lldb::addr_t pc;

    if ((pc = GetPC()) == LLDB_INVALID_ADDRESS)
        return false;

    SetPC(pc - 1);
    return true;
}

uint32_t
RegisterContext_x86_64::ConvertRegisterKindToRegisterNumber(uint32_t kind,
                                                            uint32_t num)
{
    const Process *process = CalculateProcess().get();
    if (process)
    {
        const ArchSpec arch = process->GetTarget().GetArchitecture();
        switch (arch.GetCore())
        {
        default:
            assert(false && "CPU type not supported!");
            break;

        case ArchSpec::eCore_x86_32_i386:
        case ArchSpec::eCore_x86_32_i486:
        case ArchSpec::eCore_x86_32_i486sx:
        {
            if (kind == eRegisterKindGeneric)
            {
                switch (num)
                {
                case LLDB_REGNUM_GENERIC_PC:    return gpr_eip;
                case LLDB_REGNUM_GENERIC_SP:    return gpr_esp;
                case LLDB_REGNUM_GENERIC_FP:    return gpr_ebp;
                case LLDB_REGNUM_GENERIC_FLAGS: return gpr_eflags;
                case LLDB_REGNUM_GENERIC_RA:
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
            {
                switch (num)
                {
                case dwarf_eax:   return gpr_eax;
                case dwarf_edx:   return gpr_edx;
                case dwarf_ecx:   return gpr_ecx;
                case dwarf_ebx:   return gpr_ebx;
                case dwarf_esi:   return gpr_esi;
                case dwarf_edi:   return gpr_edi;
                case dwarf_ebp:   return gpr_ebp;
                case dwarf_esp:   return gpr_esp;
                case dwarf_eip:   return gpr_eip;
                case dwarf_xmm0:  return fpu_xmm0;
                case dwarf_xmm1:  return fpu_xmm1;
                case dwarf_xmm2:  return fpu_xmm2;
                case dwarf_xmm3:  return fpu_xmm3;
                case dwarf_xmm4:  return fpu_xmm4;
                case dwarf_xmm5:  return fpu_xmm5;
                case dwarf_xmm6:  return fpu_xmm6;
                case dwarf_xmm7:  return fpu_xmm7;
                case dwarf_stmm0: return fpu_stmm0;
                case dwarf_stmm1: return fpu_stmm1;
                case dwarf_stmm2: return fpu_stmm2;
                case dwarf_stmm3: return fpu_stmm3;
                case dwarf_stmm4: return fpu_stmm4;
                case dwarf_stmm5: return fpu_stmm5;
                case dwarf_stmm6: return fpu_stmm6;
                case dwarf_stmm7: return fpu_stmm7;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGDB)
            {
                switch (num)
                {
                case gdb_eax    : return gpr_eax;
                case gdb_ebx    : return gpr_ebx;
                case gdb_ecx    : return gpr_ecx;
                case gdb_edx    : return gpr_edx;
                case gdb_esi    : return gpr_esi;
                case gdb_edi    : return gpr_edi;
                case gdb_ebp    : return gpr_ebp;
                case gdb_esp    : return gpr_esp;
                case gdb_eip    : return gpr_eip;
                case gdb_eflags : return gpr_eflags;
                case gdb_cs     : return gpr_cs;
                case gdb_ss     : return gpr_ss;
                case gdb_ds     : return gpr_ds;
                case gdb_es     : return gpr_es;
                case gdb_fs     : return gpr_fs;
                case gdb_gs     : return gpr_gs;
                case gdb_stmm0  : return fpu_stmm0;
                case gdb_stmm1  : return fpu_stmm1;
                case gdb_stmm2  : return fpu_stmm2;
                case gdb_stmm3  : return fpu_stmm3;
                case gdb_stmm4  : return fpu_stmm4;
                case gdb_stmm5  : return fpu_stmm5;
                case gdb_stmm6  : return fpu_stmm6;
                case gdb_stmm7  : return fpu_stmm7;
                case gdb_fcw    : return fpu_fcw;
                case gdb_fsw    : return fpu_fsw;
                case gdb_ftw    : return fpu_ftw;
                case gdb_fpu_cs : return fpu_cs;
                case gdb_ip     : return fpu_ip;
                case gdb_fpu_ds : return fpu_ds; // fpu_fos
                case gdb_dp     : return fpu_dp; // fpu_foo
                case gdb_fop    : return fpu_fop;
                case gdb_xmm0   : return fpu_xmm0;
                case gdb_xmm1   : return fpu_xmm1;
                case gdb_xmm2   : return fpu_xmm2;
                case gdb_xmm3   : return fpu_xmm3;
                case gdb_xmm4   : return fpu_xmm4;
                case gdb_xmm5   : return fpu_xmm5;
                case gdb_xmm6   : return fpu_xmm6;
                case gdb_xmm7   : return fpu_xmm7;
                case gdb_mxcsr  : return fpu_mxcsr;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }
            else if (kind == eRegisterKindLLDB)
            {
                return num;
            }

            break;
        }

        case ArchSpec::eCore_x86_64_x86_64:
        {
            if (kind == eRegisterKindGeneric)
            {
                switch (num)
                {
                case LLDB_REGNUM_GENERIC_PC:    return gpr_rip;
                case LLDB_REGNUM_GENERIC_SP:    return gpr_rsp;
                case LLDB_REGNUM_GENERIC_FP:    return gpr_rbp;
                case LLDB_REGNUM_GENERIC_FLAGS: return gpr_rflags;
                case LLDB_REGNUM_GENERIC_RA:
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
            {
                switch (num)
                {
                case gcc_dwarf_gpr_rax:  return gpr_rax;
                case gcc_dwarf_gpr_rdx:  return gpr_rdx;
                case gcc_dwarf_gpr_rcx:  return gpr_rcx;
                case gcc_dwarf_gpr_rbx:  return gpr_rbx;
                case gcc_dwarf_gpr_rsi:  return gpr_rsi;
                case gcc_dwarf_gpr_rdi:  return gpr_rdi;
                case gcc_dwarf_gpr_rbp:  return gpr_rbp;
                case gcc_dwarf_gpr_rsp:  return gpr_rsp;
                case gcc_dwarf_gpr_r8:   return gpr_r8;
                case gcc_dwarf_gpr_r9:   return gpr_r9;
                case gcc_dwarf_gpr_r10:  return gpr_r10;
                case gcc_dwarf_gpr_r11:  return gpr_r11;
                case gcc_dwarf_gpr_r12:  return gpr_r12;
                case gcc_dwarf_gpr_r13:  return gpr_r13;
                case gcc_dwarf_gpr_r14:  return gpr_r14;
                case gcc_dwarf_gpr_r15:  return gpr_r15;
                case gcc_dwarf_gpr_rip:  return gpr_rip;
                case gcc_dwarf_fpu_xmm0: return fpu_xmm0;
                case gcc_dwarf_fpu_xmm1: return fpu_xmm1;
                case gcc_dwarf_fpu_xmm2: return fpu_xmm2;
                case gcc_dwarf_fpu_xmm3: return fpu_xmm3;
                case gcc_dwarf_fpu_xmm4: return fpu_xmm4;
                case gcc_dwarf_fpu_xmm5: return fpu_xmm5;
                case gcc_dwarf_fpu_xmm6: return fpu_xmm6;
                case gcc_dwarf_fpu_xmm7: return fpu_xmm7;
                case gcc_dwarf_fpu_xmm8: return fpu_xmm8;
                case gcc_dwarf_fpu_xmm9: return fpu_xmm9;
                case gcc_dwarf_fpu_xmm10: return fpu_xmm10;
                case gcc_dwarf_fpu_xmm11: return fpu_xmm11;
                case gcc_dwarf_fpu_xmm12: return fpu_xmm12;
                case gcc_dwarf_fpu_xmm13: return fpu_xmm13;
                case gcc_dwarf_fpu_xmm14: return fpu_xmm14;
                case gcc_dwarf_fpu_xmm15: return fpu_xmm15;
                case gcc_dwarf_fpu_stmm0: return fpu_stmm0;
                case gcc_dwarf_fpu_stmm1: return fpu_stmm1;
                case gcc_dwarf_fpu_stmm2: return fpu_stmm2;
                case gcc_dwarf_fpu_stmm3: return fpu_stmm3;
                case gcc_dwarf_fpu_stmm4: return fpu_stmm4;
                case gcc_dwarf_fpu_stmm5: return fpu_stmm5;
                case gcc_dwarf_fpu_stmm6: return fpu_stmm6;
                case gcc_dwarf_fpu_stmm7: return fpu_stmm7;
                case gcc_dwarf_fpu_ymm0: return fpu_ymm0;
                case gcc_dwarf_fpu_ymm1: return fpu_ymm1;
                case gcc_dwarf_fpu_ymm2: return fpu_ymm2;
                case gcc_dwarf_fpu_ymm3: return fpu_ymm3;
                case gcc_dwarf_fpu_ymm4: return fpu_ymm4;
                case gcc_dwarf_fpu_ymm5: return fpu_ymm5;
                case gcc_dwarf_fpu_ymm6: return fpu_ymm6;
                case gcc_dwarf_fpu_ymm7: return fpu_ymm7;
                case gcc_dwarf_fpu_ymm8: return fpu_ymm8;
                case gcc_dwarf_fpu_ymm9: return fpu_ymm9;
                case gcc_dwarf_fpu_ymm10: return fpu_ymm10;
                case gcc_dwarf_fpu_ymm11: return fpu_ymm11;
                case gcc_dwarf_fpu_ymm12: return fpu_ymm12;
                case gcc_dwarf_fpu_ymm13: return fpu_ymm13;
                case gcc_dwarf_fpu_ymm14: return fpu_ymm14;
                case gcc_dwarf_fpu_ymm15: return fpu_ymm15;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }

            if (kind == eRegisterKindGDB)
            {
                switch (num)
                {
                case gdb_gpr_rax    : return gpr_rax;
                case gdb_gpr_rbx    : return gpr_rbx;
                case gdb_gpr_rcx    : return gpr_rcx;
                case gdb_gpr_rdx    : return gpr_rdx;
                case gdb_gpr_rsi    : return gpr_rsi;
                case gdb_gpr_rdi    : return gpr_rdi;
                case gdb_gpr_rbp    : return gpr_rbp;
                case gdb_gpr_rsp    : return gpr_rsp;
                case gdb_gpr_r8     : return gpr_r8;
                case gdb_gpr_r9     : return gpr_r9;
                case gdb_gpr_r10    : return gpr_r10;
                case gdb_gpr_r11    : return gpr_r11;
                case gdb_gpr_r12    : return gpr_r12;
                case gdb_gpr_r13    : return gpr_r13;
                case gdb_gpr_r14    : return gpr_r14;
                case gdb_gpr_r15    : return gpr_r15;
                case gdb_gpr_rip    : return gpr_rip;
                case gdb_gpr_rflags : return gpr_rflags;
                case gdb_gpr_cs     : return gpr_cs;
                case gdb_gpr_ss     : return gpr_ss;
                case gdb_gpr_ds     : return gpr_ds;
                case gdb_gpr_es     : return gpr_es;
                case gdb_gpr_fs     : return gpr_fs;
                case gdb_gpr_gs     : return gpr_gs;
                case gdb_fpu_stmm0  : return fpu_stmm0;
                case gdb_fpu_stmm1  : return fpu_stmm1;
                case gdb_fpu_stmm2  : return fpu_stmm2;
                case gdb_fpu_stmm3  : return fpu_stmm3;
                case gdb_fpu_stmm4  : return fpu_stmm4;
                case gdb_fpu_stmm5  : return fpu_stmm5;
                case gdb_fpu_stmm6  : return fpu_stmm6;
                case gdb_fpu_stmm7  : return fpu_stmm7;
                case gdb_fpu_fcw    : return fpu_fcw;
                case gdb_fpu_fsw    : return fpu_fsw;
                case gdb_fpu_ftw    : return fpu_ftw;
                case gdb_fpu_cs_64  : return fpu_cs;
                case gdb_fpu_ip     : return fpu_ip;
                case gdb_fpu_ds_64  : return fpu_ds;
                case gdb_fpu_dp     : return fpu_dp;
                case gdb_fpu_fop    : return fpu_fop;
                case gdb_fpu_xmm0   : return fpu_xmm0;
                case gdb_fpu_xmm1   : return fpu_xmm1;
                case gdb_fpu_xmm2   : return fpu_xmm2;
                case gdb_fpu_xmm3   : return fpu_xmm3;
                case gdb_fpu_xmm4   : return fpu_xmm4;
                case gdb_fpu_xmm5   : return fpu_xmm5;
                case gdb_fpu_xmm6   : return fpu_xmm6;
                case gdb_fpu_xmm7   : return fpu_xmm7;
                case gdb_fpu_xmm8   : return fpu_xmm8;
                case gdb_fpu_xmm9   : return fpu_xmm9;
                case gdb_fpu_xmm10  : return fpu_xmm10;
                case gdb_fpu_xmm11  : return fpu_xmm11;
                case gdb_fpu_xmm12  : return fpu_xmm12;
                case gdb_fpu_xmm13  : return fpu_xmm13;
                case gdb_fpu_xmm14  : return fpu_xmm14;
                case gdb_fpu_xmm15  : return fpu_xmm15;
                case gdb_fpu_mxcsr  : return fpu_mxcsr;
                case gdb_fpu_ymm0   : return fpu_ymm0;
                case gdb_fpu_ymm1   : return fpu_ymm1;
                case gdb_fpu_ymm2   : return fpu_ymm2;
                case gdb_fpu_ymm3   : return fpu_ymm3;
                case gdb_fpu_ymm4   : return fpu_ymm4;
                case gdb_fpu_ymm5   : return fpu_ymm5;
                case gdb_fpu_ymm6   : return fpu_ymm6;
                case gdb_fpu_ymm7   : return fpu_ymm7;
                case gdb_fpu_ymm8   : return fpu_ymm8;
                case gdb_fpu_ymm9   : return fpu_ymm9;
                case gdb_fpu_ymm10  : return fpu_ymm10;
                case gdb_fpu_ymm11  : return fpu_ymm11;
                case gdb_fpu_ymm12  : return fpu_ymm12;
                case gdb_fpu_ymm13  : return fpu_ymm13;
                case gdb_fpu_ymm14  : return fpu_ymm14;
                case gdb_fpu_ymm15  : return fpu_ymm15;
                default:
                    return LLDB_INVALID_REGNUM;
                }
            }
            else if (kind == eRegisterKindLLDB)
            {
                return num;
            }
        }
        }
    }

    return LLDB_INVALID_REGNUM;
}

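// Usage illustration: ConvertRegisterKindToRegisterNumber(eRegisterKindGeneric,
// LLDB_REGNUM_GENERIC_PC) yields gpr_rip for an x86_64 core and gpr_eip for
// the 32-bit cores handled above, while any kind/number pair that is not
// mapped falls through to LLDB_INVALID_REGNUM.
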
uint32_t
RegisterContext_x86_64::NumSupportedHardwareWatchpoints()
{
    // Available debug address registers: dr0, dr1, dr2, dr3
    return 4;
}

bool
RegisterContext_x86_64::IsWatchpointVacant(uint32_t hw_index)
{
    bool is_vacant = false;
    RegisterValue value;

    assert(hw_index < NumSupportedHardwareWatchpoints());

    if (m_watchpoints_initialized == false)
    {
        // Reset the debug status and debug control registers
        RegisterValue zero_bits = RegisterValue(uint64_t(0));
        if (!WriteRegister(dr6, zero_bits) || !WriteRegister(dr7, zero_bits))
            assert(false && "Could not initialize watchpoint registers");
        m_watchpoints_initialized = true;
    }

    if (ReadRegister(dr7, value))
    {
        uint64_t val = value.GetAsUInt64();
        is_vacant = (val & (3 << 2*hw_index)) == 0;
    }

    return is_vacant;
}

static uint32_t
size_and_rw_bits(size_t size, bool read, bool write)
{
    uint32_t rw;
    if (read) {
        rw = 0x3; // READ or READ/WRITE
    } else if (write) {
        rw = 0x1; // WRITE
    } else {
        assert(0 && "read and write cannot both be false");
    }

    switch (size) {
    case 1:
        return rw;
    case 2:
        return (0x1 << 2) | rw;
    case 4:
        return (0x3 << 2) | rw;
    case 8:
        return (0x2 << 2) | rw;
    default:
        assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
    }
}

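// Worked example (illustrative): a 4-byte write-only watchpoint gives
// size_and_rw_bits(4, false, true) == (0x3 << 2) | 0x1 == 0xd. For hw_index 1,
// SetHardwareWatchpointWithIndex() below ORs
//     (1 << 2) | (0xd << 20) == 0x00d00004
// into dr7: bit 2 is the local-enable bit for dr1 and bits 23-20 carry its
// rw/len encoding (see the dr7 layout comment in that function).
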
uint32_t
RegisterContext_x86_64::SetHardwareWatchpoint(addr_t addr, size_t size,
                                              bool read, bool write)
{
    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
    uint32_t hw_index;

    for (hw_index = 0; hw_index < num_hw_watchpoints; ++hw_index)
    {
        if (IsWatchpointVacant(hw_index))
            return SetHardwareWatchpointWithIndex(addr, size,
                                                  read, write,
                                                  hw_index);
    }

    return LLDB_INVALID_INDEX32;
}

bool
RegisterContext_x86_64::SetHardwareWatchpointWithIndex(addr_t addr, size_t size,
                                                       bool read, bool write,
                                                       uint32_t hw_index)
{
    const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

    if (num_hw_watchpoints == 0 || hw_index >= num_hw_watchpoints)
        return false;

    if (!(size == 1 || size == 2 || size == 4 || size == 8))
        return false;

    if (read == false && write == false)
        return false;

    if (!IsWatchpointVacant(hw_index))
        return false;

    // Set both dr7 (debug control register) and dri (debug address register).

    // dr7{7-0} encodes the local/global enable bits:
    //  global enable --. .-- local enable
    //                  | |
    //                  v v
    //      dr0 -> bits{1-0}
    //      dr1 -> bits{3-2}
    //      dr2 -> bits{5-4}
    //      dr3 -> bits{7-6}
    //
    // dr7{31-16} encodes the rw/len bits:
    //  b_x+3, b_x+2, b_x+1, b_x
    //      where bits{x+1, x} => rw
    //          0b00: execute, 0b01: write, 0b11: read-or-write,
    //          0b10: io read-or-write (unused)
    //      and bits{x+3, x+2} => len
    //          0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
    //
    //      dr0 -> bits{19-16}
    //      dr1 -> bits{23-20}
    //      dr2 -> bits{27-24}
    //      dr3 -> bits{31-28}
    if (hw_index < num_hw_watchpoints)
    {
        RegisterValue current_dr7_bits;

        if (ReadRegister(dr7, current_dr7_bits))
        {
            uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() |
                                    (1 << (2*hw_index) |
                                     size_and_rw_bits(size, read, write) <<
                                     (16+4*hw_index));

            if (WriteRegister(dr0 + hw_index, RegisterValue(addr)) &&
                WriteRegister(dr7, RegisterValue(new_dr7_bits)))
                return true;
        }
    }

    return false;
}

bool
RegisterContext_x86_64::ClearHardwareWatchpoint(uint32_t hw_index)
{
    if (hw_index < NumSupportedHardwareWatchpoints())
    {
        RegisterValue current_dr7_bits;

        if (ReadRegister(dr7, current_dr7_bits))
        {
            uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() & ~(3 << (2*hw_index));

            if (WriteRegister(dr7, RegisterValue(new_dr7_bits)))
                return true;
        }
    }

    return false;
}

bool
RegisterContext_x86_64::IsWatchpointHit(uint32_t hw_index)
{
    bool is_hit = false;

    if (m_watchpoints_initialized == false)
    {
        // Reset the debug status and debug control registers
        RegisterValue zero_bits = RegisterValue(uint64_t(0));
        if (!WriteRegister(dr6, zero_bits) || !WriteRegister(dr7, zero_bits))
            assert(false && "Could not initialize watchpoint registers");
        m_watchpoints_initialized = true;
    }

    if (hw_index < NumSupportedHardwareWatchpoints())
    {
        RegisterValue value;

        if (ReadRegister(dr6, value))
        {
            uint64_t val = value.GetAsUInt64();
            is_hit = val & (1 << hw_index);
        }
    }

    return is_hit;
}

addr_t
RegisterContext_x86_64::GetWatchpointAddress(uint32_t hw_index)
{
    addr_t wp_monitor_addr = LLDB_INVALID_ADDRESS;

    if (hw_index < NumSupportedHardwareWatchpoints())
    {
        if (!IsWatchpointVacant(hw_index))
        {
            RegisterValue value;

            if (ReadRegister(dr0 + hw_index, value))
                wp_monitor_addr = value.GetAsUInt64();
        }
    }

    return wp_monitor_addr;
}

bool
RegisterContext_x86_64::ClearWatchpointHits()
{
    return WriteRegister(dr6, RegisterValue((uint64_t)0));
}

bool
RegisterContext_x86_64::HardwareSingleStep(bool enable)
{
    enum { TRACE_BIT = 0x100 }; // TF (trap flag) bit of the rflags register
    uint64_t rflags;

    if ((rflags = ReadRegisterAsUnsigned(gpr_rflags, -1UL)) == -1UL)
        return false;

    if (enable)
    {
        if (rflags & TRACE_BIT)
            return true;

        rflags |= TRACE_BIT;
    }
    else
    {
        if (!(rflags & TRACE_BIT))
            return false;

        rflags &= ~TRACE_BIT;
    }

    return WriteRegisterFromUnsigned(gpr_rflags, rflags);
}

#if defined(__linux__) or defined(__FreeBSD__)

ProcessMonitor &
RegisterContext_x86_64::GetMonitor()
{
    ProcessSP base = CalculateProcess();
    ProcessPOSIX *process = static_cast<ProcessPOSIX*>(base.get());
    return process->GetMonitor();
}

bool
RegisterContext_x86_64::ReadGPR()
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.ReadGPR(m_thread.GetID(), &m_gpr, GetGPRSize());
}

bool
RegisterContext_x86_64::ReadFPR()
{
    ProcessMonitor &monitor = GetMonitor();
    if (m_fpr_type == eFXSAVE)
        return monitor.ReadFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));

    if (m_fpr_type == eXSAVE)
        return monitor.ReadRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE);
    return false;
}

bool
RegisterContext_x86_64::WriteGPR()
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.WriteGPR(m_thread.GetID(), &m_gpr, GetGPRSize());
}

bool
RegisterContext_x86_64::WriteFPR()
{
    ProcessMonitor &monitor = GetMonitor();
    if (m_fpr_type == eFXSAVE)
        return monitor.WriteFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));

    if (m_fpr_type == eXSAVE)
        return monitor.WriteRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE);
    return false;
}

bool
RegisterContext_x86_64::ReadRegister(const unsigned reg,
                                     RegisterValue &value)
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.ReadRegisterValue(m_thread.GetID(),
                                     GetRegisterOffset(reg),
                                     GetRegisterName(reg),
                                     GetRegisterSize(reg),
                                     value);
}

bool
RegisterContext_x86_64::WriteRegister(const unsigned reg,
                                      const RegisterValue &value)
{
    ProcessMonitor &monitor = GetMonitor();
    return monitor.WriteRegisterValue(m_thread.GetID(),
                                      GetRegisterOffset(reg),
                                      GetRegisterName(reg),
                                      value);
}

#else

bool
RegisterContext_x86_64::ReadGPR()
{
    llvm_unreachable("not implemented");
    return false;
}

bool
RegisterContext_x86_64::ReadFPR()
{
    llvm_unreachable("not implemented");
    return false;
}

bool
RegisterContext_x86_64::WriteGPR()
{
    llvm_unreachable("not implemented");
    return false;
}

bool
RegisterContext_x86_64::WriteFPR()
{
    llvm_unreachable("not implemented");
    return false;
}

bool
RegisterContext_x86_64::ReadRegister(const unsigned reg,
                                     RegisterValue &value)
{
    llvm_unreachable("not implemented");
    return false;
}

bool
RegisterContext_x86_64::WriteRegister(const unsigned reg,
                                      const RegisterValue &value)
{
    llvm_unreachable("not implemented");
    return false;
}

#endif