// Copyright (c) 2010 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// stackwalker_amd64.cc: amd64-specific stackwalker.
//
// See stackwalker_amd64.h for documentation.
//
// Authors: Mark Mentovai, Ted Mielczarek

#include <assert.h>

#include "common/scoped_ptr.h"
#include "google_breakpad/processor/call_stack.h"
#include "google_breakpad/processor/memory_region.h"
#include "google_breakpad/processor/source_line_resolver_interface.h"
#include "google_breakpad/processor/stack_frame_cpu.h"
#include "google_breakpad/processor/system_info.h"
#include "processor/cfi_frame_info.h"
#include "processor/logging.h"
#include "processor/stackwalker_amd64.h"

namespace google_breakpad {


const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flag here really means that the walker should assume they're unchanged
  // if the CFI doesn't mention them --- clearly wrong for $rip and $rsp.
  { "$rax", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { "$rdx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { "$rcx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { "$rbx", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { "$rsi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { "$rdi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { "$rbp", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  { "$rsp", ".cfa", false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { "$r8", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8,  &MDRawContextAMD64::r8 },
  { "$r9", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9,  &MDRawContextAMD64::r9 },
  { "$r10", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { "$r11", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { "$r12", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { "$r13", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { "$r14", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { "$r15", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  { "$rip", ".ra", false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};
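
// For reference, these are the register names that appear in Breakpad
// symbol files' STACK CFI records, with ".cfa" (canonical frame address)
// and ".ra" (return address) as the special names paired with $rsp and
// $rip above. A hypothetical record this map lets the walker interpret
// (addresses and offsets are made up for illustration):
//
//   STACK CFI INIT 40b2e0 4c .cfa: $rsp 8 + .ra: .cfa 8 - ^
//   STACK CFI 40b2e1 .cfa: $rsp 16 + $rbp: .cfa 16 - ^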

StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info,
                                   const MDRawContextAMD64* context,
                                   MemoryRegion* memory,
                                   const CodeModules* modules,
                                   StackFrameSymbolizer* resolver_helper)
    : Stackwalker(system_info, memory, modules, resolver_helper),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}

uint64_t StackFrameAMD64::ReturnAddress() const {
  assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP);
  return context.rip;
}

StackFrame* StackwalkerAMD64::GetContextFrame() {
  if (!context_) {
    BPLOG(ERROR) << "Can't get context frame without context";
    return NULL;
  }

  StackFrameAMD64* frame = new StackFrameAMD64();

  // The instruction pointer is stored directly in a register, so pull it
  // straight out of the CPU context structure.
  frame->context = *context_;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
  frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
  frame->instruction = frame->context.rip;

  return frame;
}

StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo(
    const vector<StackFrame*> &frames,
    CFIFrameInfo* cfi_frame_info) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());

  scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
  if (!cfi_walker_
      .FindCallerRegisters(*memory_, *cfi_frame_info,
                           last_frame->context, last_frame->context_validity,
                           &frame->context, &frame->context_validity))
    return NULL;

  // Make sure we recovered all the essentials.
  static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
                                 | StackFrameAMD64::CONTEXT_VALID_RSP);
  if ((frame->context_validity & essentials) != essentials)
    return NULL;

  frame->trust = StackFrame::FRAME_TRUST_CFI;
  return frame.release();
}

StackFrameAMD64* StackwalkerAMD64::GetCallerByFramePointerRecovery(
    const vector<StackFrame*>& frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rsp = last_frame->context.rsp;
  uint64_t last_rbp = last_frame->context.rbp;

  // Assume the presence of a frame pointer. This is not mandated by the
  // AMD64 ABI (cf. section 3.2.2, footnote 7), though it is typical for
  // compilers to preserve the frame pointer rather than treat %rbp as a
  // general-purpose register.
  //
  // Under this assumption, the CALL instruction pushes the return address
  // onto the stack and sets %rip to the procedure to enter. The procedure
  // then establishes the stack frame with a prologue that PUSHes the current
  // %rbp onto the stack, MOVes the current %rsp to %rbp, and then allocates
  // space for any local variables. Using this procedure linking information,
  // it is possible to recover the caller's frame from the callee's %rbp:
  //
  // %caller_rsp = %callee_rbp + 16
  // %caller_rip = *(%callee_rbp + 8)
  // %caller_rbp = *(%callee_rbp)
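  //
  // Such a prologue typically looks like the following sketch in assembly
  // (illustrative AT&T syntax, not taken from any particular binary):
  //
  //   push %rbp         # save the caller's frame pointer
  //   mov  %rsp, %rbp   # establish this frame's pointer
  //   sub  $0x20, %rsp  # reserve space for locals (size is illustrative)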

  uint64_t caller_rip, caller_rbp;
  if (memory_->GetMemoryAtAddress(last_rbp + 8, &caller_rip) &&
      memory_->GetMemoryAtAddress(last_rbp, &caller_rbp)) {
    uint64_t caller_rsp = last_rbp + 16;

    // Simple sanity check that the stack is growing downwards as expected.
    if (caller_rbp < last_rbp || caller_rsp < last_rsp)
      return NULL;

    StackFrameAMD64* frame = new StackFrameAMD64();
    frame->trust = StackFrame::FRAME_TRUST_FP;
    frame->context = last_frame->context;
    frame->context.rip = caller_rip;
    frame->context.rsp = caller_rsp;
    frame->context.rbp = caller_rbp;
    frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                              StackFrameAMD64::CONTEXT_VALID_RSP |
                              StackFrameAMD64::CONTEXT_VALID_RBP;
    return frame;
  }

  return NULL;
}

StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan(
    const vector<StackFrame*> &frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rsp = last_frame->context.rsp;
  uint64_t caller_rip_address, caller_rip;

  if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip,
                            frames.size() == 1 /* is_context_frame */)) {
    // No plausible return address was found.
    return NULL;
  }

  // Create a new stack frame (ownership will be transferred to the caller)
  // and fill it in.
  StackFrameAMD64* frame = new StackFrameAMD64();

  frame->trust = StackFrame::FRAME_TRUST_SCAN;
  frame->context = last_frame->context;
  frame->context.rip = caller_rip;
  // The caller's %rsp is directly underneath the return address pushed by
  // the call.
  frame->context.rsp = caller_rip_address + 8;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                            StackFrameAMD64::CONTEXT_VALID_RSP;
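
  // A hypothetical picture of the layout this assumes (the stack grows
  // toward lower addresses, so the caller's frame sits higher in memory):
  //
  //   caller_rip_address - 8 : possibly the caller's saved %rbp
  //   caller_rip_address     : candidate return address found by the scan
  //   caller_rip_address + 8 : the caller's %rsp once the CALL returns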

  // Other unwinders give up if they don't have an %rbp value, so see if we
  // can pass some plausible value on.
  if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
    // Functions typically push their caller's %rbp immediately upon entry,
    // and then set %rbp to point to that. So if the callee's %rbp is
    // pointing to the first word below the alleged return address, presume
    // that the caller's %rbp is saved there.
    if (caller_rip_address - 8 == last_frame->context.rbp) {
      uint64_t caller_rbp = 0;
      if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
          caller_rbp > caller_rip_address) {
        frame->context.rbp = caller_rbp;
        frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
      }
    } else if (last_frame->context.rbp >= caller_rip_address + 8) {
      // If the callee's %rbp is plausible as a value for the caller's
      // %rbp, presume that the callee left it unchanged.
      frame->context.rbp = last_frame->context.rbp;
      frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
    }
  }

  return frame;
}

StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack,
                                             bool stack_scan_allowed) {
  if (!memory_ || !stack) {
    BPLOG(ERROR) << "Can't get caller frame without memory or stack";
    return NULL;
  }

  const vector<StackFrame*> &frames = *stack->frames();
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  scoped_ptr<StackFrameAMD64> new_frame;

  // If we have DWARF CFI information, use it.
  scoped_ptr<CFIFrameInfo> cfi_frame_info(
      frame_symbolizer_->FindCFIFrameInfo(last_frame));
  if (cfi_frame_info.get())
    new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));

  // If CFI was not available or failed, try using frame pointer recovery.
  if (!new_frame.get()) {
    new_frame.reset(GetCallerByFramePointerRecovery(frames));
  }

  // If all else fails, fall back to stack scanning.
  if (stack_scan_allowed && !new_frame.get()) {
    new_frame.reset(GetCallerByStackScan(frames));
  }

  // If nothing worked, tell the caller.
  if (!new_frame.get())
    return NULL;

  if (system_info_->os_short == "nacl") {
    // Apply constraints from Native Client's x86-64 sandbox.  These
    // registers have the 4GB-aligned sandbox base address (from r15)
    // added to them, and only the bottom 32 bits are relevant for
    // stack walking.
    new_frame->context.rip = static_cast<uint32_t>(new_frame->context.rip);
    new_frame->context.rsp = static_cast<uint32_t>(new_frame->context.rsp);
    new_frame->context.rbp = static_cast<uint32_t>(new_frame->context.rbp);
  }

  // Treat an instruction address of 0 as end-of-stack.
  if (new_frame->context.rip == 0)
    return NULL;

  // If the new stack pointer is at or below the old one, then that's
  // clearly incorrect. Treat this as end-of-stack to enforce progress and
  // avoid infinite loops.
  if (new_frame->context.rsp <= last_frame->context.rsp)
    return NULL;

  // new_frame->context.rip is the return address, which is the instruction
  // after the CALL that caused us to arrive at the callee. Set
  // new_frame->instruction to one less than that, so it points within the
  // CALL instruction. See StackFrame::instruction for details, and
  // StackFrameAMD64::ReturnAddress.
  new_frame->instruction = new_frame->context.rip - 1;

  return new_frame.release();
}
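
// For orientation, a minimal driver of this walker might look like the
// following sketch. The setup names are hypothetical, and the exact
// Stackwalker::Walk() signature has varied across Breakpad revisions.
//
//   StackwalkerAMD64 walker(&system_info, &raw_context, &stack_memory,
//                           &modules, &frame_symbolizer);
//   CallStack call_stack;
//   vector<const CodeModule*> modules_without_symbols;
//   vector<const CodeModule*> modules_with_corrupt_symbols;
//   walker.Walk(&call_stack, &modules_without_symbols,
//               &modules_with_corrupt_symbols);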

}  // namespace google_breakpad