/* Copyright (c) 2007, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Joi Sigurdsson
 * Author: Scott Francis
 *
 * Implementation of PreamblePatcher
 */

#include "preamble_patcher.h"

#include "mini_disassembler.h"

// compatibility shims
#include "base/logging.h"

// Definitions of assembly statements we need
#define ASM_JMP32REL 0xE9
#define ASM_INT3 0xCC
#define ASM_JMP32ABS_0 0xFF
#define ASM_JMP32ABS_1 0x25
#define ASM_JMP8REL 0xEB
#define ASM_JCC32REL_0 0x0F
#define ASM_JCC32REL_1_MASK 0x80
#define ASM_NOP 0x90
// X64 opcodes
#define ASM_REXW 0x48
#define ASM_MOVRAX_IMM 0xB8
#define ASM_JMP 0xFF
#define ASM_JMP_RAX 0xE0
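// These opcodes combine to encode the 64-bit trampoline described in
// the comment on ResolveTargetImpl below:
//   90                 nop
//   48 B8 <imm64>      mov rax, <replacement_function>
//   FF E0              jmp rax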

namespace sidestep {

PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
long PreamblePatcher::granularity_ = 0;
long PreamblePatcher::pagesize_ = 0;
bool PreamblePatcher::initialized_ = false;

static const unsigned int kPreamblePageMagic = 0x4347414D; // "MAGC"

// Handle a special case that we see with functions that point into an
// IAT table (including functions linked statically into the
// application): these functions already start with ASM_JMP32*.  For
// instance, malloc() might be implemented as a JMP to __malloc().
// This function follows the initial JMPs for us until we get to the
// place where the actual code is defined.  If we reach stop_before, we
// return the address of the jump that leads to it.  The
// stop_before_trampoline flag is used in 64-bit mode; if true, we also
// return the address of the jump that leads to a trampoline.
// Trampolines are defined as:
//
//    nop
//    mov rax, <replacement_function>
//    jmp rax
//
// See PreamblePatcher::RawPatchWithStub for more information.
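// For example (offsets illustrative), if malloc() begins with
//    E9 0B 01 00 00        ; jmp rel32, relative to the next instruction
// then the first iteration computes
//    new_target = target + 5 + 0x10B
// and the walk continues from there until the bytes at the current
// address no longer match one of the JMP forms handled below.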
void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
                                         unsigned char* stop_before,
                                         bool stop_before_trampoline) {
  if (target == NULL)
    return NULL;
  while (1) {
    unsigned char* new_target;
    if (target[0] == ASM_JMP32REL) {
      // target[1-4] holds the place the jmp goes to, but it's
      // relative to the next instruction.
      int relative_offset;   // Windows guarantees int is 4 bytes
      SIDESTEP_ASSERT(sizeof(relative_offset) == 4);
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 4);
      new_target = target + 5 + relative_offset;
    } else if (target[0] == ASM_JMP8REL) {
      // Visual Studio 7.1 implements new[] as an 8 bit jump to new
      signed char relative_offset;
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 1);
      new_target = target + 2 + relative_offset;
    } else if (target[0] == ASM_JMP32ABS_0 &&
               target[1] == ASM_JMP32ABS_1) {
      // Visual Studio seems to sometimes do it this way instead of the
      // previous way.  Not sure what the rules are, but it was happening
      // with operator new in some binaries.
      void** new_target_v;
      if (kIs64BitBinary) {
        // In 64-bit mode JMPs are RIP-relative, not absolute
        int target_offset;
        memcpy(reinterpret_cast<void*>(&target_offset),
               reinterpret_cast<void*>(target + 2), 4);
        new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
      } else {
        SIDESTEP_ASSERT(sizeof(new_target) == 4);
        memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
      }
      new_target = reinterpret_cast<unsigned char*>(*new_target_v);
    } else {
      break;
    }
    if (new_target == stop_before)
      break;
    if (stop_before_trampoline && *new_target == ASM_NOP
        && new_target[1] == ASM_REXW && new_target[2] == ASM_MOVRAX_IMM)
      break;
    target = new_target;
  }
  return target;
}

// Special-case version of scoped_ptr, used here to avoid a dependency
// on the real scoped_ptr.
class DeleteUnsignedCharArray {
 public:
  explicit DeleteUnsignedCharArray(unsigned char* array) : array_(array) {
  }

  ~DeleteUnsignedCharArray() {
    if (array_) {
      PreamblePatcher::FreePreambleBlock(array_);
    }
  }

  unsigned char* Release() {
    unsigned char* temp = array_;
    array_ = NULL;
    return temp;
  }

 private:
  unsigned char* array_;
};

SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
    void* target_function, void* replacement_function,
    unsigned char* preamble_stub, unsigned long stub_size,
    unsigned long* bytes_needed) {
  // We need to be able to write to a process-local copy of the first
  // MAX_PREAMBLE_STUB_SIZE bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                                    MAX_PREAMBLE_STUB_SIZE,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "writable.");
    return SIDESTEP_ACCESS_DENIED;
  }

  SideStepError error_code = RawPatchWithStub(target_function,
                                              replacement_function,
                                              preamble_stub,
                                              stub_size,
                                              bytes_needed);

  // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
  // target_function to what it was before we started goofing around.
  // We do this regardless of whether the patch succeeded or not.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                               MAX_PREAMBLE_STUB_SIZE,
                               old_target_function_protect,
                               &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false &&
                    "Failed to restore protection to target function.");
    // We must not return an error here because the function has
    // likely actually been patched, and returning an error might
    // cause our client code not to unpatch it.  So we just keep
    // going.
  }

  if (SIDESTEP_SUCCESS != error_code) {  // Testing RawPatchWithStub, above
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (from before our patch).
  //
  // FlushInstructionCache is actually a no-op, at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  return SIDESTEP_SUCCESS;
}

SideStepError PreamblePatcher::RawPatch(void* target_function,
                                        void* replacement_function,
                                        void** original_function_stub) {
  if (!target_function || !replacement_function || !original_function_stub ||
      (*original_function_stub) || target_function == replacement_function) {
    SIDESTEP_ASSERT(false && "Preconditions not met");
    return SIDESTEP_INVALID_PARAMETER;
  }

  BOOL succeeded = FALSE;

  // First, deal with a special case that we see with functions that
  // point into an IAT table (including functions linked statically
  // into the application): these functions already start with
  // ASM_JMP32REL.  For instance, malloc() might be implemented as a
  // JMP to __malloc().  In that case, we replace the destination of
  // the JMP (__malloc), rather than the JMP itself (malloc).  This
  // way we get the correct behavior no matter how malloc gets called.
  void* new_target = ResolveTarget(target_function);
  if (new_target != target_function) {
    target_function = new_target;
  }

  // In 64-bit mode, preamble_stub must be within 2GB of target function
  // so that if target contains a jump, we can translate it.
  unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
  if (!preamble_stub) {
    SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // Frees the array at end of scope.
  DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);

  SideStepError error_code = RawPatchWithStubAndProtections(
      target_function, replacement_function, preamble_stub,
      MAX_PREAMBLE_STUB_SIZE, NULL);

  if (SIDESTEP_SUCCESS != error_code) {
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (from before our patch).
  //
  // FlushInstructionCache is actually a no-op, at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  SIDESTEP_LOG("PreamblePatcher::RawPatch successfully patched.");

  // Detach the scoped pointer so the memory is not freed.
  *original_function_stub =
      reinterpret_cast<void*>(guard_preamble_stub.Release());
  return SIDESTEP_SUCCESS;
}

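// A minimal usage sketch (illustrative only; MyMalloc and
// original_malloc are hypothetical client-side names, not part of this
// file):
//
//   static void* original_malloc = NULL;
//   SideStepError err = PreamblePatcher::RawPatch(
//       reinterpret_cast<void*>(&malloc),
//       reinterpret_cast<void*>(&MyMalloc),
//       &original_malloc);
//   if (err == SIDESTEP_SUCCESS) {
//     // Calls through original_malloc now reach the unpatched code;
//     // pass the same pointer to Unpatch() to restore malloc.
//   }
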
SideStepError PreamblePatcher::Unpatch(void* target_function,
                                       void* replacement_function,
                                       void* original_function_stub) {
  SIDESTEP_ASSERT(target_function && replacement_function &&
                  original_function_stub);
  if (!target_function || !replacement_function ||
      !original_function_stub) {
    return SIDESTEP_INVALID_PARAMETER;
  }

  // Before unpatching, target_function should be a JMP to
  // replacement_function.  If it's not, then either it's an error, or
  // we're falling into the case where the original instruction was a
  // JMP, and we patched the jumped-to address rather than the JMP
  // itself.  (For instance, if malloc() is just a JMP to __malloc(),
  // we patched __malloc() and not malloc().)
  unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
  target = reinterpret_cast<unsigned char*>(
      ResolveTargetImpl(
          target, reinterpret_cast<unsigned char*>(replacement_function),
          true));
  // We should end at the function we patched.  When we patch, we insert
  // an ASM_JMP32REL instruction, so look for that as a sanity check.
  if (target[0] != ASM_JMP32REL) {
    SIDESTEP_ASSERT(false &&
                    "target_function does not look like it was patched.");
    return SIDESTEP_INVALID_PARAMETER;
  }

  const unsigned int kRequiredTargetPatchBytes = 5;

  // We need to be able to write to a process-local copy of the first
  // kRequiredTargetPatchBytes bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                                    kRequiredTargetPatchBytes,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "writable.");
    return SIDESTEP_ACCESS_DENIED;
  }

  unsigned char* preamble_stub = reinterpret_cast<unsigned char*>(
                                   original_function_stub);

  // Disassemble the preamble of the stub and copy the bytes back to the
  // target.  If any jumps were re-encoded when the preamble was built,
  // we need to convert them back to the original REL8 jumps in the
  // target.
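  //
  // Loop invariant: preamble_bytes tracks our position in the stub and
  // target_bytes our position in the original function; the two advance
  // at different rates wherever an instruction was re-encoded at a
  // different size when the stub was built.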
  MiniDisassembler disassembler;
  unsigned int preamble_bytes = 0;
  unsigned int target_bytes = 0;
  while (target_bytes < kRequiredTargetPatchBytes) {
    unsigned int cur_bytes = 0;
    InstructionType instruction_type =
        disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
    if (IT_JUMP == instruction_type) {
      unsigned int jump_bytes = 0;
      SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
      if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
        jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
                                       cur_bytes, target + target_bytes,
                                       &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
      }
      if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
        SIDESTEP_ASSERT(false &&
                        "Found unsupported jump instruction in stub!!");
        return SIDESTEP_UNSUPPORTED_INSTRUCTION;
      }
      target_bytes += jump_bytes;
    } else if (IT_GENERIC == instruction_type) {
      if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
        unsigned int mov_bytes = 0;
        if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
                                     target + target_bytes, &mov_bytes,
                                     MAX_PREAMBLE_STUB_SIZE)
                                     != SIDESTEP_SUCCESS) {
          SIDESTEP_ASSERT(false &&
                          "Found unsupported generic instruction in stub!!");
          return SIDESTEP_UNSUPPORTED_INSTRUCTION;
        }
      } else {
        memcpy(reinterpret_cast<void*>(target + target_bytes),
               reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
                   original_function_stub) + preamble_bytes), cur_bytes);
        target_bytes += cur_bytes;
      }
    } else {
      SIDESTEP_ASSERT(false &&
                      "Found unsupported instruction in stub!!");
      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
    }
    preamble_bytes += cur_bytes;
  }

  FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub));

  // Restore the protection of the first kRequiredTargetPatchBytes bytes of
  // target to what it was before we started goofing around.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                               kRequiredTargetPatchBytes,
                               old_target_function_protect,
                               &old_target_function_protect);

  // Flush the instruction cache to make sure the processor doesn't execute
  // the old version of the instructions (from before our patch).
  //
  // See the comment on FlushInstructionCache elsewhere in this file.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    return SIDESTEP_UNEXPECTED;
  }

  SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched.");
  return SIDESTEP_SUCCESS;
}

void PreamblePatcher::Initialize() {
  if (!initialized_) {
    SYSTEM_INFO si = { 0 };
    ::GetSystemInfo(&si);
    granularity_ = si.dwAllocationGranularity;
    pagesize_ = si.dwPageSize;
    initialized_ = true;
  }
}

unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
  PreamblePage* preamble_page = preamble_pages_;
  while (preamble_page != NULL) {
    if (preamble_page->free_ != NULL) {
      __int64 val = reinterpret_cast<__int64>(preamble_page) -
          reinterpret_cast<__int64>(target);
      if ((val > 0 && val + pagesize_ <= INT_MAX) ||
          (val < 0 && val >= INT_MIN)) {
        break;
      }
    }
    preamble_page = preamble_page->next_;
  }

  // The free_ member of the page is used to store the next available block
  // of memory to use, or NULL if there are no chunks available, in which
  // case we'll allocate a new page.
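  //
  // Page layout (each chunk is MAX_PREAMBLE_STUB_SIZE bytes):
  //   [PreamblePage header][chunk][chunk]...[chunk]
  // The first pointer-sized bytes of each free chunk hold the address
  // of the next free chunk, forming a singly linked free list.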
  if (preamble_page == NULL || preamble_page->free_ == NULL) {
    // Create a new preamble page and initialize the free list
    preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
    SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
    if (preamble_page == NULL) {
      // AllocPageNear can fail, and the assert above is a no-op in
      // release builds, so bail out rather than dereference NULL below.
      return NULL;
    }
    void** pp = &preamble_page->free_;
    unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
        MAX_PREAMBLE_STUB_SIZE;
    unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
        pagesize_;
    while (ptr < limit) {
      *pp = ptr;
      pp = reinterpret_cast<void**>(ptr);
      ptr += MAX_PREAMBLE_STUB_SIZE;
    }
    *pp = NULL;
    // Insert the new page into the list
    preamble_page->magic_ = kPreamblePageMagic;
    preamble_page->next_ = preamble_pages_;
    preamble_pages_ = preamble_page;
  }
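  // Pop the head of the free list: the chunk we hand out stores the
  // address of the next free chunk in its first pointer-sized bytes.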
  unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
  preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
  return ret;
}

void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
  SIDESTEP_ASSERT(block != NULL);
  SIDESTEP_ASSERT(granularity_ != 0);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
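  // granularity_ is the VirtualAlloc allocation granularity, a power of
  // two in practice, so masking off the low bits rounds down to the
  // start of the allocation, where the PreamblePage header lives.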
  ptr -= ptr & (granularity_ - 1);
  PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
  SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
  *(reinterpret_cast<void**>(block)) = preamble_page->free_;
  preamble_page->free_ = block;
}

void* PreamblePatcher::AllocPageNear(void* target) {
  MEMORY_BASIC_INFORMATION mbi = { 0 };
  if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
    SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
    return NULL;
  }
  if (initialized_ == false) {
    PreamblePatcher::Initialize();
    SIDESTEP_ASSERT(initialized_);
  }
  void* pv = NULL;
  unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
      mbi.AllocationBase);
  __int64 i = 1;
  bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
  while (pv == NULL) {
    __int64 val = reinterpret_cast<__int64>(allocation_base) -
        (i * granularity_);
    if (high_target &&
        reinterpret_cast<__int64>(target) - val > INT_MAX) {
      // We're further than 2GB from the target
      break;
    } else if (val <= 0) {
      // We've run off the bottom of the address space
      break;
    }
    pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
                            (i++ * granularity_)),
                        pagesize_, MEM_COMMIT | MEM_RESERVE,
                        PAGE_EXECUTE_READWRITE);
  }

  // We couldn't allocate low, try to allocate high
  if (pv == NULL) {
    i = 1;
    // Round up to the next multiple of the allocation granularity
    allocation_base = reinterpret_cast<unsigned char*>(
        (reinterpret_cast<__int64>(target) &
        (~(granularity_ - 1))) + granularity_);
    while (pv == NULL) {
      __int64 val = reinterpret_cast<__int64>(allocation_base) +
          (i * granularity_) - reinterpret_cast<__int64>(target);
      if (val > INT_MAX || val < 0) {
        // We're too far away, or the address computation overflowed
        break;
      }
      pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
                              (i++ * granularity_)),
                          pagesize_, MEM_COMMIT | MEM_RESERVE,
                          PAGE_EXECUTE_READWRITE);
    }
  }
  return pv;
}

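// The predicates below match specific x86/x64 encodings; sizes are the
// full instruction length, including the displacement or immediate:
//   70..7F rel8            Jcc rel8   (short conditional jump, 2 bytes)
//   0F 80..8F rel32        Jcc rel32  (near conditional jump, 6 bytes)
//   E9 rel32               JMP rel32  (near relative jump, 5 bytes)
//   FF /2 disp32           CALL [mem] (near absolute indirect call, 6 bytes)
//   E8 rel32               CALL rel32 (near relative call, 5 bytes)
//   48 8B /r disp32        MOV r64, [RIP+disp32]  (RIP-relative load, 7 bytes)
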
bool PreamblePatcher::IsShortConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  // Jcc rel8 opcodes occupy exactly 0x70..0x7F, so compare the full
  // high nibble.
  return (*(target) & 0xF0) == 0x70 && instruction_size == 2;
}

bool PreamblePatcher::IsNearConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  // Jcc rel32 is encoded as 0F 80..0F 8F, so compare the full high
  // nibble of the second opcode byte.
  return *(target) == 0x0f && (*(target + 1) & 0xF0) == 0x80 &&
      instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe9 && instruction_size == 5;
}

bool PreamblePatcher::IsNearAbsoluteCall(
    unsigned char* target,
    unsigned int instruction_size) {
  // CALL r/m is FF /2, i.e. the ModRM reg field (bits 3..5) must be
  // 010b, so test all three bits rather than just one.
  return *(target) == 0xff && (*(target + 1) & 0x38) == 0x10 &&
      instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeCall(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe8 && instruction_size == 5;
}

bool PreamblePatcher::IsMovWithDisplacement(
    unsigned char* target,
    unsigned int instruction_size) {
  // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
  return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
      (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
}

SideStepError PreamblePatcher::PatchShortConditionalJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // The rel8 displacement is signed, so sign-extend it before doing the
  // address arithmetic.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 6;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 6;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    // 0f 8x xx xx xx xx = Jcc rel32off
    unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(&jmpcode), 2);
    memcpy(reinterpret_cast<void*>(target + 2),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }

  return SIDESTEP_SUCCESS;
}

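// Worked example for the fixup below (addresses illustrative): if the
// stub contains "E8 10 00 00 00" (call rel32) copied from source
// address S, the original call destination is S + 5 + 0x10.  When the
// instruction is written back to target address T, the rel32 field must
// become (S + 5 + 0x10) - (T + 5) so the call still reaches the same
// destination.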
SideStepError PreamblePatcher::PatchNearJumpOrCall(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
  unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
  unsigned char* original_jump_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
  unsigned char* stub_jump_from = target + instruction_size;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  // Only jumps may be rewritten in the short form below; shortening a
  // call would skip pushing the return address.
  bool is_jump = source[0] == ASM_JMP32REL ||
      (source[0] == ASM_JCC32REL_0 &&
       (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK);
  if (is_jump &&
      fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN) {
    *target_bytes = 2;
    if (target_size > *target_bytes) {
      // If the new offset is in range, use a short jump instead of a near
      // jump.
      if (source[0] == ASM_JCC32REL_0 &&
          (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
        unsigned short jmpcode = (static_cast<unsigned char>(
            fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
        memcpy(reinterpret_cast<void*>(target),
               reinterpret_cast<void*>(&jmpcode),
               2);
      } else {
        target[0] = ASM_JMP8REL;
        target[1] = static_cast<unsigned char>(fixup_jump_offset);
      }
    }
  } else {
    *target_bytes = instruction_size;
    if (target_size > *target_bytes) {
      memcpy(reinterpret_cast<void*>(target),
             reinterpret_cast<void*>(source),
             jmp_offset_in_instruction);
      memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
             reinterpret_cast<void*>(&fixup_jump_offset),
             4);
    }
  }

  return SIDESTEP_SUCCESS;
}

SideStepError PreamblePatcher::PatchMovWithDisplacement(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 7);
  const int mov_offset_in_instruction = 3;  // 0x48 0x8b 0x0d <offset>
  unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
  unsigned char* stub_mov_from = target + instruction_size;
  __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
  if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
        "Unable to fix up near MOV because target is too far away.");
    return SIDESTEP_UNEXPECTED;
  }
  *target_bytes = instruction_size;
  if (target_size > *target_bytes) {
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(source),
           mov_offset_in_instruction);
    memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
           reinterpret_cast<void*>(&fixup_mov_offset),
           4);
  }
  return SIDESTEP_SUCCESS;
}

}  // namespace sidestep