// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome_frame/vtable_patch_manager.h"

#include <atlcomcli.h>

#include <algorithm>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "chrome_frame/function_stub.h"
#include "chrome_frame/pin_module.h"

namespace vtable_patch {

// The number of times we retry a patch/unpatch operation in case of
// VM races with other 3rd party software trying to patch the same thing.
const int kMaxRetries = 3;

// We hold a lock over all patching operations to make sure that we don't
// e.g. race on VM operations to the same patches, or to physical pages
// shared across different VTABLEs.
base::Lock patch_lock_;

namespace internal {
// Because other parties in our process might be attempting to patch the same
// virtual tables at the same time, we have a race to modify the VM protections
// on the pages. We also need to do a compare/swap type operation when we
// modify the function, so as to be sure that we grab the most recent value.
// Hence the SEH blocks and the nasty-looking compare/swap operation.
bool ReplaceFunctionPointer(void** entry, void* new_proc, void* curr_proc) {
  __try {
    base::subtle::Atomic32 prev_value;

    prev_value = base::subtle::NoBarrier_CompareAndSwap(
        reinterpret_cast<base::subtle::Atomic32 volatile*>(entry),
        reinterpret_cast<base::subtle::Atomic32>(curr_proc),
        reinterpret_cast<base::subtle::Atomic32>(new_proc));

    return curr_proc == reinterpret_cast<void*>(prev_value);
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // Oops, we took an exception on access.
  }

  return false;
}

}  // namespace internal
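
// Illustrative sketch (not part of the original code; |vtable|, |index| and
// |hook_code| are hypothetical): ReplaceFunctionPointer acts as a guarded
// compare-and-swap on a single vtable slot. The write only happens if the
// slot still holds |curr_proc|, and false means we either lost a race to
// another patcher or faulted while touching the slot.
//
//   void* current = reinterpret_cast<void*>(vtable[index]);
//   if (internal::ReplaceFunctionPointer(
//           reinterpret_cast<void**>(&vtable[index]), hook_code, current)) {
//     // The slot still held |current|; |hook_code| is now installed.
//   } else {
//     // Lost the race or faulted; re-read the slot and retry if desired.
//   }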

// Convenient definition of a VTABLE
typedef PROC* Vtable;

// Returns a pointer to the VTable of a COM interface.
// @param unknown [in] A pointer to the COM interface.
inline Vtable GetIFVTable(void* unknown) {
  return reinterpret_cast<Vtable>(*reinterpret_cast<void**>(unknown));
}
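
// Illustrative note (an informal sketch, not original code): the first
// pointer-sized field of any COM object is the address of its vtable, which
// is why a single dereference above yields the table. For a hypothetical
// IUnknown* |unknown|, the first three slots are the IUnknown methods:
//
//   Vtable vtable = GetIFVTable(unknown);
//   PROC query_interface = vtable[0];  // IUnknown::QueryInterface slot.
//   PROC add_ref = vtable[1];          // IUnknown::AddRef slot.
//   PROC release = vtable[2];          // IUnknown::Release slot.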

HRESULT PatchInterfaceMethods(void* unknown, MethodPatchInfo* patches) {
  // Do some sanity checking of the input arguments.
  if (NULL == unknown || NULL == patches) {
    NOTREACHED();
    return E_INVALIDARG;
  }

  Vtable vtable = GetIFVTable(unknown);
  DCHECK(vtable);

  // All VM operations, patching, and manipulation of MethodPatchInfo are
  // done under a global lock, to ensure multiple threads don't race,
  // whether on an individual patch or on VM operations to the same
  // physical pages.
  base::AutoLock lock(patch_lock_);

  for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
    if (it->stub_ != NULL) {
      // If this fires it means that we are using the same VTable
      // information to patch two different interfaces, or we've lost a
      // race with another thread that's patching the same interface.
      DLOG(WARNING) << "Attempting to patch two different VTables with the "
          "same VTable information, or patching the same interface on "
          "multiple threads";
      continue;
    }

    PROC original_fn = vtable[it->index_];
    FunctionStub* stub = NULL;

#ifndef NDEBUG
    stub = FunctionStub::FromCode(original_fn);
    if (stub != NULL) {
      DLOG(ERROR) << "attempt to patch a function that's already patched";
      DCHECK(stub->destination_function() ==
             reinterpret_cast<uintptr_t>(it->method_)) <<
             "patching the same method multiple times with different hooks?";
      continue;
    }
#endif

    stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(original_fn),
                                it->method_);
    if (!stub) {
      NOTREACHED();
      return E_OUTOFMEMORY;
    }

    // Do the VM operations and the patching in a loop, to try to ensure
    // we succeed even if we race against other third parties patching the
    // same VM pages or the same vtable entries.
    bool succeeded = false;
    for (int i = 0; !succeeded && i < kMaxRetries; ++i) {
      DWORD protect = 0;
      if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC),
                            PAGE_EXECUTE_READWRITE, &protect)) {
        HRESULT hr = AtlHresultFromLastError();
        DLOG(ERROR) << "VirtualProtect failed 0x" << std::hex << hr;

        // Go around again in the feeble hope that this is
        // a temporary problem.
        continue;
      }
      original_fn = vtable[it->index_];
      stub->set_argument(reinterpret_cast<uintptr_t>(original_fn));
      succeeded = internal::ReplaceFunctionPointer(
          reinterpret_cast<void**>(&vtable[it->index_]), stub->code(),
          original_fn);

      if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect,
                            &protect)) {
        DLOG(ERROR) << "VirtualProtect failed to restore protection";
      }
    }

    if (!succeeded) {
      FunctionStub::Destroy(stub);
      stub = NULL;

      DLOG(ERROR) << "Failed to patch VTable.";
      return E_FAIL;
    } else {
      // Success, save the stub we created.
      it->stub_ = stub;
      chrome_frame::PinModule();
    }
  }

  return S_OK;
}

HRESULT UnpatchInterfaceMethods(MethodPatchInfo* patches) {
  base::AutoLock lock(patch_lock_);

  for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
    if (it->stub_) {
      DCHECK(it->stub_->destination_function() ==
          reinterpret_cast<uintptr_t>(it->method_));
      // Modify the stub to just jump directly to the original function.
      it->stub_->BypassStub(reinterpret_cast<void*>(it->stub_->argument()));
      it->stub_ = NULL;
      // Leave the stub in memory so that we won't break any possible chains.

      // TODO(siggi): why not restore the original VTBL pointer here, provided
      //    we haven't been chained?
    } else {
      DLOG(WARNING) << "attempt to unpatch a function that wasn't patched";
    }
  }

  return S_OK;
}
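
// Usage sketch (hypothetical names, for illustration only; the authoritative
// declaration of MethodPatchInfo is in chrome_frame/vtable_patch_manager.h).
// Callers describe the vtable slots to hook in an array whose last entry has
// index_ == -1, hand any instance of the target interface to
// PatchInterfaceMethods, and later pass the same array to
// UnpatchInterfaceMethods:
//
//   MethodPatchInfo my_patches[2] = {};
//   my_patches[0].index_ = 12;               // Vtable slot to hook.
//   my_patches[0].method_ = MyNavigateHook;  // Hypothetical hook routine.
//   my_patches[1].index_ = -1;               // Sentinel terminates the array.
//
//   HRESULT hr = vtable_patch::PatchInterfaceMethods(web_browser2, my_patches);
//   ...
//   if (SUCCEEDED(hr))
//     vtable_patch::UnpatchInterfaceMethods(my_patches);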

// Disabled for now as we're not using it atm.
#if 0

DynamicPatchManager::DynamicPatchManager(const MethodPatchInfo* patch_prototype)
    : patch_prototype_(patch_prototype) {
  DCHECK(patch_prototype_);
  DCHECK(patch_prototype_->stub_ == NULL);
}

DynamicPatchManager::~DynamicPatchManager() {
  UnpatchAll();
}

HRESULT DynamicPatchManager::PatchObject(void* unknown) {
  int patched_methods = 0;
  for (; patch_prototype_[patched_methods].index_ != -1; patched_methods++) {
    // If you hit this, then you are likely using the prototype instance for
    // patching in _addition_ to this class.  This is not a good idea :)
    DCHECK(patch_prototype_[patched_methods].stub_ == NULL);
  }

  // Prepare a new patch object using the patch info from the prototype.
  int mem_size = sizeof(PatchedObject) +
                 sizeof(MethodPatchInfo) * patched_methods;
  PatchedObject* entry = reinterpret_cast<PatchedObject*>(new char[mem_size]);
  entry->vtable_ = GetIFVTable(unknown);
  memcpy(entry->patch_info_, patch_prototype_,
         sizeof(MethodPatchInfo) * (patched_methods + 1));

  patch_list_lock_.Acquire();

  // See if we've already patched this vtable before.
  // The search is done via the == operator of the PatchedObject class.
  PatchList::const_iterator it = std::find(patch_list_.begin(),
                                           patch_list_.end(), entry);
  HRESULT hr;
  if (it == patch_list_.end()) {
    hr = PatchInterfaceMethods(unknown, entry->patch_info_);
    if (SUCCEEDED(hr)) {
      patch_list_.push_back(entry);
      entry = NULL;  // Ownership transferred to the list.
    }
  } else {
    hr = S_FALSE;
  }

  patch_list_lock_.Release();

  // |entry| was allocated as a char array, so release it the same way.
  delete [] reinterpret_cast<char*>(entry);

  return hr;
}

bool DynamicPatchManager::UnpatchAll() {
  patch_list_lock_.Acquire();
  PatchList::iterator it;
  for (it = patch_list_.begin(); it != patch_list_.end(); it++) {
    UnpatchInterfaceMethods((*it)->patch_info_);
    delete [] reinterpret_cast<char*>(*it);  // Allocated as a char array.
  }
  patch_list_.clear();
  patch_list_lock_.Release();

  return true;
}

#endif  // disabled DynamicPatchManager

}  // namespace vtable_patch