//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;

// Determine the platform we're running on
#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
# define X86_64_JIT
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
# define X86_32_JIT
#endif

void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
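  // The five patched bytes now form "E9 <rel32>", i.e. jmp rel32 with
  // rel32 = New - (Old + 5): the displacement is measured from the end of
  // the jmp, which is why 4 more bytes are subtracted above (OldWord already
  // sits one byte past the opcode).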

  // X86 doesn't need to invalidate the processor cache, so just invalidate
  // Valgrind's cache directly.
  sys::ValgrindDiscardTranslations(Old, 5);
}


/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host.  This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)
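
// For example, Darwin defines __USER_LABEL_PREFIX__ as _, so the string
// concatenation ASMPREFIX "X86CompilationCallback" below yields
// "_X86CompilationCallback"; on ELF hosts the prefix is empty.  The
// two-level GETASMPREFIX macro forces the prefix to be macro-expanded
// before it is stringized.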

// For ELF targets, use .size and .type directives to let tools know the
// extent of functions defined in assembler.
#if defined(__ELF__)
# define SIZE(sym) ".size " #sym ", . - " #sym "\n"
# define TYPE_FUNCTION(sym) ".type " #sym ", @function\n"
#else
# define SIZE(sym)
# define TYPE_FUNCTION(sym)
#endif

// Provide a convenient way of disabling usage of CFI directives.
// This is needed for old/broken assemblers (for example, gas on
// Darwin is pretty old and doesn't support these directives).
#if defined(__APPLE__)
# define CFI(x)
#else
// FIXME: Disable this until we really want to use it. Also, we will
//        need to add some workarounds for compilers that support only
//        a subset of these directives.
# define CFI(x)
#endif

// Provide a wrapper for X86CompilationCallback2 that saves non-traditional
// callee saved registers, for the fastcc calling convention.
extern "C" {
#if defined(X86_64_JIT)
# ifndef _MSC_VER
  // No need to save EAX/EDX for X86-64.
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    // Save RBP
    "pushq   %rbp\n"
    CFI(".cfi_def_cfa_offset 16\n")
    CFI(".cfi_offset %rbp, -16\n")
    // Save RSP
    "movq    %rsp, %rbp\n"
    CFI(".cfi_def_cfa_register %rbp\n")
    // Save all int arg registers
    "pushq   %rdi\n"
    CFI(".cfi_rel_offset %rdi, 0\n")
    "pushq   %rsi\n"
    CFI(".cfi_rel_offset %rsi, 8\n")
    "pushq   %rdx\n"
    CFI(".cfi_rel_offset %rdx, 16\n")
    "pushq   %rcx\n"
    CFI(".cfi_rel_offset %rcx, 24\n")
    "pushq   %r8\n"
    CFI(".cfi_rel_offset %r8, 32\n")
    "pushq   %r9\n"
    CFI(".cfi_rel_offset %r9, 40\n")
    // Align the stack on a 16-byte boundary: RSP may only be 8-byte
    // aligned if this is called from an indirect stub.
    "andq    $-16, %rsp\n"
    // Save all XMM arg registers
    "subq    $128, %rsp\n"
    "movaps  %xmm0, (%rsp)\n"
    "movaps  %xmm1, 16(%rsp)\n"
    "movaps  %xmm2, 32(%rsp)\n"
    "movaps  %xmm3, 48(%rsp)\n"
    "movaps  %xmm4, 64(%rsp)\n"
    "movaps  %xmm5, 80(%rsp)\n"
    "movaps  %xmm6, 96(%rsp)\n"
    "movaps  %xmm7, 112(%rsp)\n"
    // JIT callee
#ifdef _WIN64
    "subq    $32, %rsp\n"
    "movq    %rbp, %rcx\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rdx\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "addq    $32, %rsp\n"
#else
    "movq    %rbp, %rdi\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rsi\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
#endif
    // Restore all XMM arg registers
    "movaps  112(%rsp), %xmm7\n"
    "movaps  96(%rsp), %xmm6\n"
    "movaps  80(%rsp), %xmm5\n"
    "movaps  64(%rsp), %xmm4\n"
    "movaps  48(%rsp), %xmm3\n"
    "movaps  32(%rsp), %xmm2\n"
    "movaps  16(%rsp), %xmm1\n"
    "movaps  (%rsp), %xmm0\n"
    // Restore RSP
    "movq    %rbp, %rsp\n"
    CFI(".cfi_def_cfa_register %rsp\n")
    // Restore all int arg registers
    "subq    $48, %rsp\n"
    CFI(".cfi_adjust_cfa_offset 48\n")
    "popq    %r9\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r9\n")
    "popq    %r8\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r8\n")
    "popq    %rcx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rcx\n")
    "popq    %rdx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdx\n")
    "popq    %rsi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rsi\n")
    "popq    %rdi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdi\n")
    // Restore RBP
    "popq    %rbp\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rbp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );
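
  // While X86CompilationCallback2 runs (SysV case), the frame looks like:
  //   8(%rbp)  return address into the stub (passed as the second argument)
  //    (%rbp)  caller's saved RBP
  // Below that sit the six GPR argument registers, then a 16-byte-aligned
  // area holding %xmm0-%xmm7.  Passing %rbp as the first argument lets the
  // callee locate and rewrite the return address.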
# else
  // No inline assembler support on this platform. The routine is in an
  // external file.
  void X86CompilationCallback();

# endif
#elif defined (X86_32_JIT)
# ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
#  if defined(__APPLE__)
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
#  endif
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
    TYPE_FUNCTION(X86CompilationCallback_SSE)
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl    $64, %esp\n"
    // FIXME: provide frame move information for xmm registers.
    // This can be tricky, because the CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
    "movaps  %xmm0, (%esp)\n"
    "movaps  %xmm1, 16(%esp)\n"
    "movaps  %xmm2, 32(%esp)\n"
    "movaps  %xmm3, 48(%esp)\n"
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "addl    $16, %esp\n"
    "movaps  48(%esp), %xmm3\n"
    CFI(".cfi_restore %xmm3\n")
    "movaps  32(%esp), %xmm2\n"
    CFI(".cfi_restore %xmm2\n")
    "movaps  16(%esp), %xmm1\n"
    CFI(".cfi_restore %xmm1\n")
    "movaps  (%esp), %xmm0\n"
    CFI(".cfi_restore %xmm0\n")
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback_SSE)
  );
# else
  void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);

  __declspec(naked) void X86CompilationCallback(void) {
    __asm {
      push  ebp
      mov   ebp, esp
      push  eax
      push  edx
      push  ecx
      and   esp, -16
      sub   esp, 16
      mov   eax, dword ptr [ebp+4]
      mov   dword ptr [esp+4], eax
      mov   dword ptr [esp], ebp
      call  X86CompilationCallback2
      mov   esp, ebp
      sub   esp, 12
      pop   ecx
      pop   edx
      pop   eax
      pop   ebp
      ret
    }
  }

# endif // _MSC_VER

#else // Not an i386 host
  void X86CompilationCallback() {
    llvm_unreachable("Cannot call X86CompilationCallback() on a non-x86 arch!");
  }
#endif
}

/// X86CompilationCallback2 - This is the target-specific function invoked by the
/// function stub when we did not know the real target of a call.  This function
/// must locate the start of the stub or call site and pass it into the JIT
/// compiler function.
extern "C" {
void
X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
  intptr_t *RetAddrLoc = &StackPtr[1];
  assert(*RetAddrLoc == RetAddr &&
         "Could not find return address on the stack!");

  // It's a stub if there is an interrupt marker after the call.
  bool isStub = ((unsigned char*)RetAddr)[0] == 0xCE;

  // The call instruction should have pushed the return address onto the stack...
#if defined (X86_64_JIT)
  RetAddr--;     // Backtrack to the reference itself...
#else
  RetAddr -= 4;  // Backtrack to the reference itself...
#endif
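  // "The reference" differs per target: the 64-bit stub ends in a three-byte
  // "callq *%r10" followed by the 0xCE marker, so RetAddr-1 lands on that
  // call's ModRM byte; on x86-32 the call is "E8 rel32", so RetAddr-4 lands
  // on the 32-bit displacement that may be rewritten below.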

#if 0
  DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr
               << " ESP=" << (void*)StackPtr
               << ": Resolving call to function: "
               << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n");
#endif

  // Sanity check to make sure this really is a call instruction.
#if defined (X86_64_JIT)
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
#if defined (X86_64_JIT)
  assert(isStub &&
         "X86-64 doesn't support rewriting non-stub lazy compilation calls:"
         " the call instruction varies too much.");
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called.  This also makes the
    // 0xCE byte (interrupt) dead, so the marker doesn't affect anything.
#if defined (X86_64_JIT)
    // If the target address is within 32-bit range of the stub, use a
    // PC-relative branch instead of loading the actual address.  (This is
    // considerably shorter than the 64-bit immediate load already there.)
    // We assume here intptr_t is 64 bits.
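    // Stub layout at this point: bytes [0..9] movabsq $imm64, %r10;
    // [10..12] callq *%r10; [13] the 0xCE marker.  RetAddr points at byte
    // 12, so the stub starts at RetAddr-0xc and the imm64 field is at
    // RetAddr-0xa; a five-byte jmp written at the start has its displacement
    // measured from RetAddr-0xc+5, hence diff = NewVal-RetAddr+7.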
    intptr_t diff = NewVal-RetAddr+7;
    if (diff >= -2147483648LL && diff <= 2147483647LL) {
      *(unsigned char*)(RetAddr-0xc) = 0xE9;
      *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
    } else {
      *(intptr_t *)(RetAddr - 0xa) = NewVal;
      // Turn the call into a jump: ModRM 0xE2 selects FF /4, i.e. jmp *%r10.
      ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
    }
    sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9;
    sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
  }

  // Change the return address to re-execute the (now rewritten) call:
  // back to the start of the 14-byte stub on x86-64, or back over the
  // five-byte call on x86-32.
#if defined (X86_64_JIT)
  *RetAddrLoc -= 0xd;
#else
  *RetAddrLoc -= 5;
#endif
}
}

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  JITCompilerFunction = F;

#if defined (X86_32_JIT) && !defined (_MSC_VER)
  if (Subtarget->hasSSE1())
    return X86CompilationCallback_SSE;
#endif

  return X86CompilationCallback;
}

X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  useGOT = 0;
  TLSOffset = 0;
}

void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                             JITCodeEmitter &JCE) {
#if defined (X86_64_JIT)
  const unsigned Alignment = 8;
  uint8_t Buffer[8];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr);
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32));
#else
  const unsigned Alignment = 4;
  uint8_t Buffer[4];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr);
#endif
  return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment);
}

TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
  // The 64-bit stub contains:
  //   movabs r10 <- 8-byte-target-address  # 10 bytes
  //   call|jmp *r10  # 3 bytes
  // The 32-bit stub contains a 5-byte call|jmp.
  // If the stub is a call to the compilation callback, an extra byte is added
  // to mark it as a stub.
  StubLayout Result = {14, 4};  // Size (worst case), Alignment
  return Result;
}
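
// A note on the encodings emitted below: "callq *%r10" and "jmpq *%r10" are
// opcode FF with extension /2 or /4 and a register-direct ModRM byte built
// as (mod=3)<<6 | (extension)<<3 | (rm=2); the preceding 0x41 REX.B prefix
// extends rm=2 to %r10.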

void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
                                   JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
  bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
                Target != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Target != (void*)(intptr_t)X86CompilationCallback;
#endif
  JCE.emitAlignment(4);
  void *Result = (void*)JCE.getCurrentPCValue();
  if (NotCC) {
#if defined (X86_64_JIT)
    JCE.emitByte(0x49);          // REX prefix
    JCE.emitByte(0xB8+2);        // movabsq r10
    JCE.emitWordLE((unsigned)(intptr_t)Target);
    JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
    JCE.emitByte(0x41);          // REX prefix
    JCE.emitByte(0xFF);          // jmpq *r10
    JCE.emitByte(2 | (4 << 3) | (3 << 6));
#else
    JCE.emitByte(0xE9);
    JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif
    return Result;
  }

#if defined (X86_64_JIT)
  JCE.emitByte(0x49);          // REX prefix
  JCE.emitByte(0xB8+2);        // movabsq r10
  JCE.emitWordLE((unsigned)(intptr_t)Target);
  JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
  JCE.emitByte(0x41);          // REX prefix
  JCE.emitByte(0xFF);          // callq *r10
  JCE.emitByte(2 | (2 << 3) | (3 << 6));
#else
  JCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...

  JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif

  // This used to use 0xCD, but that value is used by JITMemoryManager to
  // initialize the buffer with garbage, which means it may follow a
  // noreturn function call, confusing X86CompilationCallback2.  PR 4929.
  JCE.emitByte(0xCE);   // Interrupt - Just a marker identifying the stub!
  return Result;
}
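
// For the lazy-compilation case, the x86-64 stub emitted above is exactly
// the 14 bytes that X86CompilationCallback2 later rewrites:
//   49 BA <imm64>   movabsq $Target, %r10
//   41 FF D2        callq   *%r10
//   CE              marker byte identifying the stub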

/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#if defined(X86_64_JIT)
  return BB - Entry;
#else
  return BB - PICBase;
#endif
}

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
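/// For example, a pc-relative call site emitted with a zero placeholder ends
/// up holding Target - (RelocPos + 4): the displacement is measured from the
/// end of the 4-byte field, which is why 4 is subtracted below.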
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC relative relocation, add the relocated value to the value already in
      // memory, after we adjust it for where the PC is.
      ResultPtr = ResultPtr - (intptr_t)RelocPos - 4 - MR->getConstantVal();
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation, add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_absolute_word:
    case X86::reloc_absolute_word_sext:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    case X86::reloc_absolute_dword:
      *((intptr_t*)RelocPos) += ResultPtr;
      break;
    }
  }
}

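// A sketch of the assumption behind the bump allocator below: on x86-32
// ELF targets, thread-local blocks live at negative offsets from the thread
// pointer, so handing out successively more negative offsets (returned as a
// char*) is sufficient.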
char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
#if defined(X86_32_JIT) && !defined(__APPLE__) && !defined(_MSC_VER)
  TLSOffset -= size;
  return TLSOffset;
#else
  llvm_unreachable("Cannot allocate thread local storage on this arch!");
  return 0;
#endif
}
    569