//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;

#define DEBUG_TYPE "jit"

// Determine the platform we're running on.
#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
# define X86_64_JIT
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
# define X86_32_JIT
#endif

void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.

  // X86 doesn't need to invalidate the processor cache, so just invalidate
  // Valgrind's cache directly.
  sys::ValgrindDiscardTranslations(Old, 5);
}


/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host. This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)

// For ELF targets, use .size and .type directives to let tools
// know the extent of functions defined in assembler.
#if defined(__ELF__)
# define SIZE(sym) ".size " #sym ", . - " #sym "\n"
# define TYPE_FUNCTION(sym) ".type " #sym ", @function\n"
#else
# define SIZE(sym)
# define TYPE_FUNCTION(sym)
#endif

// Provide a convenient way to disable the use of CFI directives.
// This is needed for old/broken assemblers (for example, gas on
// Darwin is pretty old and doesn't support these directives).
#if defined(__APPLE__)
# define CFI(x)
#else
// FIXME: Disable this until we really want to use it. Also, we will
//        need to add some workarounds for compilers that support
//        only a subset of these directives.
# define CFI(x)
#endif

// Provide a wrapper for LLVMX86CompilationCallback2 that saves non-traditional
// callee saved registers, for the fastcc calling convention.
extern "C" {
#if defined(X86_64_JIT)
# ifndef _MSC_VER
  // No need to save EAX/EDX for X86-64.
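  //
  // The callback is reached from a patched call site whose arguments are
  // already live in registers, so it must preserve every integer and XMM
  // argument register across the call into the JIT. The frame built below
  // looks like this (offsets relative to the saved %rbp):
  //    8(%rbp)       return address into the stub/call site
  //    0(%rbp)       saved %rbp
  //   -8..-48(%rbp)  saved %rdi, %rsi, %rdx, %rcx, %r8, %r9
  //   then a 16-byte-aligned, 128-byte area holding %xmm0-%xmm7.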
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    // Save RBP
    "pushq   %rbp\n"
    CFI(".cfi_def_cfa_offset 16\n")
    CFI(".cfi_offset %rbp, -16\n")
    // Save RSP
    "movq    %rsp, %rbp\n"
    CFI(".cfi_def_cfa_register %rbp\n")
    // Save all int arg registers
    "pushq   %rdi\n"
    CFI(".cfi_rel_offset %rdi, 0\n")
    "pushq   %rsi\n"
    CFI(".cfi_rel_offset %rsi, 8\n")
    "pushq   %rdx\n"
    CFI(".cfi_rel_offset %rdx, 16\n")
    "pushq   %rcx\n"
    CFI(".cfi_rel_offset %rcx, 24\n")
    "pushq   %r8\n"
    CFI(".cfi_rel_offset %r8, 32\n")
    "pushq   %r9\n"
    CFI(".cfi_rel_offset %r9, 40\n")
    // Align stack on 16-byte boundary. RSP might only be 8-byte aligned
    // if this is called from an indirect stub.
    "andq    $-16, %rsp\n"
    // Save all XMM arg registers
    "subq    $128, %rsp\n"
    "movaps  %xmm0, (%rsp)\n"
    "movaps  %xmm1, 16(%rsp)\n"
    "movaps  %xmm2, 32(%rsp)\n"
    "movaps  %xmm3, 48(%rsp)\n"
    "movaps  %xmm4, 64(%rsp)\n"
    "movaps  %xmm5, 80(%rsp)\n"
    "movaps  %xmm6, 96(%rsp)\n"
    "movaps  %xmm7, 112(%rsp)\n"
    // JIT callee
#if defined(_WIN64) || defined(__CYGWIN__)
    "subq    $32, %rsp\n"
    "movq    %rbp, %rcx\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rdx\n"
    "call    " ASMPREFIX "LLVMX86CompilationCallback2\n"
    "addq    $32, %rsp\n"
#else
    "movq    %rbp, %rdi\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rsi\n"
    "call    " ASMPREFIX "LLVMX86CompilationCallback2\n"
#endif
    // Restore all XMM arg registers
    "movaps  112(%rsp), %xmm7\n"
    "movaps  96(%rsp), %xmm6\n"
    "movaps  80(%rsp), %xmm5\n"
    "movaps  64(%rsp), %xmm4\n"
    "movaps  48(%rsp), %xmm3\n"
    "movaps  32(%rsp), %xmm2\n"
    "movaps  16(%rsp), %xmm1\n"
    "movaps  (%rsp), %xmm0\n"
    // Restore RSP
    "movq    %rbp, %rsp\n"
    CFI(".cfi_def_cfa_register %rsp\n")
    // Restore all int arg registers
    "subq    $48, %rsp\n"
    CFI(".cfi_adjust_cfa_offset 48\n")
    "popq    %r9\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r9\n")
    "popq    %r8\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r8\n")
    "popq    %rcx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rcx\n")
    "popq    %rdx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdx\n")
    "popq    %rsi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rsi\n")
    "popq    %rdi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdi\n")
    // Restore RBP
    "popq    %rbp\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rbp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );
# else
  // No inline assembler support on this platform. The routine is in an
  // external file.
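  // Whatever provides that file must mirror the inline-asm version above:
  // save the argument registers, pass the frame pointer in RCX and the
  // return address in RDX (the Win64 convention used in the _WIN64 branch),
  // and reserve the 32-byte shadow space before calling
  // LLVMX86CompilationCallback2.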
  void X86CompilationCallback();

# endif
#elif defined (X86_32_JIT)
# ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
# if defined(__APPLE__)
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
# endif
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "LLVMX86CompilationCallback2\n"
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
    TYPE_FUNCTION(X86CompilationCallback_SSE)
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl    $64, %esp\n"
    // FIXME: provide frame move information for the XMM registers.
    // This can be tricky, because the CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
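    // The 64 bytes reserved above hold the four 128-bit XMM argument
    // registers (%xmm0-%xmm3) at 16-byte intervals; the earlier andl $-16
    // keeps %esp 16-byte aligned so the movaps stores below do not fault
    // (movaps requires aligned memory operands).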
265 "movaps %xmm0, (%esp)\n" 266 "movaps %xmm1, 16(%esp)\n" 267 "movaps %xmm2, 32(%esp)\n" 268 "movaps %xmm3, 48(%esp)\n" 269 "subl $16, %esp\n" 270 "movl 4(%ebp), %eax\n" // Pass prev frame and return address 271 "movl %eax, 4(%esp)\n" 272 "movl %ebp, (%esp)\n" 273 "call " ASMPREFIX "LLVMX86CompilationCallback2\n" 274 "addl $16, %esp\n" 275 "movaps 48(%esp), %xmm3\n" 276 CFI(".cfi_restore %xmm3\n") 277 "movaps 32(%esp), %xmm2\n" 278 CFI(".cfi_restore %xmm2\n") 279 "movaps 16(%esp), %xmm1\n" 280 CFI(".cfi_restore %xmm1\n") 281 "movaps (%esp), %xmm0\n" 282 CFI(".cfi_restore %xmm0\n") 283 "movl %ebp, %esp\n" // Restore ESP 284 CFI(".cfi_def_cfa_register esp\n") 285 "subl $12, %esp\n" 286 CFI(".cfi_adjust_cfa_offset 12\n") 287 "popl %ecx\n" 288 CFI(".cfi_adjust_cfa_offset -4\n") 289 CFI(".cfi_restore %ecx\n") 290 "popl %edx\n" 291 CFI(".cfi_adjust_cfa_offset -4\n") 292 CFI(".cfi_restore %edx\n") 293 "popl %eax\n" 294 CFI(".cfi_adjust_cfa_offset -4\n") 295 CFI(".cfi_restore %eax\n") 296 "popl %ebp\n" 297 CFI(".cfi_adjust_cfa_offset -4\n") 298 CFI(".cfi_restore %ebp\n") 299 "ret\n" 300 CFI(".cfi_endproc\n") 301 SIZE(X86CompilationCallback_SSE) 302 ); 303 # else 304 void LLVMX86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr); 305 306 _declspec(naked) void X86CompilationCallback(void) { 307 __asm { 308 push ebp 309 mov ebp, esp 310 push eax 311 push edx 312 push ecx 313 and esp, -16 314 sub esp, 16 315 mov eax, dword ptr [ebp+4] 316 mov dword ptr [esp+4], eax 317 mov dword ptr [esp], ebp 318 call LLVMX86CompilationCallback2 319 mov esp, ebp 320 sub esp, 12 321 pop ecx 322 pop edx 323 pop eax 324 pop ebp 325 ret 326 } 327 } 328 329 # endif // _MSC_VER 330 331 #else // Not an i386 host 332 void X86CompilationCallback() { 333 llvm_unreachable("Cannot call X86CompilationCallback() on a non-x86 arch!"); 334 } 335 #endif 336 } 337 338 /// This is the target-specific function invoked by the 339 /// function stub when we did not know the real target of a call. This function 340 /// must locate the start of the stub or call site and pass it into the JIT 341 /// compiler function. 342 extern "C" { 343 LLVM_ATTRIBUTE_USED // Referenced from inline asm. 344 LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr, 345 intptr_t RetAddr) { 346 intptr_t *RetAddrLoc = &StackPtr[1]; 347 // We are reading raw stack data here. Tell MemorySanitizer that it is 348 // sufficiently initialized. 349 __msan_unpoison(RetAddrLoc, sizeof(*RetAddrLoc)); 350 assert(*RetAddrLoc == RetAddr && 351 "Could not find return address on the stack!"); 352 353 // It's a stub if there is an interrupt marker after the call. 354 bool isStub = ((unsigned char*)RetAddr)[0] == 0xCE; 355 356 // The call instruction should have pushed the return value onto the stack... 357 #if defined (X86_64_JIT) 358 RetAddr--; // Backtrack to the reference itself... 359 #else 360 RetAddr -= 4; // Backtrack to the reference itself... 361 #endif 362 363 #if 0 364 DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr 365 << " ESP=" << (void*)StackPtr 366 << ": Resolving call to function: " 367 << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n"); 368 #endif 369 370 // Sanity check to make sure this really is a call instruction. 
#if defined (X86_64_JIT)
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
#if defined (X86_64_JIT)
  assert(isStub &&
         "X86-64 doesn't support rewriting non-stub lazy compilation calls:"
         " the call instruction varies too much.");
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called. This also makes the
    // 0xCE byte (interrupt) dead, so the marker doesn't affect anything.
#if defined (X86_64_JIT)
    // If the target address is within 32-bit range of the stub, use a
    // PC-relative branch instead of loading the actual address. (This is
    // considerably shorter than the 64-bit immediate load already there.)
    // We assume here intptr_t is 64 bits.
    intptr_t diff = NewVal-RetAddr+7;
    if (diff >= -2147483648LL && diff <= 2147483647LL) {
      *(unsigned char*)(RetAddr-0xc) = 0xE9;
      *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
    } else {
      *(intptr_t *)(RetAddr - 0xa) = NewVal;
      ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
    }
    sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9;
    sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
  }

  // Change the return address to reexecute the call instruction...
#if defined (X86_64_JIT)
  *RetAddrLoc -= 0xd;
#else
  *RetAddrLoc -= 5;
#endif
}
}

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  TsanIgnoreWritesBegin();
  JITCompilerFunction = F;
  TsanIgnoreWritesEnd();

#if defined (X86_32_JIT) && !defined (_MSC_VER)
#if defined(__SSE__)
  // The SSE callback should be used for SSE-enabled LLVM.
  return X86CompilationCallback_SSE;
#else
  if (useSSE)
    return X86CompilationCallback_SSE;
#endif
#endif

  return X86CompilationCallback;
}

X86JITInfo::X86JITInfo(bool UseSSE) {
  useSSE = UseSSE;
  useGOT = 0;
  TLSOffset = nullptr;
}

void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                             JITCodeEmitter &JCE) {
#if defined (X86_64_JIT)
  const unsigned Alignment = 8;
  uint8_t Buffer[8];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr);
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32));
#else
  const unsigned Alignment = 4;
  uint8_t Buffer[4];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr);
#endif
  return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment);
}

TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
  // The 64-bit stub contains:
  //   movabs r10 <- 8-byte-target-address   # 10 bytes
  //   call|jmp *r10                         # 3 bytes
  // The 32-bit stub contains a 5-byte call|jmp.
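  // Concretely, the 64-bit stub emitted by emitFunctionStub below is:
  //   49 BA <imm64>   movabsq $Target, %r10
  //   41 FF D2        callq   *%r10   (or 41 FF E2, jmpq *%r10)
  //   CE              marker byte (only when the target is the callback)
  // which is where the 14-byte size, and the 0xd/0xc offsets used in
  // LLVMX86CompilationCallback2, come from.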
  // If the stub is a call to the compilation callback, an extra byte is added
  // to mark it as a stub.
  StubLayout Result = {14, 4};
  return Result;
}

void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
                                   JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
  bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
                Target != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Target != (void*)(intptr_t)X86CompilationCallback;
#endif
  JCE.emitAlignment(4);
  void *Result = (void*)JCE.getCurrentPCValue();
  if (NotCC) {
#if defined (X86_64_JIT)
    JCE.emitByte(0x49);          // REX prefix
    JCE.emitByte(0xB8+2);        // movabsq r10
    JCE.emitWordLE((unsigned)(intptr_t)Target);
    JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
    JCE.emitByte(0x41);          // REX prefix
    JCE.emitByte(0xFF);          // jmpq *r10
    JCE.emitByte(2 | (4 << 3) | (3 << 6));
#else
    JCE.emitByte(0xE9);
    JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif
    return Result;
  }

#if defined (X86_64_JIT)
  JCE.emitByte(0x49);          // REX prefix
  JCE.emitByte(0xB8+2);        // movabsq r10
  JCE.emitWordLE((unsigned)(intptr_t)Target);
  JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
  JCE.emitByte(0x41);          // REX prefix
  JCE.emitByte(0xFF);          // callq *r10
  JCE.emitByte(2 | (2 << 3) | (3 << 6));
#else
  JCE.emitByte(0xE8);   // Call with 32-bit pc-rel destination...

  JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif

  // This used to use 0xCD, but that value is used by JITMemoryManager to
  // initialize the buffer with garbage, which means it may follow a
  // noreturn function call, confusing LLVMX86CompilationCallback2. PR 4929.
  JCE.emitByte(0xCE);   // Interrupt - Just a marker identifying the stub!
  return Result;
}

/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#if defined(X86_64_JIT)
  return BB - Entry;
#else
  return BB - PICBase;
#endif
}

template<typename T> static void addUnaligned(void *Pos, T Delta) {
  T Value;
  std::memcpy(reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(Pos),
              sizeof(T));
  Value += Delta;
  std::memcpy(reinterpret_cast<char*>(Pos), reinterpret_cast<char*>(&Value),
              sizeof(T));
}

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC-relative relocation: add the relocated value to the value already
      // in memory, after we adjust it for where the PC is.
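      // A 4-byte operand at RelocPos that should reach ResultPtr must hold
      // ResultPtr - (RelocPos + 4), since the PC has already advanced past
      // the operand when the displacement is applied; getConstantVal()
      // carries any extra addend recorded with the relocation.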
      ResultPtr = ResultPtr - (intptr_t)RelocPos - 4 - MR->getConstantVal();
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation: add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_absolute_word:
    case X86::reloc_absolute_word_sext:
      // Absolute relocation: just add the relocated value to the value already
      // in memory.
      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    case X86::reloc_absolute_dword:
      addUnaligned<intptr_t>(RelocPos, ResultPtr);
      break;
    }
  }
}

char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
#if defined(X86_32_JIT) && !defined(__APPLE__) && !defined(_MSC_VER)
  TLSOffset -= size;
  return TLSOffset;
#else
  llvm_unreachable("Cannot allocate thread local storage on this arch!");
#endif
}