1 //===-- CodeEmitter.cpp - CodeEmitter Class -------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See external/llvm/LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the CodeEmitter class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #define LOG_TAG "bcc" 15 #include <cutils/log.h> 16 17 #include "CodeEmitter.h" 18 19 #include "Config.h" 20 21 #if DEBUG_OLD_JIT_DISASSEMBLER 22 #include "Disassembler/Disassembler.h" 23 #endif 24 25 #include "CodeMemoryManager.h" 26 #include "ExecutionEngine/Runtime.h" 27 #include "ExecutionEngine/ScriptCompiled.h" 28 29 #include <bcc/bcc.h> 30 #include <bcc/bcc_cache.h> 31 #include "ExecutionEngine/bcc_internal.h" 32 33 #include "llvm/ADT/APFloat.h" 34 #include "llvm/ADT/APInt.h" 35 #include "llvm/ADT/DenseMap.h" 36 #include "llvm/ADT/SmallVector.h" 37 #include "llvm/ADT/StringRef.h" 38 39 #include "llvm/CodeGen/MachineBasicBlock.h" 40 #include "llvm/CodeGen/MachineConstantPool.h" 41 #include "llvm/CodeGen/MachineFunction.h" 42 #include "llvm/CodeGen/MachineModuleInfo.h" 43 #include "llvm/CodeGen/MachineRelocation.h" 44 #include "llvm/CodeGen/MachineJumpTableInfo.h" 45 #include "llvm/CodeGen/JITCodeEmitter.h" 46 47 #include "llvm/ExecutionEngine/GenericValue.h" 48 49 #include "llvm/Support/ErrorHandling.h" 50 #include "llvm/Support/raw_ostream.h" 51 52 #include "llvm/Support/Host.h" 53 54 #include "llvm/Target/TargetData.h" 55 #include "llvm/Target/TargetMachine.h" 56 #include "llvm/Target/TargetRegistry.h" 57 #include "llvm/Target/TargetJITInfo.h" 58 59 #include "llvm/Constant.h" 60 #include "llvm/Constants.h" 61 #include "llvm/DerivedTypes.h" 62 #include "llvm/Function.h" 63 #include "llvm/GlobalAlias.h" 64 #include "llvm/GlobalValue.h" 65 #include 
"llvm/GlobalVariable.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"

#include <algorithm>
#include <vector>
#include <set>
#include <string>

#include <stddef.h>


namespace bcc {

// Will take the ownership of @pMemMgr.
//
// All target-dependent members (mpTarget, mpTJI, mpTD) start out NULL and
// are filled in later by setTargetMachine(); the symbol-lookup callback is
// likewise installed after construction.
CodeEmitter::CodeEmitter(ScriptCompiled *result, CodeMemoryManager *pMemMgr)
    : mpResult(result),
      mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}


// NOTE(review): the constructor is documented as taking ownership of
// @pMemMgr, but this destructor does not delete it — confirm the memory
// manager's lifetime is managed elsewhere.
CodeEmitter::~CodeEmitter() {
}


// Once you finish the compilation on a translation unit, you can call this
// function to recycle the memory (which is used at compilation time and not
// needed for runtime).
//
// NOTE: You should not call this function until the code-gen passes for a
//       given module are done. Otherwise, the result is undefined and may
//       cause the system to crash!
106 void CodeEmitter::releaseUnnecessary() { 107 mMBBLocations.clear(); 108 mLabelLocations.clear(); 109 mGlobalAddressMap.clear(); 110 mFunctionToLazyStubMap.clear(); 111 GlobalToIndirectSymMap.clear(); 112 ExternalFnToStubMap.clear(); 113 PendingFunctions.clear(); 114 } 115 116 117 void CodeEmitter::reset() { 118 releaseUnnecessary(); 119 120 mpResult = NULL; 121 122 mpSymbolLookupFn = NULL; 123 mpSymbolLookupContext = NULL; 124 125 mpTJI = NULL; 126 mpTD = NULL; 127 128 mpMemMgr->reset(); 129 } 130 131 132 void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) { 133 if (Addr == NULL) { 134 // Removing mapping 135 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV); 136 void *OldVal; 137 138 if (I == mGlobalAddressMap.end()) { 139 OldVal = NULL; 140 } else { 141 OldVal = I->second; 142 mGlobalAddressMap.erase(I); 143 } 144 145 return OldVal; 146 } 147 148 void *&CurVal = mGlobalAddressMap[GV]; 149 void *OldVal = CurVal; 150 151 CurVal = Addr; 152 153 return OldVal; 154 } 155 156 157 unsigned int CodeEmitter::GetConstantPoolSizeInBytes( 158 llvm::MachineConstantPool *MCP) { 159 const std::vector<llvm::MachineConstantPoolEntry> &Constants = 160 MCP->getConstants(); 161 162 if (Constants.empty()) 163 return 0; 164 165 unsigned int Size = 0; 166 for (int i = 0, e = Constants.size(); i != e; i++) { 167 llvm::MachineConstantPoolEntry CPE = Constants[i]; 168 unsigned int AlignMask = CPE.getAlignment() - 1; 169 Size = (Size + AlignMask) & ~AlignMask; 170 llvm::Type *Ty = CPE.getType(); 171 Size += mpTD->getTypeAllocSize(Ty); 172 } 173 174 return Size; 175 } 176 177 // This function converts a Constant* into a GenericValue. The interesting 178 // part is if C is a ConstantExpr. 
// Evaluate the constant @C into @Result. UndefValue leaves @Result
// untouched; ConstantExprs are folded recursively; unhandled forms abort
// via report_fatal_error.
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the byte offset of the indexed element, then bias the
        // base pointer produced by evaluating Op0.
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(), Indices);

        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          // 80-bit long double: convert via APFloat and store the raw bits
          // back into IntVal (the GenericValue convention for FP80).
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V;  // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
                                    (Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        bccAssert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        // Reinterpret the already-evaluated operand according to the
        // source/destination type pair.
        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            bccAssert(DestTy->isFloatingPointTy() && "invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            bccAssert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            bccAssert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            bccAssert(DestTy->isPointerTy() && "Invalid bitcast");
            break;  // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        // Binary operators: evaluate both operands, then dispatch on the
        // operand type and opcode.
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            // Long-double-like types are carried as raw bits in IntVal;
            // do the arithmetic through APFloat and store the bits back.
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        }  // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    }  // End switch (CE->getOpcode())

    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  }  // C->getValueID() == llvm::Value::ConstantExprVal

  // Plain (non-expression) constants: dispatch on the constant's type.
  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          bccAssert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}


// Stores the data in @Val of type @Ty at address @Addr.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      bccAssert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
                "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        // Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // Copy the (possibly partial) final word.
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // FP80 occupies 10 significant bytes of the raw APInt data.
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      break;
    }
  }

  // If host and target disagree on endianness, byte-swap the stored value.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}


// Recursive function to apply a @Constant value into the specified memory
// location @Addr.
void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
  switch (C->getValueID()) {
    case llvm::Value::UndefValueVal: {
      // Nothing to do
      break;
    }
    case llvm::Value::ConstantVectorVal: {
      // dynamic cast may hurt performance
      const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;

      unsigned int ElementSize = mpTD->getTypeAllocSize
          (CP->getType()->getElementType());

      for (int i = 0, e = CP->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CP->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantAggregateZeroVal: {
      memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
      break;
    }
    case llvm::Value::ConstantArrayVal: {
      const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
      unsigned int ElementSize = mpTD->getTypeAllocSize
          (CPA->getType()->getElementType());

      for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPA->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantStructVal: {
      const llvm::ConstantStruct *CPS =
          static_cast<const llvm::ConstantStruct*>(C);
      // Use the target's struct layout so each field lands at its ABI
      // offset (including padding).
      const llvm::StructLayout *SL = mpTD->getStructLayout
          (llvm::cast<llvm::StructType>(CPS->getType()));

      for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPS->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
      break;
    }
    default: {
      if (C->getType()->isFirstClassType()) {
        // Scalar leaf: evaluate and store directly.
        llvm::GenericValue Val;
        GetConstantValue(C, Val);
        StoreValueToMemory(Val, Addr, C->getType());
      } else {
        llvm_unreachable("Unknown constant type to initialize memory "
                         "with!");
      }
      break;
    }
  }
  return;
}


// Allocate memory for and materialize the constant pool @MCP, recording
// each entry's address in mConstPoolAddresses.
void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return;  // out of memory

  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    // Round the running offset up to the entry's alignment.
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error
          ("Initialize memory with machine specific constant pool"
           " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}


// Reserve (but do not fill) memory for the jump tables of @MJTI; the slots
// are written later by emitJumpTableInfo().
void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty())
    return;

  unsigned NumEntries = 0;
  for (int i = 0, e = JT.size(); i != e; i++)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize(*mpTD);

  mpJumpTable = MJTI;
  mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
                                  MJTI->getEntryAlignment(*mpTD));

  return;
}


// Fill the memory reserved by initJumpTableInfo() with the final addresses
// of the emitted basic blocks.
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  bccAssert(mpTargetMachine->getRelocationModel() == llvm::Reloc::Static &&
            (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
            "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}


// Resolve the global value @V to a runtime address, emitting code/data or
// stubs on demand. @Reference is the location that wants the address;
// @MayNeedFarStub forces stub-based resolution for functions.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        //   we're returning the same address for the function as any
        //   previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        //
        // x86_64 architecture may encounter the bug:
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          // code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          bccAssert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  llvm_unreachable("Unknown type of global value!");
}


// If the specified function has been code-gen'd, return a pointer to the
// function. If not, compile it, or use a stub to implement lazy compilation
// if available.
void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
  // If we have already code generated the function, just return the
  // address.
  if (void *Addr = GetPointerToGlobalIfAvailable(F))
    return Addr;

  // Get a stub if the target supports it.
  return GetLazyFunctionStub(F);
}


// Return (creating on first use) the lazy stub for function @F, and record
// the stub as F's address in the global mapping.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual) {
    // Unresolved: remember it so it can be fixed up later.
    PendingFunctions.insert(F);
  } else {
#if DEBUG_OLD_JIT_DISASSEMBLER
    Disassemble(DEBUG_OLD_JIT_DISASSEMBLER_FILE,
                mpTarget, mpTargetMachine, F->getName(),
                (unsigned char const *)Stub, SL.Size);
#endif
  }

  return Stub;
}


// Resolve the address of the externally-defined function @F via the runtime
// library and the user-supplied symbol-lookup callback, caching the result
// in the global mapping.
void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
                                        bool AbortOnFailure) {
  void *Addr = GetPointerToGlobalIfAvailable(F);
  if (Addr)
    return Addr;

  bccAssert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
            "Internal error: only external defined function routes here!");

  // Handle the failure resolution by ourselves.
  Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
                                 /* AbortOnFailure = */ false);

  // If we resolved the symbol to a null address (eg. a weak external)
  // return a null pointer let the application handle it.
  if (Addr == NULL) {
    if (AbortOnFailure)
      llvm::report_fatal_error("Could not resolve external function "
                               "address: " + F->getName());
    else
      return NULL;
  }

  AddGlobalMapping(F, Addr);

  return Addr;
}


// Look up @Name first in the built-in runtime function table, then through
// the user-supplied symbol-lookup callback (if any).
void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
                                           bool AbortOnFailure) {
  if (void *Addr = FindRuntimeFunction(Name.c_str()))
    return Addr;

  if (mpSymbolLookupFn)
    if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
      return Addr;

  if (AbortOnFailure)
    llvm::report_fatal_error("Program used external symbol '" + Name +
                             "' which could not be resolved!");

  return NULL;
}


// Return the address of the specified global variable, possibly emitting it
// to memory if needed. This is used by the Emitter.
void *CodeEmitter::GetOrEmitGlobalVariable(llvm::GlobalVariable *GV) {
  void *Ptr = GetPointerToGlobalIfAvailable(GV);
  if (Ptr)
    return Ptr;

  if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
    // If the global is external, just remember the address.
    Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
    AddGlobalMapping(GV, Ptr);
  } else {
    // If the global hasn't been emitted to memory yet, allocate space and
    // emit it into memory.
    Ptr = GetMemoryForGV(GV);
    AddGlobalMapping(GV, Ptr);
    EmitGlobalVariable(GV);
  }

  return Ptr;
}


// This method abstracts memory allocation of global variable so that the
// JIT can allocate thread local variables depending on the target.
// Allocate backing storage for global variable @GV and return its address.
// TLS globals are rejected (fatal error); otherwise storage comes either
// from plain malloc (when the target JIT wants GV memory separate from the
// code buffer) or from the memory manager via allocateGlobal().
void *CodeEmitter::GetMemoryForGV(llvm::GlobalVariable *GV) {
  void *Ptr;

  llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);   // bytes to allocate
  size_t A = mpTD->getPreferredAlignment(GV);      // required alignment

  if (GV->isThreadLocal()) {
    // We can support TLS by
    //
    // Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to.
    // (should we disable this in the front-end (i.e., slang)?).
    llvm::report_fatal_error
        ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      // NOTE(review): relies on malloc returning at-least-8-byte-aligned
      // memory (true for the platform allocators this targets).
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      // NOTE(review): the original malloc pointer is discarded after the
      // adjustment, so this storage can never be free()d — presumably
      // acceptable because globals live for the lifetime of the script;
      // confirm before reusing this code elsewhere.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
                (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}


// Emit the initializer of @GV into its backing storage, allocating that
// storage first if no mapping exists yet. TLS globals are fatal.
void CodeEmitter::EmitGlobalVariable(llvm::GlobalVariable *GV) {
  void *GA = GetPointerToGlobalIfAvailable(GV);

  if (GV->isThreadLocal())
    llvm::report_fatal_error
        ("We don't support Thread Local Storage (TLS)!");

  if (GA == NULL) {
    // If it's not already specified, allocate memory for the global.
    GA = GetMemoryForGV(GV);
    AddGlobalMapping(GV, GA);
  }

  // Copy the constant initializer into the allocated memory.
  InitializeConstantToMemory(GV->getInitializer(), GA);

  // You can do some statistics on global variable here.
  return;
}


// Return (creating on first use) an indirect symbol slot holding the fully
// resolved address of @V; @Reference is the code location the relocation
// originates from.
void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
  // Make sure GV is emitted first, and create a stub containing the fully
  // resolved address.
  void *GVAddress = GetPointerToGlobal(V, Reference, false);

  // If we already have a stub for this global variable, recycle it.
  // (operator[] default-inserts NULL on first use; the reference lets us
  // fill the new slot in place.)
  void *&IndirectSym = GlobalToIndirectSymMap[V];
  // Otherwise, codegen a new indirect symbol.
  if (!IndirectSym)
    IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);

  return IndirectSym;
}


// Return a stub for the function at the specified address.
// Stubs are cached per target address in ExternalFnToStubMap.
void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub)
    return Stub;

  // Emit a fresh stub into stub memory; startGVStub/finishGVStub swap the
  // emission buffer so the stub does not land in the current function body.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(0, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
  finishGVStub();

  return Stub;
}


// Bind this emitter to @TM, caching the Target, TargetJITInfo and
// TargetData pointers used throughout emission.
void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
  mpTargetMachine = &TM;

  // Set Target
  mpTarget = &TM.getTarget();
  // Set TargetJITInfo
  mpTJI = TM.getJITInfo();
  // set TargetData
  mpTD = TM.getTargetData();

  // GOT-based targets are not supported by this emitter.
  bccAssert(!mpTJI->needsGOT() && "We don't support GOT needed target!");

  return;
}


// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Lazily create the bookkeeping record for the function being emitted;
  // it is handed off to mpResult->mEmittedFunctions in finishFunction().
  if (mpCurEmitFunction == NULL) {
    mpCurEmitFunction = new FuncInfo(); // TODO(all): Allocation check!
    mpCurEmitFunction->name = NULL;
    mpCurEmitFunction->addr = NULL;
    mpCurEmitFunction->size = 0;
  }

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  // NOTE(review): the code actually aligns to 16 bytes, which is stricter
  // than the comment's "4-byte" claim — the comment understates.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  // Use at least 8-byte alignment, or more if the function requests it.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->addr = CurBufferPtr;

  mMBBLocations.clear();
}


// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // No enough memory
    // NOTE(review): despite the header comment, the overflow path returns
    // false here (as does the success path) — presumably retry is driven
    // elsewhere; confirm against the caller before relying on the comment.
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  if (!mRelocations.empty()) {
    //ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    // NOTE(review): loop index is int while size() is size_t — harmless
    // for realistic relocation counts, but unsigned would be cleaner.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          // Route far calls through a stub when the target says the direct
          // address may be out of branch range.
          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          bccAssert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.
#if 0
          // Cache the relocation result address
          mCachingRelocations.push_back(
            oBCCRelocEntry(MR.getRelocationType(),
                           MR.getMachineCodeOffset() + BufferOffset,
                           ResultPtr));
#endif
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Let the target patch the emitted code with the resolved pointers.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->size = CurBufferPtr - BufferBegin;

#if DEBUG_OLD_JIT_DISASSEMBLER
  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;
#endif

  BufferBegin = CurBufferPtr = 0;

  // Hand the bookkeeping record over to the compilation result, keyed by
  // function name; unnamed functions keep mpCurEmitFunction for reuse.
  if (F.getFunction()->hasName()) {
    std::string const &name = F.getFunction()->getNameStr();
    mpResult->mEmittedFunctions[name] = mpCurEmitFunction;
    mpCurEmitFunction = NULL;
  }

  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // If a lazy stub was created for this function, rewrite it to jump to
  // the now-final address.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

#if DEBUG_OLD_JIT_DISASSEMBLER
  Disassemble(DEBUG_OLD_JIT_DISASSEMBLER_FILE,
              mpTarget, mpTargetMachine, F.getFunction()->getName(),
              (unsigned char const *)FnStart, FnEnd - FnStart);
#endif

  return false;
}


// Redirect emission into a freshly-allocated stub buffer for @GV, saving
// the current function buffer so finishGVStub() can restore it.
void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
                              unsigned Alignment) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
                                                      Alignment);
  // +1 byte of slack so that an exactly-full stub does not trip the
  // CurBufferPtr == BufferEnd overflow check in finishGVStub().
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


// Redirect emission into an existing stub at @Buffer (used to rewrite a
// previously-emitted stub in place), saving the current buffer state.
void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
  // Same +1 slack as the allocating overload — see note there.
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


// End a stub emission started by either startGVStub overload and restore
// the saved function-buffer state.
void CodeEmitter::finishGVStub() {
  bccAssert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");

  // restore
  BufferBegin = mpSavedBufferBegin;
  BufferEnd = mpSavedBufferEnd;
  CurBufferPtr = mpSavedCurBufferPtr;
}


// Allocates and fills storage for an indirect GlobalValue, and returns the
// address.
void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
                                   const uint8_t *Buffer, size_t Size,
                                   unsigned Alignment) {
  uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
  memcpy(IndGV, Buffer, Size);
  return IndGV;
}


// Allocate memory for a global. Unlike allocateSpace, this method does not
// allocate memory in the current output buffer, because a global may live
// longer than the current function.
1364 void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) { 1365 // Delegate this call through the memory manager. 1366 return mpMemMgr->allocateGlobal(Size, Alignment); 1367 } 1368 1369 1370 // This should be called by the target when a new basic block is about to be 1371 // emitted. This way the MCE knows where the start of the block is, and can 1372 // implement getMachineBasicBlockAddress. 1373 void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) { 1374 if (mMBBLocations.size() <= (unsigned) MBB->getNumber()) 1375 mMBBLocations.resize((MBB->getNumber() + 1) * 2); 1376 mMBBLocations[MBB->getNumber()] = getCurrentPCValue(); 1377 return; 1378 } 1379 1380 1381 // Return the address of the jump table with index @Index in the function 1382 // that last called initJumpTableInfo. 1383 uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const { 1384 const std::vector<llvm::MachineJumpTableEntry> &JT = 1385 mpJumpTable->getJumpTables(); 1386 1387 bccAssert((Index < JT.size()) && "Invalid jump table index!"); 1388 1389 unsigned int Offset = 0; 1390 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD); 1391 1392 for (unsigned i = 0; i < Index; i++) 1393 Offset += JT[i].MBBs.size(); 1394 Offset *= EntrySize; 1395 1396 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset); 1397 } 1398 1399 1400 // Return the address of the specified MachineBasicBlock, only usable after 1401 // the label for the MBB has been emitted. 1402 uintptr_t CodeEmitter::getMachineBasicBlockAddress( 1403 llvm::MachineBasicBlock *MBB) const { 1404 bccAssert(mMBBLocations.size() > (unsigned) MBB->getNumber() && 1405 mMBBLocations[MBB->getNumber()] && 1406 "MBB not emitted!"); 1407 return mMBBLocations[MBB->getNumber()]; 1408 } 1409 1410 1411 void CodeEmitter::updateFunctionStub(const llvm::Function *F) { 1412 // Get the empty stub we generated earlier. 
1413 void *Stub; 1414 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F); 1415 if (I != PendingFunctions.end()) 1416 Stub = mFunctionToLazyStubMap[F]; 1417 else 1418 return; 1419 1420 void *Addr = GetPointerToGlobalIfAvailable(F); 1421 1422 bccAssert(Addr != Stub && 1423 "Function must have non-stub address to be updated."); 1424 1425 // Tell the target jit info to rewrite the stub at the specified address, 1426 // rather than creating a new one. 1427 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout(); 1428 startGVStub(Stub, SL.Size); 1429 mpTJI->emitFunctionStub(F, Addr, *this); 1430 finishGVStub(); 1431 1432 #if DEBUG_OLD_JIT_DISASSEMBLER 1433 Disassemble(DEBUG_OLD_JIT_DISASSEMBLER_FILE, 1434 mpTarget, mpTargetMachine, F->getName(), 1435 (unsigned char const *)Stub, SL.Size); 1436 #endif 1437 1438 PendingFunctions.erase(I); 1439 } 1440 1441 1442 } // namespace bcc 1443