1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the implementation of the FastISel class. 11 // 12 // "Fast" instruction selection is designed to emit very poor code quickly. 13 // Also, it is not designed to be able to do much lowering, so most illegal 14 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is 15 // also not intended to be able to do much optimization, except in a few cases 16 // where doing optimizations reduces overall compile time. For example, folding 17 // constants into immediate fields is often done, because it's cheap and it 18 // reduces the number of instructions later phases have to examine. 19 // 20 // "Fast" instruction selection is able to fail gracefully and transfer 21 // control to the SelectionDAG selector for operations that it doesn't 22 // support. In many cases, this allows us to avoid duplicating a lot of 23 // the complicated lowering logic that SelectionDAG currently has. 24 // 25 // The intended use for "fast" instruction selection is "-O0" mode 26 // compilation, where the quality of the generated code is irrelevant when 27 // weighed against the speed at which the code can be generated. Also, 28 // at -O0, the LLVM optimizers are not running, and this makes the 29 // compile time of codegen a much higher portion of the overall compile 30 // time. Despite its limitations, "fast" instruction selection is able to 31 // handle enough code on its own to provide noticeable overall speedups 32 // in -O0 compiles. 33 // 34 // Basic operations are supported in a target-independent way, by reading 35 // the same instruction descriptions that the SelectionDAG selector reads, 36 // and identifying simple arithmetic operations that can be directly selected 37 // from simple operators. More complicated operations currently require 38 // target-specific code. 39 // 40 //===----------------------------------------------------------------------===// 41 42 #include "llvm/Function.h" 43 #include "llvm/GlobalVariable.h" 44 #include "llvm/Instructions.h" 45 #include "llvm/IntrinsicInst.h" 46 #include "llvm/Operator.h" 47 #include "llvm/CodeGen/Analysis.h" 48 #include "llvm/CodeGen/FastISel.h" 49 #include "llvm/CodeGen/FunctionLoweringInfo.h" 50 #include "llvm/CodeGen/MachineInstrBuilder.h" 51 #include "llvm/CodeGen/MachineModuleInfo.h" 52 #include "llvm/CodeGen/MachineRegisterInfo.h" 53 #include "llvm/Analysis/DebugInfo.h" 54 #include "llvm/Analysis/Loads.h" 55 #include "llvm/Target/TargetData.h" 56 #include "llvm/Target/TargetInstrInfo.h" 57 #include "llvm/Target/TargetLowering.h" 58 #include "llvm/Target/TargetMachine.h" 59 #include "llvm/Support/ErrorHandling.h" 60 #include "llvm/Support/Debug.h" 61 using namespace llvm; 62 63 /// startNewBlock - Set the current block to which generated machine 64 /// instructions will be appended, and clear the local CSE map. 65 /// 66 void FastISel::startNewBlock() { 67 LocalValueMap.clear(); 68 69 // Start out as null, meaining no local-value instructions have 70 // been emitted. 71 LastLocalValue = 0; 72 73 // Advance the last local value past any EH_LABEL instructions. 74 MachineBasicBlock::iterator 75 I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end(); 76 while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) { 77 LastLocalValue = I; 78 ++I; 79 } 80 } 81 82 bool FastISel::hasTrivialKill(const Value *V) const { 83 // Don't consider constants or arguments to have trivial kills. 84 const Instruction *I = dyn_cast<Instruction>(V); 85 if (!I) 86 return false; 87 88 // No-op casts are trivially coalesced by fast-isel. 89 if (const CastInst *Cast = dyn_cast<CastInst>(I)) 90 if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) && 91 !hasTrivialKill(Cast->getOperand(0))) 92 return false; 93 94 // Only instructions with a single use in the same basic block are considered 95 // to have trivial kills. 96 return I->hasOneUse() && 97 !(I->getOpcode() == Instruction::BitCast || 98 I->getOpcode() == Instruction::PtrToInt || 99 I->getOpcode() == Instruction::IntToPtr) && 100 cast<Instruction>(*I->use_begin())->getParent() == I->getParent(); 101 } 102 103 unsigned FastISel::getRegForValue(const Value *V) { 104 EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true); 105 // Don't handle non-simple values in FastISel. 106 if (!RealVT.isSimple()) 107 return 0; 108 109 // Ignore illegal types. We must do this before looking up the value 110 // in ValueMap because Arguments are given virtual registers regardless 111 // of whether FastISel can handle them. 112 MVT VT = RealVT.getSimpleVT(); 113 if (!TLI.isTypeLegal(VT)) { 114 // Handle integer promotions, though, because they're common and easy. 115 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 116 VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT(); 117 else 118 return 0; 119 } 120 121 // Look up the value to see if we already have a register for it. We 122 // cache values defined by Instructions across blocks, and other values 123 // only locally. This is because Instructions already have the SSA 124 // def-dominates-use requirement enforced. 125 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V); 126 if (I != FuncInfo.ValueMap.end()) 127 return I->second; 128 129 unsigned Reg = LocalValueMap[V]; 130 if (Reg != 0) 131 return Reg; 132 133 // In bottom-up mode, just create the virtual register which will be used 134 // to hold the value. It will be materialized later. 135 if (isa<Instruction>(V) && 136 (!isa<AllocaInst>(V) || 137 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) 138 return FuncInfo.InitializeRegForValue(V); 139 140 SavePoint SaveInsertPt = enterLocalValueArea(); 141 142 // Materialize the value in a register. Emit any instructions in the 143 // local value area. 144 Reg = materializeRegForValue(V, VT); 145 146 leaveLocalValueArea(SaveInsertPt); 147 148 return Reg; 149 } 150 151 /// materializeRegForValue - Helper for getRegForValue. This function is 152 /// called when the value isn't already available in a register and must 153 /// be materialized with new instructions. 154 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) { 155 unsigned Reg = 0; 156 157 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 158 if (CI->getValue().getActiveBits() <= 64) 159 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue()); 160 } else if (isa<AllocaInst>(V)) { 161 Reg = TargetMaterializeAlloca(cast<AllocaInst>(V)); 162 } else if (isa<ConstantPointerNull>(V)) { 163 // Translate this as an integer zero so that it can be 164 // local-CSE'd with actual integer zeros. 165 Reg = 166 getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext()))); 167 } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) { 168 if (CF->isNullValue()) { 169 Reg = TargetMaterializeFloatZero(CF); 170 } else { 171 // Try to emit the constant directly. 172 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF); 173 } 174 175 if (!Reg) { 176 // Try to emit the constant by using an integer constant with a cast. 177 const APFloat &Flt = CF->getValueAPF(); 178 EVT IntVT = TLI.getPointerTy(); 179 180 uint64_t x[2]; 181 uint32_t IntBitWidth = IntVT.getSizeInBits(); 182 bool isExact; 183 (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true, 184 APFloat::rmTowardZero, &isExact); 185 if (isExact) { 186 APInt IntVal(IntBitWidth, x); 187 188 unsigned IntegerReg = 189 getRegForValue(ConstantInt::get(V->getContext(), IntVal)); 190 if (IntegerReg != 0) 191 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, 192 IntegerReg, /*Kill=*/false); 193 } 194 } 195 } else if (const Operator *Op = dyn_cast<Operator>(V)) { 196 if (!SelectOperator(Op, Op->getOpcode())) 197 if (!isa<Instruction>(Op) || 198 !TargetSelectInstruction(cast<Instruction>(Op))) 199 return 0; 200 Reg = lookUpRegForValue(Op); 201 } else if (isa<UndefValue>(V)) { 202 Reg = createResultReg(TLI.getRegClassFor(VT)); 203 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 204 TII.get(TargetOpcode::IMPLICIT_DEF), Reg); 205 } 206 207 // If target-independent code couldn't handle the value, give target-specific 208 // code a try. 209 if (!Reg && isa<Constant>(V)) 210 Reg = TargetMaterializeConstant(cast<Constant>(V)); 211 212 // Don't cache constant materializations in the general ValueMap. 213 // To do so would require tracking what uses they dominate. 214 if (Reg != 0) { 215 LocalValueMap[V] = Reg; 216 LastLocalValue = MRI.getVRegDef(Reg); 217 } 218 return Reg; 219 } 220 221 unsigned FastISel::lookUpRegForValue(const Value *V) { 222 // Look up the value to see if we already have a register for it. We 223 // cache values defined by Instructions across blocks, and other values 224 // only locally. This is because Instructions already have the SSA 225 // def-dominates-use requirement enforced. 226 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V); 227 if (I != FuncInfo.ValueMap.end()) 228 return I->second; 229 return LocalValueMap[V]; 230 } 231 232 /// UpdateValueMap - Update the value map to include the new mapping for this 233 /// instruction, or insert an extra copy to get the result in a previous 234 /// determined register. 235 /// NOTE: This is only necessary because we might select a block that uses 236 /// a value before we select the block that defines the value. It might be 237 /// possible to fix this by selecting blocks in reverse postorder. 238 void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) { 239 if (!isa<Instruction>(I)) { 240 LocalValueMap[I] = Reg; 241 return; 242 } 243 244 unsigned &AssignedReg = FuncInfo.ValueMap[I]; 245 if (AssignedReg == 0) 246 // Use the new register. 247 AssignedReg = Reg; 248 else if (Reg != AssignedReg) { 249 // Arrange for uses of AssignedReg to be replaced by uses of Reg. 250 for (unsigned i = 0; i < NumRegs; i++) 251 FuncInfo.RegFixups[AssignedReg+i] = Reg+i; 252 253 AssignedReg = Reg; 254 } 255 } 256 257 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) { 258 unsigned IdxN = getRegForValue(Idx); 259 if (IdxN == 0) 260 // Unhandled operand. Halt "fast" selection and bail. 261 return std::pair<unsigned, bool>(0, false); 262 263 bool IdxNIsKill = hasTrivialKill(Idx); 264 265 // If the index is smaller or larger than intptr_t, truncate or extend it. 266 MVT PtrVT = TLI.getPointerTy(); 267 EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false); 268 if (IdxVT.bitsLT(PtrVT)) { 269 IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, 270 IdxN, IdxNIsKill); 271 IdxNIsKill = true; 272 } 273 else if (IdxVT.bitsGT(PtrVT)) { 274 IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, 275 IdxN, IdxNIsKill); 276 IdxNIsKill = true; 277 } 278 return std::pair<unsigned, bool>(IdxN, IdxNIsKill); 279 } 280 281 void FastISel::recomputeInsertPt() { 282 if (getLastLocalValue()) { 283 FuncInfo.InsertPt = getLastLocalValue(); 284 FuncInfo.MBB = FuncInfo.InsertPt->getParent(); 285 ++FuncInfo.InsertPt; 286 } else 287 FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI(); 288 289 // Now skip past any EH_LABELs, which must remain at the beginning. 290 while (FuncInfo.InsertPt != FuncInfo.MBB->end() && 291 FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL) 292 ++FuncInfo.InsertPt; 293 } 294 295 FastISel::SavePoint FastISel::enterLocalValueArea() { 296 MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt; 297 DebugLoc OldDL = DL; 298 recomputeInsertPt(); 299 DL = DebugLoc(); 300 SavePoint SP = { OldInsertPt, OldDL }; 301 return SP; 302 } 303 304 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) { 305 if (FuncInfo.InsertPt != FuncInfo.MBB->begin()) 306 LastLocalValue = llvm::prior(FuncInfo.InsertPt); 307 308 // Restore the previous insert position. 309 FuncInfo.InsertPt = OldInsertPt.InsertPt; 310 DL = OldInsertPt.DL; 311 } 312 313 /// SelectBinaryOp - Select and emit code for a binary operator instruction, 314 /// which has an opcode which directly corresponds to the given ISD opcode. 315 /// 316 bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) { 317 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true); 318 if (VT == MVT::Other || !VT.isSimple()) 319 // Unhandled type. Halt "fast" selection and bail. 320 return false; 321 322 // We only handle legal types. For example, on x86-32 the instruction 323 // selector contains all of the 64-bit instructions from x86-64, 324 // under the assumption that i64 won't be used if the target doesn't 325 // support it. 326 if (!TLI.isTypeLegal(VT)) { 327 // MVT::i1 is special. Allow AND, OR, or XOR because they 328 // don't require additional zeroing, which makes them easy. 329 if (VT == MVT::i1 && 330 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR || 331 ISDOpcode == ISD::XOR)) 332 VT = TLI.getTypeToTransformTo(I->getContext(), VT); 333 else 334 return false; 335 } 336 337 // Check if the first operand is a constant, and handle it as "ri". At -O0, 338 // we don't have anything that canonicalizes operand order. 339 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0))) 340 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) { 341 unsigned Op1 = getRegForValue(I->getOperand(1)); 342 if (Op1 == 0) return false; 343 344 bool Op1IsKill = hasTrivialKill(I->getOperand(1)); 345 346 unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, 347 Op1IsKill, CI->getZExtValue(), 348 VT.getSimpleVT()); 349 if (ResultReg == 0) return false; 350 351 // We successfully emitted code for the given LLVM Instruction. 352 UpdateValueMap(I, ResultReg); 353 return true; 354 } 355 356 357 unsigned Op0 = getRegForValue(I->getOperand(0)); 358 if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail. 359 return false; 360 361 bool Op0IsKill = hasTrivialKill(I->getOperand(0)); 362 363 // Check if the second operand is a constant and handle it appropriately. 364 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { 365 uint64_t Imm = CI->getZExtValue(); 366 367 // Transform "sdiv exact X, 8" -> "sra X, 3". 368 if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) && 369 cast<BinaryOperator>(I)->isExact() && 370 isPowerOf2_64(Imm)) { 371 Imm = Log2_64(Imm); 372 ISDOpcode = ISD::SRA; 373 } 374 375 unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, 376 Op0IsKill, Imm, VT.getSimpleVT()); 377 if (ResultReg == 0) return false; 378 379 // We successfully emitted code for the given LLVM Instruction. 380 UpdateValueMap(I, ResultReg); 381 return true; 382 } 383 384 // Check if the second operand is a constant float. 385 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) { 386 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(), 387 ISDOpcode, Op0, Op0IsKill, CF); 388 if (ResultReg != 0) { 389 // We successfully emitted code for the given LLVM Instruction. 390 UpdateValueMap(I, ResultReg); 391 return true; 392 } 393 } 394 395 unsigned Op1 = getRegForValue(I->getOperand(1)); 396 if (Op1 == 0) 397 // Unhandled operand. Halt "fast" selection and bail. 398 return false; 399 400 bool Op1IsKill = hasTrivialKill(I->getOperand(1)); 401 402 // Now we have both operands in registers. Emit the instruction. 403 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), 404 ISDOpcode, 405 Op0, Op0IsKill, 406 Op1, Op1IsKill); 407 if (ResultReg == 0) 408 // Target-specific code wasn't able to find a machine opcode for 409 // the given ISD opcode and type. Halt "fast" selection and bail. 410 return false; 411 412 // We successfully emitted code for the given LLVM Instruction. 413 UpdateValueMap(I, ResultReg); 414 return true; 415 } 416 417 bool FastISel::SelectGetElementPtr(const User *I) { 418 unsigned N = getRegForValue(I->getOperand(0)); 419 if (N == 0) 420 // Unhandled operand. Halt "fast" selection and bail. 421 return false; 422 423 bool NIsKill = hasTrivialKill(I->getOperand(0)); 424 425 Type *Ty = I->getOperand(0)->getType(); 426 MVT VT = TLI.getPointerTy(); 427 for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1, 428 E = I->op_end(); OI != E; ++OI) { 429 const Value *Idx = *OI; 430 if (StructType *StTy = dyn_cast<StructType>(Ty)) { 431 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue(); 432 if (Field) { 433 // N = N + Offset 434 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field); 435 // FIXME: This can be optimized by combining the add with a 436 // subsequent one. 437 N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT); 438 if (N == 0) 439 // Unhandled operand. Halt "fast" selection and bail. 440 return false; 441 NIsKill = true; 442 } 443 Ty = StTy->getElementType(Field); 444 } else { 445 Ty = cast<SequentialType>(Ty)->getElementType(); 446 447 // If this is a constant subscript, handle it quickly. 448 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 449 if (CI->isZero()) continue; 450 uint64_t Offs = 451 TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue(); 452 N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT); 453 if (N == 0) 454 // Unhandled operand. Halt "fast" selection and bail. 455 return false; 456 NIsKill = true; 457 continue; 458 } 459 460 // N = N + Idx * ElementSize; 461 uint64_t ElementSize = TD.getTypeAllocSize(Ty); 462 std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx); 463 unsigned IdxN = Pair.first; 464 bool IdxNIsKill = Pair.second; 465 if (IdxN == 0) 466 // Unhandled operand. Halt "fast" selection and bail. 467 return false; 468 469 if (ElementSize != 1) { 470 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT); 471 if (IdxN == 0) 472 // Unhandled operand. Halt "fast" selection and bail. 473 return false; 474 IdxNIsKill = true; 475 } 476 N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill); 477 if (N == 0) 478 // Unhandled operand. Halt "fast" selection and bail. 479 return false; 480 } 481 } 482 483 // We successfully emitted code for the given LLVM Instruction. 484 UpdateValueMap(I, N); 485 return true; 486 } 487 488 bool FastISel::SelectCall(const User *I) { 489 const CallInst *Call = cast<CallInst>(I); 490 491 // Handle simple inline asms. 492 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getArgOperand(0))) { 493 // Don't attempt to handle constraints. 494 if (!IA->getConstraintString().empty()) 495 return false; 496 497 unsigned ExtraInfo = 0; 498 if (IA->hasSideEffects()) 499 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 500 if (IA->isAlignStack()) 501 ExtraInfo |= InlineAsm::Extra_IsAlignStack; 502 503 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 504 TII.get(TargetOpcode::INLINEASM)) 505 .addExternalSymbol(IA->getAsmString().c_str()) 506 .addImm(ExtraInfo); 507 return true; 508 } 509 510 const Function *F = Call->getCalledFunction(); 511 if (!F) return false; 512 513 // Handle selected intrinsic function calls. 514 switch (F->getIntrinsicID()) { 515 default: break; 516 case Intrinsic::dbg_declare: { 517 const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call); 518 if (!DIVariable(DI->getVariable()).Verify() || 519 !FuncInfo.MF->getMMI().hasDebugInfo()) 520 return true; 521 522 const Value *Address = DI->getAddress(); 523 if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address)) 524 return true; 525 526 unsigned Reg = 0; 527 unsigned Offset = 0; 528 if (const Argument *Arg = dyn_cast<Argument>(Address)) { 529 if (Arg->hasByValAttr()) { 530 // Byval arguments' frame index is recorded during argument lowering. 531 // Use this info directly. 532 Offset = FuncInfo.getByValArgumentFrameIndex(Arg); 533 if (Offset) 534 Reg = TRI.getFrameRegister(*FuncInfo.MF); 535 } 536 } 537 if (!Reg) 538 Reg = getRegForValue(Address); 539 540 if (Reg) 541 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 542 TII.get(TargetOpcode::DBG_VALUE)) 543 .addReg(Reg, RegState::Debug).addImm(Offset) 544 .addMetadata(DI->getVariable()); 545 return true; 546 } 547 case Intrinsic::dbg_value: { 548 // This form of DBG_VALUE is target-independent. 549 const DbgValueInst *DI = cast<DbgValueInst>(Call); 550 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); 551 const Value *V = DI->getValue(); 552 if (!V) { 553 // Currently the optimizer can produce this; insert an undef to 554 // help debugging. Probably the optimizer should not do this. 555 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 556 .addReg(0U).addImm(DI->getOffset()) 557 .addMetadata(DI->getVariable()); 558 } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 559 if (CI->getBitWidth() > 64) 560 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 561 .addCImm(CI).addImm(DI->getOffset()) 562 .addMetadata(DI->getVariable()); 563 else 564 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 565 .addImm(CI->getZExtValue()).addImm(DI->getOffset()) 566 .addMetadata(DI->getVariable()); 567 } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) { 568 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 569 .addFPImm(CF).addImm(DI->getOffset()) 570 .addMetadata(DI->getVariable()); 571 } else if (unsigned Reg = lookUpRegForValue(V)) { 572 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 573 .addReg(Reg, RegState::Debug).addImm(DI->getOffset()) 574 .addMetadata(DI->getVariable()); 575 } else { 576 // We can't yet handle anything else here because it would require 577 // generating code, thus altering codegen because of debug info. 578 DEBUG(dbgs() << "Dropping debug info for " << DI); 579 } 580 return true; 581 } 582 case Intrinsic::eh_exception: { 583 EVT VT = TLI.getValueType(Call->getType()); 584 if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand) 585 break; 586 587 assert(FuncInfo.MBB->isLandingPad() && 588 "Call to eh.exception not in landing pad!"); 589 unsigned Reg = TLI.getExceptionAddressRegister(); 590 const TargetRegisterClass *RC = TLI.getRegClassFor(VT); 591 unsigned ResultReg = createResultReg(RC); 592 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 593 ResultReg).addReg(Reg); 594 UpdateValueMap(Call, ResultReg); 595 return true; 596 } 597 case Intrinsic::eh_selector: { 598 EVT VT = TLI.getValueType(Call->getType()); 599 if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand) 600 break; 601 if (FuncInfo.MBB->isLandingPad()) 602 AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB); 603 else { 604 #ifndef NDEBUG 605 FuncInfo.CatchInfoLost.insert(Call); 606 #endif 607 // FIXME: Mark exception selector register as live in. Hack for PR1508. 608 unsigned Reg = TLI.getExceptionSelectorRegister(); 609 if (Reg) FuncInfo.MBB->addLiveIn(Reg); 610 } 611 612 unsigned Reg = TLI.getExceptionSelectorRegister(); 613 EVT SrcVT = TLI.getPointerTy(); 614 const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT); 615 unsigned ResultReg = createResultReg(RC); 616 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 617 ResultReg).addReg(Reg); 618 619 bool ResultRegIsKill = hasTrivialKill(Call); 620 621 // Cast the register to the type of the selector. 622 if (SrcVT.bitsGT(MVT::i32)) 623 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE, 624 ResultReg, ResultRegIsKill); 625 else if (SrcVT.bitsLT(MVT::i32)) 626 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, 627 ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill); 628 if (ResultReg == 0) 629 // Unhandled operand. Halt "fast" selection and bail. 630 return false; 631 632 UpdateValueMap(Call, ResultReg); 633 634 return true; 635 } 636 case Intrinsic::objectsize: { 637 ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1)); 638 unsigned long long Res = CI->isZero() ? -1ULL : 0; 639 Constant *ResCI = ConstantInt::get(Call->getType(), Res); 640 unsigned ResultReg = getRegForValue(ResCI); 641 if (ResultReg == 0) 642 return false; 643 UpdateValueMap(Call, ResultReg); 644 return true; 645 } 646 } 647 648 // An arbitrary call. Bail. 649 return false; 650 } 651 652 bool FastISel::SelectCast(const User *I, unsigned Opcode) { 653 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); 654 EVT DstVT = TLI.getValueType(I->getType()); 655 656 if (SrcVT == MVT::Other || !SrcVT.isSimple() || 657 DstVT == MVT::Other || !DstVT.isSimple()) 658 // Unhandled type. Halt "fast" selection and bail. 659 return false; 660 661 // Check if the destination type is legal. 662 if (!TLI.isTypeLegal(DstVT)) 663 return false; 664 665 // Check if the source operand is legal. 666 if (!TLI.isTypeLegal(SrcVT)) 667 return false; 668 669 unsigned InputReg = getRegForValue(I->getOperand(0)); 670 if (!InputReg) 671 // Unhandled operand. Halt "fast" selection and bail. 672 return false; 673 674 bool InputRegIsKill = hasTrivialKill(I->getOperand(0)); 675 676 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), 677 DstVT.getSimpleVT(), 678 Opcode, 679 InputReg, InputRegIsKill); 680 if (!ResultReg) 681 return false; 682 683 UpdateValueMap(I, ResultReg); 684 return true; 685 } 686 687 bool FastISel::SelectBitCast(const User *I) { 688 // If the bitcast doesn't change the type, just use the operand value. 689 if (I->getType() == I->getOperand(0)->getType()) { 690 unsigned Reg = getRegForValue(I->getOperand(0)); 691 if (Reg == 0) 692 return false; 693 UpdateValueMap(I, Reg); 694 return true; 695 } 696 697 // Bitcasts of other values become reg-reg copies or BITCAST operators. 698 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); 699 EVT DstVT = TLI.getValueType(I->getType()); 700 701 if (SrcVT == MVT::Other || !SrcVT.isSimple() || 702 DstVT == MVT::Other || !DstVT.isSimple() || 703 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT)) 704 // Unhandled type. Halt "fast" selection and bail. 705 return false; 706 707 unsigned Op0 = getRegForValue(I->getOperand(0)); 708 if (Op0 == 0) 709 // Unhandled operand. Halt "fast" selection and bail. 710 return false; 711 712 bool Op0IsKill = hasTrivialKill(I->getOperand(0)); 713 714 // First, try to perform the bitcast by inserting a reg-reg copy. 715 unsigned ResultReg = 0; 716 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) { 717 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT); 718 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT); 719 // Don't attempt a cross-class copy. It will likely fail. 720 if (SrcClass == DstClass) { 721 ResultReg = createResultReg(DstClass); 722 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 723 ResultReg).addReg(Op0); 724 } 725 } 726 727 // If the reg-reg copy failed, select a BITCAST opcode. 728 if (!ResultReg) 729 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), 730 ISD::BITCAST, Op0, Op0IsKill); 731 732 if (!ResultReg) 733 return false; 734 735 UpdateValueMap(I, ResultReg); 736 return true; 737 } 738 739 bool 740 FastISel::SelectInstruction(const Instruction *I) { 741 // Just before the terminator instruction, insert instructions to 742 // feed PHI nodes in successor blocks. 743 if (isa<TerminatorInst>(I)) 744 if (!HandlePHINodesInSuccessorBlocks(I->getParent())) 745 return false; 746 747 DL = I->getDebugLoc(); 748 749 // First, try doing target-independent selection. 750 if (SelectOperator(I, I->getOpcode())) { 751 DL = DebugLoc(); 752 return true; 753 } 754 755 // Next, try calling the target to attempt to handle the instruction. 756 if (TargetSelectInstruction(I)) { 757 DL = DebugLoc(); 758 return true; 759 } 760 761 DL = DebugLoc(); 762 return false; 763 } 764 765 /// FastEmitBranch - Emit an unconditional branch to the given block, 766 /// unless it is the immediate (fall-through) successor, and update 767 /// the CFG. 768 void 769 FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) { 770 if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) { 771 // The unconditional fall-through case, which needs no instructions. 772 } else { 773 // The unconditional branch case. 774 TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL, 775 SmallVector<MachineOperand, 0>(), DL); 776 } 777 FuncInfo.MBB->addSuccessor(MSucc); 778 } 779 780 /// SelectFNeg - Emit an FNeg operation. 781 /// 782 bool 783 FastISel::SelectFNeg(const User *I) { 784 unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I)); 785 if (OpReg == 0) return false; 786 787 bool OpRegIsKill = hasTrivialKill(I); 788 789 // If the target has ISD::FNEG, use it. 790 EVT VT = TLI.getValueType(I->getType()); 791 unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), 792 ISD::FNEG, OpReg, OpRegIsKill); 793 if (ResultReg != 0) { 794 UpdateValueMap(I, ResultReg); 795 return true; 796 } 797 798 // Bitcast the value to integer, twiddle the sign bit with xor, 799 // and then bitcast it back to floating-point. 800 if (VT.getSizeInBits() > 64) return false; 801 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits()); 802 if (!TLI.isTypeLegal(IntVT)) 803 return false; 804 805 unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), 806 ISD::BITCAST, OpReg, OpRegIsKill); 807 if (IntReg == 0) 808 return false; 809 810 unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, 811 IntReg, /*Kill=*/true, 812 UINT64_C(1) << (VT.getSizeInBits()-1), 813 IntVT.getSimpleVT()); 814 if (IntResultReg == 0) 815 return false; 816 817 ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), 818 ISD::BITCAST, IntResultReg, /*Kill=*/true); 819 if (ResultReg == 0) 820 return false; 821 822 UpdateValueMap(I, ResultReg); 823 return true; 824 } 825 826 bool 827 FastISel::SelectExtractValue(const User *U) { 828 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U); 829 if (!EVI) 830 return false; 831 832 // Make sure we only try to handle extracts with a legal result. But also 833 // allow i1 because it's easy. 834 EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true); 835 if (!RealVT.isSimple()) 836 return false; 837 MVT VT = RealVT.getSimpleVT(); 838 if (!TLI.isTypeLegal(VT) && VT != MVT::i1) 839 return false; 840 841 const Value *Op0 = EVI->getOperand(0); 842 Type *AggTy = Op0->getType(); 843 844 // Get the base result register. 845 unsigned ResultReg; 846 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0); 847 if (I != FuncInfo.ValueMap.end()) 848 ResultReg = I->second; 849 else if (isa<Instruction>(Op0)) 850 ResultReg = FuncInfo.InitializeRegForValue(Op0); 851 else 852 return false; // fast-isel can't handle aggregate constants at the moment 853 854 // Get the actual result register, which is an offset from the base register. 855 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices()); 856 857 SmallVector<EVT, 4> AggValueVTs; 858 ComputeValueVTs(TLI, AggTy, AggValueVTs); 859 860 for (unsigned i = 0; i < VTIndex; i++) 861 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]); 862 863 UpdateValueMap(EVI, ResultReg); 864 return true; 865 } 866 867 bool 868 FastISel::SelectOperator(const User *I, unsigned Opcode) { 869 switch (Opcode) { 870 case Instruction::Add: 871 return SelectBinaryOp(I, ISD::ADD); 872 case Instruction::FAdd: 873 return SelectBinaryOp(I, ISD::FADD); 874 case Instruction::Sub: 875 return SelectBinaryOp(I, ISD::SUB); 876 case Instruction::FSub: 877 // FNeg is currently represented in LLVM IR as a special case of FSub. 878 if (BinaryOperator::isFNeg(I)) 879 return SelectFNeg(I); 880 return SelectBinaryOp(I, ISD::FSUB); 881 case Instruction::Mul: 882 return SelectBinaryOp(I, ISD::MUL); 883 case Instruction::FMul: 884 return SelectBinaryOp(I, ISD::FMUL); 885 case Instruction::SDiv: 886 return SelectBinaryOp(I, ISD::SDIV); 887 case Instruction::UDiv: 888 return SelectBinaryOp(I, ISD::UDIV); 889 case Instruction::FDiv: 890 return SelectBinaryOp(I, ISD::FDIV); 891 case Instruction::SRem: 892 return SelectBinaryOp(I, ISD::SREM); 893 case Instruction::URem: 894 return SelectBinaryOp(I, ISD::UREM); 895 case Instruction::FRem: 896 return SelectBinaryOp(I, ISD::FREM); 897 case Instruction::Shl: 898 return SelectBinaryOp(I, ISD::SHL); 899 case Instruction::LShr: 900 return SelectBinaryOp(I, ISD::SRL); 901 case Instruction::AShr: 902 return SelectBinaryOp(I, ISD::SRA); 903 case Instruction::And: 904 return SelectBinaryOp(I, ISD::AND); 905 case Instruction::Or: 906 return SelectBinaryOp(I, ISD::OR); 907 case Instruction::Xor: 908 return SelectBinaryOp(I, ISD::XOR); 909 910 case Instruction::GetElementPtr: 911 return SelectGetElementPtr(I); 912 913 case Instruction::Br: { 914 const BranchInst *BI = cast<BranchInst>(I); 915 916 if (BI->isUnconditional()) { 917 const BasicBlock *LLVMSucc = BI->getSuccessor(0); 918 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc]; 919 FastEmitBranch(MSucc, BI->getDebugLoc()); 920 return true; 921 } 922 923 // Conditional branches are not handed yet. 924 // Halt "fast" selection and bail. 925 return false; 926 } 927 928 case Instruction::Unreachable: 929 // Nothing to emit. 930 return true; 931 932 case Instruction::Alloca: 933 // FunctionLowering has the static-sized case covered. 934 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I))) 935 return true; 936 937 // Dynamic-sized alloca is not handled yet. 938 return false; 939 940 case Instruction::Call: 941 return SelectCall(I); 942 943 case Instruction::BitCast: 944 return SelectBitCast(I); 945 946 case Instruction::FPToSI: 947 return SelectCast(I, ISD::FP_TO_SINT); 948 case Instruction::ZExt: 949 return SelectCast(I, ISD::ZERO_EXTEND); 950 case Instruction::SExt: 951 return SelectCast(I, ISD::SIGN_EXTEND); 952 case Instruction::Trunc: 953 return SelectCast(I, ISD::TRUNCATE); 954 case Instruction::SIToFP: 955 return SelectCast(I, ISD::SINT_TO_FP); 956 957 case Instruction::IntToPtr: // Deliberate fall-through. 958 case Instruction::PtrToInt: { 959 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); 960 EVT DstVT = TLI.getValueType(I->getType()); 961 if (DstVT.bitsGT(SrcVT)) 962 return SelectCast(I, ISD::ZERO_EXTEND); 963 if (DstVT.bitsLT(SrcVT)) 964 return SelectCast(I, ISD::TRUNCATE); 965 unsigned Reg = getRegForValue(I->getOperand(0)); 966 if (Reg == 0) return false; 967 UpdateValueMap(I, Reg); 968 return true; 969 } 970 971 case Instruction::ExtractValue: 972 return SelectExtractValue(I); 973 974 case Instruction::PHI: 975 llvm_unreachable("FastISel shouldn't visit PHI nodes!"); 976 977 default: 978 // Unhandled instruction. Halt "fast" selection and bail. 979 return false; 980 } 981 } 982 983 FastISel::FastISel(FunctionLoweringInfo &funcInfo) 984 : FuncInfo(funcInfo), 985 MRI(FuncInfo.MF->getRegInfo()), 986 MFI(*FuncInfo.MF->getFrameInfo()), 987 MCP(*FuncInfo.MF->getConstantPool()), 988 TM(FuncInfo.MF->getTarget()), 989 TD(*TM.getTargetData()), 990 TII(*TM.getInstrInfo()), 991 TLI(*TM.getTargetLowering()), 992 TRI(*TM.getRegisterInfo()) { 993 } 994 995 FastISel::~FastISel() {} 996 997 unsigned FastISel::FastEmit_(MVT, MVT, 998 unsigned) { 999 return 0; 1000 } 1001 1002 unsigned FastISel::FastEmit_r(MVT, MVT, 1003 unsigned, 1004 unsigned /*Op0*/, bool /*Op0IsKill*/) { 1005 return 0; 1006 } 1007 1008 unsigned FastISel::FastEmit_rr(MVT, MVT, 1009 unsigned, 1010 unsigned /*Op0*/, bool /*Op0IsKill*/, 1011 unsigned /*Op1*/, bool /*Op1IsKill*/) { 1012 return 0; 1013 } 1014 1015 unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) { 1016 return 0; 1017 } 1018 1019 unsigned FastISel::FastEmit_f(MVT, MVT, 1020 unsigned, const ConstantFP * /*FPImm*/) { 1021 return 0; 1022 } 1023 1024 unsigned FastISel::FastEmit_ri(MVT, MVT, 1025 unsigned, 1026 unsigned /*Op0*/, bool /*Op0IsKill*/, 1027 uint64_t /*Imm*/) { 1028 return 0; 1029 } 1030 1031 unsigned FastISel::FastEmit_rf(MVT, MVT, 1032 unsigned, 1033 unsigned /*Op0*/, bool /*Op0IsKill*/, 1034 const ConstantFP * /*FPImm*/) { 1035 return 0; 1036 } 1037 1038 unsigned FastISel::FastEmit_rri(MVT, MVT, 1039 unsigned, 1040 unsigned /*Op0*/, bool /*Op0IsKill*/, 1041 unsigned /*Op1*/, bool /*Op1IsKill*/, 1042 uint64_t /*Imm*/) { 1043 return 0; 1044 } 1045 1046 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries 1047 /// to emit an instruction with an immediate operand using FastEmit_ri. 1048 /// If that fails, it materializes the immediate into a register and try 1049 /// FastEmit_rr instead. 1050 unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, 1051 unsigned Op0, bool Op0IsKill, 1052 uint64_t Imm, MVT ImmType) { 1053 // If this is a multiply by a power of two, emit this as a shift left. 1054 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) { 1055 Opcode = ISD::SHL; 1056 Imm = Log2_64(Imm); 1057 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) { 1058 // div x, 8 -> srl x, 3 1059 Opcode = ISD::SRL; 1060 Imm = Log2_64(Imm); 1061 } 1062 1063 // Horrible hack (to be removed), check to make sure shift amounts are 1064 // in-range. 1065 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) && 1066 Imm >= VT.getSizeInBits()) 1067 return 0; 1068 1069 // First check if immediate type is legal. If not, we can't use the ri form. 1070 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm); 1071 if (ResultReg != 0) 1072 return ResultReg; 1073 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm); 1074 if (MaterialReg == 0) { 1075 // This is a bit ugly/slow, but failing here means falling out of 1076 // fast-isel, which would be very slow. 1077 IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(), 1078 VT.getSizeInBits()); 1079 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); 1080 } 1081 return FastEmit_rr(VT, VT, Opcode, 1082 Op0, Op0IsKill, 1083 MaterialReg, /*Kill=*/true); 1084 } 1085 1086 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) { 1087 return MRI.createVirtualRegister(RC); 1088 } 1089 1090 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode, 1091 const TargetRegisterClass* RC) { 1092 unsigned ResultReg = createResultReg(RC); 1093 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1094 1095 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg); 1096 return ResultReg; 1097 } 1098 1099 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode, 1100 const TargetRegisterClass *RC, 1101 unsigned Op0, bool Op0IsKill) { 1102 unsigned ResultReg = createResultReg(RC); 1103 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1104 1105 if (II.getNumDefs() >= 1) 1106 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1107 .addReg(Op0, Op0IsKill * RegState::Kill); 1108 else { 1109 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1110 .addReg(Op0, Op0IsKill * RegState::Kill); 1111 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1112 ResultReg).addReg(II.ImplicitDefs[0]); 1113 } 1114 1115 return ResultReg; 1116 } 1117 1118 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 1119 const TargetRegisterClass *RC, 1120 unsigned Op0, bool Op0IsKill, 1121 unsigned Op1, bool Op1IsKill) { 1122 unsigned ResultReg = createResultReg(RC); 1123 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1124 1125 if (II.getNumDefs() >= 1) 1126 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1127 .addReg(Op0, Op0IsKill * RegState::Kill) 1128 .addReg(Op1, Op1IsKill * RegState::Kill); 1129 else { 1130 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1131 .addReg(Op0, Op0IsKill * RegState::Kill) 1132 .addReg(Op1, Op1IsKill * RegState::Kill); 1133 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1134 ResultReg).addReg(II.ImplicitDefs[0]); 1135 } 1136 return ResultReg; 1137 } 1138 1139 unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, 1140 const TargetRegisterClass *RC, 1141 unsigned Op0, bool Op0IsKill, 1142 unsigned Op1, bool Op1IsKill, 1143 unsigned Op2, bool Op2IsKill) { 1144 unsigned ResultReg = createResultReg(RC); 1145 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1146 1147 if (II.getNumDefs() >= 1) 1148 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1149 .addReg(Op0, Op0IsKill * RegState::Kill) 1150 .addReg(Op1, Op1IsKill * RegState::Kill) 1151 .addReg(Op2, Op2IsKill * RegState::Kill); 1152 else { 1153 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1154 .addReg(Op0, Op0IsKill * RegState::Kill) 1155 .addReg(Op1, Op1IsKill * RegState::Kill) 1156 .addReg(Op2, Op2IsKill * RegState::Kill); 1157 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1158 ResultReg).addReg(II.ImplicitDefs[0]); 1159 } 1160 return ResultReg; 1161 } 1162 1163 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode, 1164 const TargetRegisterClass *RC, 1165 unsigned Op0, bool Op0IsKill, 1166 uint64_t Imm) { 1167 unsigned ResultReg = createResultReg(RC); 1168 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1169 1170 if (II.getNumDefs() >= 1) 1171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1172 .addReg(Op0, Op0IsKill * RegState::Kill) 1173 .addImm(Imm); 1174 else { 1175 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1176 .addReg(Op0, Op0IsKill * RegState::Kill) 1177 .addImm(Imm); 1178 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1179 ResultReg).addReg(II.ImplicitDefs[0]); 1180 } 1181 return ResultReg; 1182 } 1183 1184 unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode, 1185 const TargetRegisterClass *RC, 1186 unsigned Op0, bool Op0IsKill, 1187 uint64_t Imm1, uint64_t Imm2) { 1188 unsigned ResultReg = createResultReg(RC); 1189 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1190 1191 if (II.getNumDefs() >= 1) 1192 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1193 .addReg(Op0, Op0IsKill * RegState::Kill) 1194 .addImm(Imm1) 1195 .addImm(Imm2); 1196 else { 1197 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1198 .addReg(Op0, Op0IsKill * RegState::Kill) 1199 .addImm(Imm1) 1200 .addImm(Imm2); 1201 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1202 ResultReg).addReg(II.ImplicitDefs[0]); 1203 } 1204 return ResultReg; 1205 } 1206 1207 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode, 1208 const TargetRegisterClass *RC, 1209 unsigned Op0, bool Op0IsKill, 1210 const ConstantFP *FPImm) { 1211 unsigned ResultReg = createResultReg(RC); 1212 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1213 1214 if (II.getNumDefs() >= 1) 1215 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1216 .addReg(Op0, Op0IsKill * RegState::Kill) 1217 .addFPImm(FPImm); 1218 else { 1219 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1220 .addReg(Op0, Op0IsKill * RegState::Kill) 1221 .addFPImm(FPImm); 1222 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1223 ResultReg).addReg(II.ImplicitDefs[0]); 1224 } 1225 return ResultReg; 1226 } 1227 1228 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 1229 const TargetRegisterClass *RC, 1230 unsigned Op0, bool Op0IsKill, 1231 unsigned Op1, bool Op1IsKill, 1232 uint64_t Imm) { 1233 unsigned ResultReg = createResultReg(RC); 1234 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1235 1236 if (II.getNumDefs() >= 1) 1237 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1238 .addReg(Op0, Op0IsKill * RegState::Kill) 1239 .addReg(Op1, Op1IsKill * RegState::Kill) 1240 .addImm(Imm); 1241 else { 1242 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 1243 .addReg(Op0, Op0IsKill * RegState::Kill) 1244 .addReg(Op1, Op1IsKill * RegState::Kill) 1245 .addImm(Imm); 1246 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1247 ResultReg).addReg(II.ImplicitDefs[0]); 1248 } 1249 return ResultReg; 1250 } 1251 1252 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode, 1253 const TargetRegisterClass *RC, 1254 uint64_t Imm) { 1255 unsigned ResultReg = createResultReg(RC); 1256 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1257 1258 if (II.getNumDefs() >= 1) 1259 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm); 1260 else { 1261 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm); 1262 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1263 ResultReg).addReg(II.ImplicitDefs[0]); 1264 } 1265 return ResultReg; 1266 } 1267 1268 unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 1269 const TargetRegisterClass *RC, 1270 uint64_t Imm1, uint64_t Imm2) { 1271 unsigned ResultReg = createResultReg(RC); 1272 const MCInstrDesc &II = TII.get(MachineInstOpcode); 1273 1274 if (II.getNumDefs() >= 1) 1275 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 1276 .addImm(Imm1).addImm(Imm2); 1277 else { 1278 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2); 1279 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1280 ResultReg).addReg(II.ImplicitDefs[0]); 1281 } 1282 return ResultReg; 1283 } 1284 1285 unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, 1286 unsigned Op0, bool Op0IsKill, 1287 uint32_t Idx) { 1288 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 1289 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 1290 "Cannot yet extract from physregs"); 1291 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 1292 DL, TII.get(TargetOpcode::COPY), ResultReg) 1293 .addReg(Op0, getKillRegState(Op0IsKill), Idx); 1294 return ResultReg; 1295 } 1296 1297 /// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op 1298 /// with all but the least significant bit set to zero. 1299 unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) { 1300 return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1); 1301 } 1302 1303 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks. 1304 /// Emit code to ensure constants are copied into registers when needed. 1305 /// Remember the virtual registers that need to be added to the Machine PHI 1306 /// nodes as input. We cannot just directly add them, because expansion 1307 /// might result in multiple MBB's for one BB. As such, the start of the 1308 /// BB might correspond to a different MBB than the end. 1309 bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 1310 const TerminatorInst *TI = LLVMBB->getTerminator(); 1311 1312 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 1313 unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size(); 1314 1315 // Check successor nodes' PHI nodes that expect a constant to be available 1316 // from this block. 1317 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 1318 const BasicBlock *SuccBB = TI->getSuccessor(succ); 1319 if (!isa<PHINode>(SuccBB->begin())) continue; 1320 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 1321 1322 // If this terminator has multiple identical successors (common for 1323 // switches), only handle each succ once. 1324 if (!SuccsHandled.insert(SuccMBB)) continue; 1325 1326 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 1327 1328 // At this point we know that there is a 1-1 correspondence between LLVM PHI 1329 // nodes and Machine PHI nodes, but the incoming operands have not been 1330 // emitted yet. 1331 for (BasicBlock::const_iterator I = SuccBB->begin(); 1332 const PHINode *PN = dyn_cast<PHINode>(I); ++I) { 1333 1334 // Ignore dead phi's. 1335 if (PN->use_empty()) continue; 1336 1337 // Only handle legal types. Two interesting things to note here. First, 1338 // by bailing out early, we may leave behind some dead instructions, 1339 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its 1340 // own moves. Second, this check is necessary because FastISel doesn't 1341 // use CreateRegs to create registers, so it always creates 1342 // exactly one register for each non-void instruction. 1343 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true); 1344 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) { 1345 // Promote MVT::i1. 1346 if (VT == MVT::i1) 1347 VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT); 1348 else { 1349 FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate); 1350 return false; 1351 } 1352 } 1353 1354 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 1355 1356 // Set the DebugLoc for the copy. Prefer the location of the operand 1357 // if there is one; use the location of the PHI otherwise. 1358 DL = PN->getDebugLoc(); 1359 if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp)) 1360 DL = Inst->getDebugLoc(); 1361 1362 unsigned Reg = getRegForValue(PHIOp); 1363 if (Reg == 0) { 1364 FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate); 1365 return false; 1366 } 1367 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg)); 1368 DL = DebugLoc(); 1369 } 1370 } 1371 1372 return true; 1373 } 1374