//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
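    // These overrides mirror the generic FastISel emitters, but route each
    // newly created instruction through AddOptionalDefs so that default
    // predicate and optional CC operands are filled in.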
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or this isn't a NEON instruction, then it
  // was already handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? Or are we a NEON instruction in ARM mode with a
  // predicate operand? In the latter case we know the instruction isn't
  // marked predicable, but we add the operands anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    // The instruction produces its result in an implicit register, so copy
    // that into the requested virtual register.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // See if we can use VFP3 instructions to materialize the constant;
  // otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

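// Materialize an integer constant into a register, preferring a single movw
// or mvn over a constant-pool load when the subtarget allows it.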
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
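    // No movt available: load the address out of the constant pool instead,
    // adding a PIC label offset when required.
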
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
            .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
            .addConstantPoolIndex(Idx)
            .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (isa<AddOperator>(Op) &&
              (!isa<Instruction>(Op) ||
               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add (in the same block) with a constant operand. Fold the
            // constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (!useAM3) {
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      // Handle negative offsets.
      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                          Addr.Offset > -256);
    } else {
      // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
      needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
    }
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
                                             : ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing having divided
  // the offset by 4 (it is multiplied back later). Do the same division here.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
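      // Note the encoding below: a negative offset is represented as
      // (0x100 | magnitude), i.e. bit 8 of the immediate carries the sign.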
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1:
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
      else
        Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
    } else {
      if (isZExt) {
        Opc = ARM::LDRBi12;
      } else {
        Opc = ARM::LDRSB;
        useAM3 = true;
      }
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i16:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
      else
        Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
    } else {
      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
      useAM3 = true;
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = ARM::t2LDRi8;
      else
        Opc = ARM::t2LDRi12;
    } else {
      Opc = ARM::LDRi12;
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned loads need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      needVMOV = true;
      VT = MVT::i32;
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
    } else {
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned loads need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                              ARM::GPRRegisterClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRBi8;
      else
        StrOpc = ARM::t2STRBi12;
    } else {
      StrOpc = ARM::STRBi12;
    }
    break;
  case MVT::i16:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRHi8;
      else
        StrOpc = ARM::t2STRHi12;
    } else {
      StrOpc = ARM::STRH;
      useAM3 = true;
    }
    break;
  case MVT::i32:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRi8;
      else
        StrOpc = ARM::t2STRi12;
    } else {
      StrOpc = ARM::STRi12;
    }
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned stores need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
      SrcReg = MoveReg;
      VT = MVT::i32;
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
    } else {
      StrOpc = ARM::VSTRS;
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned stores need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

// Map an IR comparison predicate onto an ARM condition code. ARMCC::AL is
// returned as a sentinel for predicates we don't handle (some would need
// more than one compare).
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
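  // A negative immediate can often be matched with CMN (compare negative)
  // instead of CMP, so remember the sign here and use the magnitude below.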
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
    break;
  case MVT::f64:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
    // Intentional fall-through.
  case MVT::i32:
    if (isThumb2) {
      if (!UseImm)
        CmpOpc = ARM::t2CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
    } else {
      if (!UseImm)
        CmpOpc = ARM::CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
    }
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                           : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg, and the operand above
  // is an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                        (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
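
// For integer types narrower than i32 (i1/i8/i16) the operation is simply
// performed in a 32-bit register; the high bits of the result are left
// undefined, matching the assumption SelectTrunc makes below.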
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
  default: return false;
  case ISD::ADD:
    Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
    break;
  case ISD::OR:
    Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
    break;
  case ISD::SUB:
    Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
    break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here when we want to use NEON for our fp operations but can't
  // figure out how to. Just use the VFP instructions if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}
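
// Argument lowering follows the usual shape: AnalyzeCallOperands assigns
// each argument a register or a stack slot, CALLSEQ_START reserves the
// outgoing-argument area, register arguments are copied into their physregs
// (an f64 may be split into a GPR pair with VMOVRRD), and stack arguments
// are stored SP-relative. FinishCall later emits CALLSEQ_END and copies any
// results back out.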
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Plain register args are fine; the actual copies happen below.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we can handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
      assert(Arg != 0 && "Failed to emit a sext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::AExt:
      // Intentional fall-through. Handle AExt and ZExt.
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
      assert(Arg != 0 && "Failed to emit a zext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}
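
// Return lowering: the return value, if any, is extended to i32 when it is
// i1/i8/i16, copied into the single register chosen by the calling
// convention, and marked live-out; the actual return is a BX_RET/tBX_RET.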
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the return, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
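
// Note that the GlobalValue operand is currently unused: direct calls are
// always emitted as BL (ARM) or tBL (Thumb2).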
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  return isThumb2 ? ARM::tBL : ARM::BL;
}

// A quick function that will emit a call for a named libcall, using the
// operands of the Instruction in I as the passed arguments. We can assume
// that we can emit a call for any libcall we can produce. This is an
// abridged version of the full call infrastructure since we won't need to
// worry about things like computed function pointers or strange arguments
// at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to
// unify with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
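
// SelectCall handles only the easy cases: a direct call to a GlobalValue
// (or, for the memory intrinsics, a named external symbol), no varargs, no
// inline asm, no sret/byval/nest/inreg arguments, and argument and return
// types that are either legal or promotable to i32.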
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addGlobalAddress(GV, 0, 0));
    else
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
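
// Small constant-length memcpys are inlined as a sequence of i32/i16/i8
// load/store pairs instead of a libcall; "small" here means at most 16
// bytes, so e.g. a 7-byte copy becomes one word, one halfword, and one
// byte copy.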
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}
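
// UXTB/UXTH/SXTB/SXTH require ARMv6 or later; the trailing immediate on
// those is the rotate amount (always 0 here), while an i1 zero-extend is
// emitted as AND with the mask 1. Sign-extending an i1 is not handled.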
unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
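
// Returning false from any of the Select* routines (or from this dispatch)
// is not an error: fast-isel simply falls back to the slower
// SelectionDAG-based instruction selection for that instruction.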
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Fall through to the i16 check.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Fall through to the i8 check.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only, and no Thumb1, for now.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}