//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

// All possible address modes, plus some.
typedef struct Address {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  union {
    unsigned Reg;
    int FI;
  } Base;

  int Offset;

  // Innocuous defaults for our address.
  Address()
   : BaseType(RegBase), Offset(0) {
     Base.Reg = 0;
   }
} Address;

class ARMFastISel final : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

public:
  explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo),
        Subtarget(
            &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
        M(const_cast<Module &>(*funcInfo.Fn->getParent())),
        TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
        TLI(*Subtarget->getTargetLowering()) {
    AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
    isThumb2 = AFI->isThumbFunction();
    Context = &funcInfo.Fn->getContext();
  }
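  // Note: the fastEmitInst_* overrides below shadow the generic FastISel
  // emitters so that every instruction built here is routed through
  // AddOptionalDefs, which appends the default ARM predicate and, where
  // the instruction has one, the optional CC-def operand.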
  // Code from FastISel.cpp.
private:
  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          unsigned Op0, bool Op0IsKill);
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           unsigned Op1, bool Op1IsKill);
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           uint64_t Imm);
  unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill,
                            unsigned Op1, bool Op1IsKill,
                            uint64_t Imm);
  unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          uint64_t Imm);

  // Backend specific FastISel code.
private:
  bool fastSelectInstruction(const Instruction *I) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                           const LoadInst *LI) override;
  bool fastLowerArguments() override;
private:
#include "ARMGenFastISel.inc"

  // Instruction selection routines.
private:
  bool SelectLoad(const Instruction *I);
  bool SelectStore(const Instruction *I);
  bool SelectBranch(const Instruction *I);
  bool SelectIndirectBr(const Instruction *I);
  bool SelectCmp(const Instruction *I);
  bool SelectFPExt(const Instruction *I);
  bool SelectFPTrunc(const Instruction *I);
  bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectIToFP(const Instruction *I, bool isSigned);
  bool SelectFPToI(const Instruction *I, bool isSigned);
  bool SelectDiv(const Instruction *I, bool isSigned);
  bool SelectRem(const Instruction *I, bool isSigned);
  bool SelectCall(const Instruction *I, const char *IntrMemName);
  bool SelectIntrinsicCall(const IntrinsicInst &I);
  bool SelectSelect(const Instruction *I);
  bool SelectRet(const Instruction *I);
  bool SelectTrunc(const Instruction *I);
  bool SelectIntExt(const Instruction *I);
  bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
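  // Each Select* routine returns true only when it fully selected the
  // instruction; returning false hands the instruction back to the
  // generic FastISel path and, ultimately, to SelectionDAG.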
  // Utility routines.
private:
  bool isPositionIndependent() const;
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                   unsigned Alignment = 0, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    unsigned Alignment = 0);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             unsigned Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
  unsigned ARMMaterializeInt(const Constant *C, MVT VT);
  unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);
  unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

  const TargetLowering *getTargetLowering() { return &TLI; }

  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                bool Return,
                                bool isVarArg);
  bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                       SmallVectorImpl<unsigned> &ArgRegs,
                       SmallVectorImpl<MVT> &ArgVTs,
                       SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                       SmallVectorImpl<unsigned> &RegArgs,
                       CallingConv::ID CC,
                       unsigned &NumBytes,
                       bool isVarArg);
  unsigned getLibcallReg(const Twine &Name);
  bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                  const Instruction *I, CallingConv::ID CC,
                  unsigned &NumBytes, bool isVarArg);
  bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

  // OptionalDef handling routines.
private:
  bool isARMNEONPred(const MachineInstr *MI);
  bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
  const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
  void AddLoadStoreOperands(MVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}
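// NEON instructions in ARM mode carry a predicate operand for encoding
// uniformity even though they can only execute unconditionally, so the
// check below recognizes them and AddOptionalDefs still appends the
// operand.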
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is not a NEON instruction, or we're in a Thumb2 function, defer
  // to isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
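// VMOVSR/VMOVRS move a single 32-bit value between the integer and VFP
// register banks; a 64-bit transfer would need VMOVDRR/VMOVRRD, which is
// why the two helpers below simply bail out on f64/i64 for now.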
// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}
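// Integer materialization tries, in order: a single MOVi16 for values that
// fit in 16 bits, MVN for encodable negative values, a movw/movt pair when
// the subtarget has it, and finally a constant-pool load.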
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                               &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                                 &ARM::GPRRegClass;
      unsigned ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt(*FuncInfo.MF))
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  if (ResultReg)
    return ResultReg;

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = DL.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                    .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));
  }
  return ResultReg;
}

bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = DL.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
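    // For PIC the address is rebuilt as constant-pool entry plus PC, so
    // the pic label id used in the CPV above must match the one passed to
    // the PICADD/PICLDR (or t2LDRpci_pic) emitted below.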
    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DbgLoc, TII.get(Opc), NewDestReg)
                                      .addReg(DestReg)
                                      .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}
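// A type is "legal" here when it maps directly onto a register class;
// isLoadTypeLegal additionally accepts i1/i8/i16, since a load of those
// can be widened with an extend on the way in.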
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
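    // For GEPs, every constant index is folded into a single immediate
    // offset, walking through chained adds where possible; anything that
    // can't be folded falls back to materializing the address below.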
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
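// addrmode3 (halfword and signed-byte loads/stores) encodes an 8-bit
// offset plus an add/subtract flag, so a negative offset is folded below
// as (0x100 | -Offset) in the extra immediate operand.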
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}
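// Loads and stores of swifterror values are deliberately left to the
// SelectionDAG path, which knows how to model the dedicated swifterror
// register.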
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}
bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}
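// getComparePred maps an IR predicate onto a single ARM condition code.
// FCMP_ONE and FCMP_UEQ would each need two compares, so they come back
// as AL, which callers treat as "unhandled".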
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
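// SelectBranch tries three shortcuts before falling back to testing a
// materialized i1: branching directly on a compare in the same block,
// folding a trunc-to-i1 into a TST, and turning a constant condition into
// an unconditional branch.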
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CmpReg)
          .addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}
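// For integer compares, a negative immediate is negated and CMP swapped
// for CMN so that the operand still encodes; INT_MIN is the one exception,
// since its negation is not representable in 32 bits.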
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, so we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}
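// FP compares leave their result in the VFP status register; the FMSTAT
// emitted above copies those flags into CPSR, which is why SelectCmp can
// predicate an ordinary MOVCC on either kind of compare.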
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
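// Int-to-FP conversion happens entirely in the FP register bank: the
// integer bits are first transferred with a VMOV (ARMMoveToFPReg), then
// VSITO*/VUITO* reinterpret and convert them in place.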
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
  SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  updateValueMap(I, ResultReg);
  return true;
}
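
// For instance, 'add i8 %a, %b' can reach SelectBinaryIntOp when the
// target-independent selector punts on the illegal i8 type. It is emitted as
// a full-width ADDrr/t2ADDrr; the resulting garbage in the high 24 bits is
// harmless because only the low 8 bits of an i8 value are defined.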

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(DL, I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // FIXME: Support vector types where possible.
  if (VT.isVector())
    return false;

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  updateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
  case CallingConv::CXX_FAST_TLS:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else {
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    }
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::Swift:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft-float variant; variadic functions don't
    // use the hard floating-point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}
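
// Example of the dispatch above: a plain C call on an AAPCS target built
// with a hard-float ABI and VFP2 resolves to CC_ARM_AAPCS_VFP for arguments
// and RetCC_ARM_AAPCS_VFP for results, while the same call on a soft-float
// AAPCS target uses CC_ARM_AAPCS / RetCC_ARM_AAPCS.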

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (ArgVT.SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = Args[VA.getValNo()];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
      // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }
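
    // Example: an i8 argument carrying the zeroext attribute arrives here as
    // CCValAssign::ZExt with an i32 LocVT; the ARMEmitIntExt call above then
    // widens it (e.g. 'and rN, rN, #255' in ARM mode) before the value is
    // placed in its register or stack slot below.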

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    }
  }

  return true;
}
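
// Note on the f64 path in FinishCall above: a soft-float call returns an f64
// split across r0/r1, and the VMOVDRR reassembles it into a double register,
// e.g. 'vmov d16, r0, r1' (register choice illustrative).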

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(RetOpc));
  AddOptionalDefs(MIB);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}
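
// Worked example for SelectRet above: 'ret i8 %v' in a signext function
// sign-extends %v to i32 via ARMEmitIntExt, copies the result into r0, and
// emits 'bx lr' (BX_RET) with r0 attached as an implicit use so the return
// value stays live.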

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  // Manually compute the global's type to avoid building it when unnecessary.
  Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
  EVT LCREVT = TLI.getValueType(DL, GVTy);
  if (!LCREVT.isSimple()) return 0;

  GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, nullptr,
                                       Name);
  assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}

// A quick function that will emit a call for a named libcall with the vector
// of arguments gathered from the Instruction I. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (Subtarget->genLongCalls()) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (Subtarget->genLongCalls())
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (CI->isTailCall()) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  FunctionType *FTy = CS.getFunctionType();
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
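
  // Note for the argument loop below: when lowering a memory intrinsic via
  // IntrMemName, the last two IR operands (the alignment and isvolatile
  // arguments of @llvm.memcpy and friends in this IR) are skipped, since the
  // libc routines only take (dest, src, len).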
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) ||
        CS.paramHasAttr(AttrInd, Attribute::SwiftError) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls()) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}
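
// ARMTryEmitSmallMemCpy below unrolls a small constant-length copy into
// load/store pairs instead of a libcall. For example, a 7-byte copy with
// 4-byte-aligned operands is emitted as an i32, then an i16, then an i8
// load/store pair, bumping the Src and Dest offsets after each round.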
bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpys.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      // Bound based on alignment.
      if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else {
        VT = MVT::i8;
      }
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;

    const ARMBaseRegisterInfo *RegInfo =
        static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr r0 [fp]
    // ldr r0 [r0]
    // ldr r0 [r0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    updateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpys are common enough that we want to do them without a call
      // if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        unsigned Alignment = MTI.getAlignment();
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  updateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
    return 0;

  // Table of which combinations can be emitted as a single instruction,
  // and which will require two.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
    // ext:      s  z       s  z         s  z       s  z
    /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
  };

  // Constraints on the target register:
  //  - For ARM it can never be PC.
  //  - For 16-bit Thumb it is restricted to the lower 8 registers.
  //  - For 32-bit Thumb it is restricted to non-SP and non-PC.
  static const TargetRegisterClass *RCTbl[2][2] = {
    // Instructions: Two                     Single
    /* ARM      */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    /* Thumb    */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
  };

  // Table governing the instruction(s) to be emitted.
  static const struct InstructionTable {
    uint32_t Opc   : 16;
    uint32_t hasS  :  1; // Some instructions have an S bit, always set it to 0.
    uint32_t Shift :  7; // For shift operand addressing mode, used by MOVsi.
    uint32_t Imm   :  8; // All instructions have either a shift or a mask.
  } IT[2][2][3][2] = {
    { // Two instructions (first is left shift, second is in this table).
      { // ARM                Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  31 },
        /*  1 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  31 } },
        /*  8 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  24 },
        /*  8 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  24 } },
        /* 16 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  16 },
        /* 16 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  16 } }
      },
      { // Thumb              Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  31 },
        /*  1 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  31 } },
        /*  8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  24 },
        /*  8 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  24 } },
        /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  16 },
        /* 16 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  16 } }
      }
    },
    { // Single instruction.
      { // ARM                Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,   0 } }
      },
      { // Thumb              Opc           S  Shift             Imm
        /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
        /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,   1 } },
        /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,   0 },
        /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
        /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,   0 },
        /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,   0 } }
      }
    }
  };
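
  // Worked example of a table lookup (values read from the tables above):
  // zero-extending an i1 in ARM mode on a pre-V6 core gives
  // isSingleInstrTbl[0][0][0][1] == 1, so a single 'and rd, rn, #1' (ANDri)
  // suffices; sign-extending the same i1 gives 0, so the two-instruction
  // form 'mov rd, rn, lsl #31' then 'mov rd2, rd, asr #31' is emitted.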

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  (void) DestBits;
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;
  ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;

  // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;
  // MOVsi encodes shift and immediate in shift operand addressing mode.
  // The following condition has the same value when emitting two
  // instruction sequences: both are shifts.
  bool ImmIsSO = (Shift != ARM_AM::no_shift);

  // Either one or two instructions are emitted.
  // They're always of the form:
  //   dst = in OP imm
  // CPSR is set only by 16-bit Thumb instructions.
  // Predicate, if any, is AL.
  // S bit, if available, is always 0.
  // When two are emitted the first's result will feed as the second's input,
  // that value is then dead.
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
    unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
    if (setsCPSR)
      MIB.addReg(ARM::CPSR, RegState::Define);
    SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
    AddDefaultPred(MIB.addReg(SrcReg, isKill * RegState::Kill).addImm(ImmEnc));
    if (hasS)
      AddDefaultCC(MIB);
    // Second instruction consumes the first's result.
    SrcReg = ResultReg;
  }

  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
  return true;
}
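
// For instance, 'shl i32 %a, 3' takes the MOVsi path above and prints as
// 'mov rd, rn, lsl #3', while a variable amount such as 'lshr i32 %a, %n'
// uses MOVsr, i.e. 'mov rd, rn, lsr rm' (register names illustrative).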

// TODO: SoftFP support.
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

namespace {
// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after
// extension.
const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};
}
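
// Reading the table above: the row { { ARM::UXTH, ARM::t2UXTH }, 0, 1,
// MVT::i16 } says a UXTH with immediate 0 whose input comes from an i16 load
// can be folded away by re-emitting the load as a zero-extending LDRH into
// the extend's result register, which is what tryToFoldLoadIntoMI does below.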

/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
  const uint64_t Imm = MI->getOperand(2).getImm();

  bool Found = false;
  bool isZExt;
  for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends);
       i != e; ++i) {
    if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) {
      Found = true;
      isZExt = FoldableLoadExtends[i].isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);

  LLVMContext *Context = &MF->getFunction()->getContext();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
      GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
      UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
      /*AddCurrentAddress=*/UseGOT_PREL);

  unsigned ConstAlign =
      MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);

  unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
          .addConstantPoolIndex(Idx);
  if (Opc == ARM::LDRcp)
    MIB.addImm(0);
  AddDefaultPred(MIB);

  // Fix the address by adding pc.
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
                                                          : ARM::PICADD;
  DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
            .addReg(TempReg)
            .addImm(ARMPCLabelIndex);
  if (!Subtarget->isThumb())
    AddDefaultPred(MIB);

  if (UseGOT_PREL && Subtarget->isThumb()) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              .addReg(DestReg)
              .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
  return DestReg;
}

bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;

  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  default:
    return false;
  case CallingConv::Fast:
  case CallingConv::C:
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::Swift:
    break;
  }

  // Only handle simple cases, i.e. up to 4 scalar i8/i16/i32 arguments,
  // which are passed in r0 - r3.
  unsigned Idx = 1;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    if (Idx > 4)
      return false;

    if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
        F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
        F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
        F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
        F->getAttributes().hasAttribute(Idx, Attribute::ByVal))
      return false;

    Type *ArgTy = I->getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple()) return false;
    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      break;
    default:
      return false;
    }
  }

  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  Idx = 0;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    unsigned SrcReg = GPRArgRegs[Idx];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    updateValueMap(&*I, ResultReg);
  }

  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
      return new ARMFastISel(funcInfo, libInfo);

    return nullptr;
  }
}