1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the Hexagon implementation of the TargetInstrInfo class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "HexagonInstrInfo.h" 15 #include "Hexagon.h" 16 #include "HexagonRegisterInfo.h" 17 #include "HexagonSubtarget.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/CodeGen/DFAPacketizer.h" 21 #include "llvm/CodeGen/MachineFrameInfo.h" 22 #include "llvm/CodeGen/MachineInstrBuilder.h" 23 #include "llvm/CodeGen/MachineMemOperand.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/CodeGen/PseudoSourceValue.h" 26 #include "llvm/MC/MCAsmInfo.h" 27 #include "llvm/Support/CommandLine.h" 28 #include "llvm/Support/Debug.h" 29 #include "llvm/Support/MathExtras.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include <cctype> 32 33 using namespace llvm; 34 35 #define DEBUG_TYPE "hexagon-instrinfo" 36 37 #define GET_INSTRINFO_CTOR_DTOR 38 #define GET_INSTRMAP_INFO 39 #include "HexagonGenInstrInfo.inc" 40 #include "HexagonGenDFAPacketizer.inc" 41 42 using namespace llvm; 43 44 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden, 45 cl::init(false), cl::desc("Do not consider inline-asm a scheduling/" 46 "packetization boundary.")); 47 48 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction", 49 cl::Hidden, cl::init(true), cl::desc("Enable branch prediction")); 50 51 static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule", 52 cl::Hidden, cl::ZeroOrMore, cl::init(false), 53 cl::desc("Disable schedule adjustment for new value stores.")); 54 55 static 
cl::opt<bool> EnableTimingClassLatency( 56 "enable-timing-class-latency", cl::Hidden, cl::init(false), 57 cl::desc("Enable timing class latency")); 58 59 static cl::opt<bool> EnableALUForwarding( 60 "enable-alu-forwarding", cl::Hidden, cl::init(true), 61 cl::desc("Enable vec alu forwarding")); 62 63 static cl::opt<bool> EnableACCForwarding( 64 "enable-acc-forwarding", cl::Hidden, cl::init(true), 65 cl::desc("Enable vec acc forwarding")); 66 67 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large", 68 cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm")); 69 70 /// 71 /// Constants for Hexagon instructions. 72 /// 73 const int Hexagon_MEMV_OFFSET_MAX_128B = 896; // #s4: -8*128...7*128 74 const int Hexagon_MEMV_OFFSET_MIN_128B = -1024; // #s4 75 const int Hexagon_MEMV_OFFSET_MAX = 448; // #s4: -8*64...7*64 76 const int Hexagon_MEMV_OFFSET_MIN = -512; // #s4 77 const int Hexagon_MEMW_OFFSET_MAX = 4095; 78 const int Hexagon_MEMW_OFFSET_MIN = -4096; 79 const int Hexagon_MEMD_OFFSET_MAX = 8191; 80 const int Hexagon_MEMD_OFFSET_MIN = -8192; 81 const int Hexagon_MEMH_OFFSET_MAX = 2047; 82 const int Hexagon_MEMH_OFFSET_MIN = -2048; 83 const int Hexagon_MEMB_OFFSET_MAX = 1023; 84 const int Hexagon_MEMB_OFFSET_MIN = -1024; 85 const int Hexagon_ADDI_OFFSET_MAX = 32767; 86 const int Hexagon_ADDI_OFFSET_MIN = -32768; 87 const int Hexagon_MEMD_AUTOINC_MAX = 56; 88 const int Hexagon_MEMD_AUTOINC_MIN = -64; 89 const int Hexagon_MEMW_AUTOINC_MAX = 28; 90 const int Hexagon_MEMW_AUTOINC_MIN = -32; 91 const int Hexagon_MEMH_AUTOINC_MAX = 14; 92 const int Hexagon_MEMH_AUTOINC_MIN = -16; 93 const int Hexagon_MEMB_AUTOINC_MAX = 7; 94 const int Hexagon_MEMB_AUTOINC_MIN = -8; 95 const int Hexagon_MEMV_AUTOINC_MAX = 192; // #s3 96 const int Hexagon_MEMV_AUTOINC_MIN = -256; // #s3 97 const int Hexagon_MEMV_AUTOINC_MAX_128B = 384; // #s3 98 const int Hexagon_MEMV_AUTOINC_MIN_128B = -512; // #s3 99 100 // Pin the vtable to this file. 
101 void HexagonInstrInfo::anchor() {} 102 103 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST) 104 : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP), 105 RI() {} 106 107 108 static bool isIntRegForSubInst(unsigned Reg) { 109 return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) || 110 (Reg >= Hexagon::R16 && Reg <= Hexagon::R23); 111 } 112 113 114 static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) { 115 return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_loreg)) && 116 isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_hireg)); 117 } 118 119 120 /// Calculate number of instructions excluding the debug instructions. 121 static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB, 122 MachineBasicBlock::const_instr_iterator MIE) { 123 unsigned Count = 0; 124 for (; MIB != MIE; ++MIB) { 125 if (!MIB->isDebugValue()) 126 ++Count; 127 } 128 return Count; 129 } 130 131 132 /// Find the hardware loop instruction used to set-up the specified loop. 133 /// On Hexagon, we have two instructions used to set-up the hardware loop 134 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions 135 /// to indicate the end of a loop. 136 static MachineInstr *findLoopInstr(MachineBasicBlock *BB, int EndLoopOp, 137 SmallPtrSet<MachineBasicBlock *, 8> &Visited) { 138 int LOOPi; 139 int LOOPr; 140 if (EndLoopOp == Hexagon::ENDLOOP0) { 141 LOOPi = Hexagon::J2_loop0i; 142 LOOPr = Hexagon::J2_loop0r; 143 } else { // EndLoopOp == Hexagon::EndLOOP1 144 LOOPi = Hexagon::J2_loop1i; 145 LOOPr = Hexagon::J2_loop1r; 146 } 147 148 // The loop set-up instruction will be in a predecessor block 149 for (MachineBasicBlock::pred_iterator PB = BB->pred_begin(), 150 PE = BB->pred_end(); PB != PE; ++PB) { 151 // If this has been visited, already skip it. 
152 if (!Visited.insert(*PB).second) 153 continue; 154 if (*PB == BB) 155 continue; 156 for (MachineBasicBlock::reverse_instr_iterator I = (*PB)->instr_rbegin(), 157 E = (*PB)->instr_rend(); I != E; ++I) { 158 int Opc = I->getOpcode(); 159 if (Opc == LOOPi || Opc == LOOPr) 160 return &*I; 161 // We've reached a different loop, which means the loop0 has been removed. 162 if (Opc == EndLoopOp) 163 return 0; 164 } 165 // Check the predecessors for the LOOP instruction. 166 MachineInstr *loop = findLoopInstr(*PB, EndLoopOp, Visited); 167 if (loop) 168 return loop; 169 } 170 return 0; 171 } 172 173 174 /// Gather register def/uses from MI. 175 /// This treats possible (predicated) defs as actually happening ones 176 /// (conservatively). 177 static inline void parseOperands(const MachineInstr *MI, 178 SmallVector<unsigned, 4> &Defs, SmallVector<unsigned, 8> &Uses) { 179 Defs.clear(); 180 Uses.clear(); 181 182 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 183 const MachineOperand &MO = MI->getOperand(i); 184 185 if (!MO.isReg()) 186 continue; 187 188 unsigned Reg = MO.getReg(); 189 if (!Reg) 190 continue; 191 192 if (MO.isUse()) 193 Uses.push_back(MO.getReg()); 194 195 if (MO.isDef()) 196 Defs.push_back(MO.getReg()); 197 } 198 } 199 200 201 // Position dependent, so check twice for swap. 
202 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) { 203 switch (Ga) { 204 case HexagonII::HSIG_None: 205 default: 206 return false; 207 case HexagonII::HSIG_L1: 208 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A); 209 case HexagonII::HSIG_L2: 210 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 || 211 Gb == HexagonII::HSIG_A); 212 case HexagonII::HSIG_S1: 213 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 || 214 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A); 215 case HexagonII::HSIG_S2: 216 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 || 217 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 || 218 Gb == HexagonII::HSIG_A); 219 case HexagonII::HSIG_A: 220 return (Gb == HexagonII::HSIG_A); 221 case HexagonII::HSIG_Compound: 222 return (Gb == HexagonII::HSIG_Compound); 223 } 224 return false; 225 } 226 227 228 229 /// isLoadFromStackSlot - If the specified machine instruction is a direct 230 /// load from a stack slot, return the virtual or physical register number of 231 /// the destination along with the FrameIndex of the loaded stack slot. If 232 /// not, return 0. This predicate must return 0 if the instruction has 233 /// any side effects other than loading from the stack slot. 
234 unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 235 int &FrameIndex) const { 236 switch (MI.getOpcode()) { 237 default: 238 break; 239 case Hexagon::L2_loadrb_io: 240 case Hexagon::L2_loadrub_io: 241 case Hexagon::L2_loadrh_io: 242 case Hexagon::L2_loadruh_io: 243 case Hexagon::L2_loadri_io: 244 case Hexagon::L2_loadrd_io: 245 case Hexagon::V6_vL32b_ai: 246 case Hexagon::V6_vL32b_ai_128B: 247 case Hexagon::V6_vL32Ub_ai: 248 case Hexagon::V6_vL32Ub_ai_128B: 249 case Hexagon::LDriw_pred: 250 case Hexagon::LDriw_mod: 251 case Hexagon::LDriq_pred_V6: 252 case Hexagon::LDriq_pred_vec_V6: 253 case Hexagon::LDriv_pseudo_V6: 254 case Hexagon::LDrivv_pseudo_V6: 255 case Hexagon::LDriq_pred_V6_128B: 256 case Hexagon::LDriq_pred_vec_V6_128B: 257 case Hexagon::LDriv_pseudo_V6_128B: 258 case Hexagon::LDrivv_pseudo_V6_128B: { 259 const MachineOperand OpFI = MI.getOperand(1); 260 if (!OpFI.isFI()) 261 return 0; 262 const MachineOperand OpOff = MI.getOperand(2); 263 if (!OpOff.isImm() || OpOff.getImm() != 0) 264 return 0; 265 FrameIndex = OpFI.getIndex(); 266 return MI.getOperand(0).getReg(); 267 } 268 269 case Hexagon::L2_ploadrbt_io: 270 case Hexagon::L2_ploadrbf_io: 271 case Hexagon::L2_ploadrubt_io: 272 case Hexagon::L2_ploadrubf_io: 273 case Hexagon::L2_ploadrht_io: 274 case Hexagon::L2_ploadrhf_io: 275 case Hexagon::L2_ploadruht_io: 276 case Hexagon::L2_ploadruhf_io: 277 case Hexagon::L2_ploadrit_io: 278 case Hexagon::L2_ploadrif_io: 279 case Hexagon::L2_ploadrdt_io: 280 case Hexagon::L2_ploadrdf_io: { 281 const MachineOperand OpFI = MI.getOperand(2); 282 if (!OpFI.isFI()) 283 return 0; 284 const MachineOperand OpOff = MI.getOperand(3); 285 if (!OpOff.isImm() || OpOff.getImm() != 0) 286 return 0; 287 FrameIndex = OpFI.getIndex(); 288 return MI.getOperand(0).getReg(); 289 } 290 } 291 292 return 0; 293 } 294 295 296 /// isStoreToStackSlot - If the specified machine instruction is a direct 297 /// store to a stack slot, return the virtual or physical 
register number of 298 /// the source reg along with the FrameIndex of the loaded stack slot. If 299 /// not, return 0. This predicate must return 0 if the instruction has 300 /// any side effects other than storing to the stack slot. 301 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 302 int &FrameIndex) const { 303 switch (MI.getOpcode()) { 304 default: 305 break; 306 case Hexagon::S2_storerb_io: 307 case Hexagon::S2_storerh_io: 308 case Hexagon::S2_storeri_io: 309 case Hexagon::S2_storerd_io: 310 case Hexagon::V6_vS32b_ai: 311 case Hexagon::V6_vS32b_ai_128B: 312 case Hexagon::V6_vS32Ub_ai: 313 case Hexagon::V6_vS32Ub_ai_128B: 314 case Hexagon::STriw_pred: 315 case Hexagon::STriw_mod: 316 case Hexagon::STriq_pred_V6: 317 case Hexagon::STriq_pred_vec_V6: 318 case Hexagon::STriv_pseudo_V6: 319 case Hexagon::STrivv_pseudo_V6: 320 case Hexagon::STriq_pred_V6_128B: 321 case Hexagon::STriq_pred_vec_V6_128B: 322 case Hexagon::STriv_pseudo_V6_128B: 323 case Hexagon::STrivv_pseudo_V6_128B: { 324 const MachineOperand &OpFI = MI.getOperand(0); 325 if (!OpFI.isFI()) 326 return 0; 327 const MachineOperand &OpOff = MI.getOperand(1); 328 if (!OpOff.isImm() || OpOff.getImm() != 0) 329 return 0; 330 FrameIndex = OpFI.getIndex(); 331 return MI.getOperand(2).getReg(); 332 } 333 334 case Hexagon::S2_pstorerbt_io: 335 case Hexagon::S2_pstorerbf_io: 336 case Hexagon::S2_pstorerht_io: 337 case Hexagon::S2_pstorerhf_io: 338 case Hexagon::S2_pstorerit_io: 339 case Hexagon::S2_pstorerif_io: 340 case Hexagon::S2_pstorerdt_io: 341 case Hexagon::S2_pstorerdf_io: { 342 const MachineOperand &OpFI = MI.getOperand(1); 343 if (!OpFI.isFI()) 344 return 0; 345 const MachineOperand &OpOff = MI.getOperand(2); 346 if (!OpOff.isImm() || OpOff.getImm() != 0) 347 return 0; 348 FrameIndex = OpFI.getIndex(); 349 return MI.getOperand(3).getReg(); 350 } 351 } 352 353 return 0; 354 } 355 356 357 /// This function can analyze one/two way branching only and should (mostly) be 358 /// 
/// called by target independent side.
/// First entry is always the opcode of the branching instruction, except when
/// the Cond vector is supposed to be empty, e.g., when AnalyzeBranch fails, a
/// BB with only unconditional jump. Subsequent entries depend upon the opcode,
/// e.g. Jump_c p will have
/// Cond[0] = Jump_c
/// Cond[1] = p
/// HW-loop ENDLOOP:
/// Cond[0] = ENDLOOP
/// Cond[1] = MBB
/// New value jump:
/// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
/// Cond[1] = R
/// Cond[2] = Imm
///
/// Returns false when the branch structure was understood (TBB/FBB/Cond are
/// filled in), true when the block cannot be analyzed.
bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
    return false;

  // A basic block may look like this:
  //
  //  [   insn
  //     EH_LABEL
  //      insn
  //      insn
  //      insn
  //     EH_LABEL
  //      insn     ]
  //
  // It has two succs but does not have a terminator.
  // Don't know how to handle it.
  do {
    --I;
    if (I->isEHLabel())
      // Don't analyze EH branches.
      return true;
  } while (I != MBB.instr_begin());

  I = MBB.instr_end();
  --I;

  // Skip trailing debug values to reach the real terminator.
  while (I->isDebugValue()) {
    if (I == MBB.instr_begin())
      return false;
    --I;
  }

  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
                     I->getOperand(0).isMBB();
  // Delete the J2_jump if it's equivalent to a fall-through.
  if (AllowModify && JumpToBlock &&
      MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
    DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
    I->eraseFromParent();
    I = MBB.instr_end();
    if (I == MBB.instr_begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  MachineInstr *SecondLastInst = nullptr;
  // Find one more terminator if present; more than two means unanalyzable.
  for (;;) {
    if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
      if (!SecondLastInst)
        SecondLastInst = &*I;
      else
        // This is a third branch.
        return true;
    }
    if (I == MBB.instr_begin())
      break;
    --I;
  }

  int LastOpcode = LastInst->getOpcode();
  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
  // If the branch target is not a basic block, it could be a tail call.
  // (It is, if the target is a function.)
  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
    return true;
  if (SecLastOpcode == Hexagon::J2_jump &&
      !SecondLastInst->getOperand(0).isMBB())
    return true;

  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
  bool LastOpcodeHasNVJump = isNewValueJump(LastInst);

  // A conditional jump whose target is not a block (e.g. a symbol) cannot
  // be represented in Cond.
  if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
    return true;

  // If there is only one terminator instruction, process it.
  if (LastInst && !SecondLastInst) {
    if (LastOpcode == Hexagon::J2_jump) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isEndLoopN(LastOpcode)) {
      // Hardware loop end: Cond = [opcode, loop-start MBB operand].
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    if (LastOpcodeHasJMP_c) {
      // Conditional jump: Cond = [opcode, predicate register].
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Only supporting rr/ri versions of new-value jumps.
    if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
      TBB = LastInst->getOperand(2).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
      return false;
    }
    DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
                 << " with one jump\n";);
    // Otherwise, don't know what this is.
    return true;
  }

  // Two terminators: the second-to-last must be conditional (or ENDLOOP)
  // and the last an unconditional jump for the pair to be analyzable.
  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
  bool SecLastOpcodeHasNVJump = isNewValueJump(SecondLastInst);
  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
    if (!SecondLastInst->getOperand(1).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // Only supporting rr/ri versions of new-value jumps.
  if (SecLastOpcodeHasNVJump &&
      (SecondLastInst->getNumExplicitOperands() == 3) &&
      (LastOpcode == Hexagon::J2_jump)) {
    TBB = SecondLastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst->getIterator();
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // If the block ends with an ENDLOOP, and J2_jump, handle it.
  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }
  DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
               << " with two jumps";);
  // Otherwise, can't handle this.
  return true;
}


/// Remove all branch instructions from the end of MBB and return how many
/// were erased (debug values are skipped over, not counted).
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    // Only removing branches from end of MBB.
    if (!I->isBranch())
      return Count;
    // An unconditional jump must be the last terminator; finding one after
    // having already erased a branch indicates a malformed block.
    if (Count && (I->getOpcode() == Hexagon::J2_jump))
      llvm_unreachable("Malformed basic block: unconditional branch not last");
    MBB.erase(&MBB.back());
    I = MBB.end();
    ++Count;
  }
  return Count;
}

/// Insert a branch sequence at the end of MBB described by TBB/FBB/Cond
/// (the encoding produced by analyzeBranch). Returns the number of
/// instructions inserted (1 or 2).
unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL) const {
  unsigned BOpc = Hexagon::J2_jump;
  unsigned BccOpc = Hexagon::J2_jumpt;
  assert(validateBranchCond(Cond) && "Invalid branching condition");
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  // Check if ReverseBranchCondition has asked to reverse this branch
  // If we want to reverse the branch an odd number of times, we want
  // J2_jumpf.
  if (!Cond.empty() && Cond[0].isImm())
    BccOpc = Cond[0].getImm();

  if (!FBB) {
    if (Cond.empty()) {
      // Due to a bug in TailMerging/CFG Optimization, we need to add a
      // special case handling of a predicated jump followed by an
      // unconditional jump. If not, Tail Merging and CFG Optimization go
      // into an infinite loop.
      MachineBasicBlock *NewTBB, *NewFBB;
      // Note: this local Cond intentionally shadows the parameter.
      SmallVector<MachineOperand, 4> Cond;
      auto Term = MBB.getFirstTerminator();
      if (Term != MBB.end() && isPredicated(*Term) &&
          !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) {
        MachineBasicBlock *NextBB = &*++MBB.getIterator();
        if (NewTBB == NextBB) {
          // Fold the predicated jump into the new branch by reversing it,
          // then re-enter with the rewritten condition.
          ReverseBranchCondition(Cond);
          RemoveBranch(MBB);
          return InsertBranch(MBB, TBB, nullptr, Cond, DL);
        }
      }
      BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else if (isEndLoopN(Cond[0].getImm())) {
      int EndLoopOp = Cond[0].getImm();
      assert(Cond[1].isMBB());
      // Since we're adding an ENDLOOP, there better be a LOOP instruction.
      // Check for it, and change the BB target if needed.
      SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
      MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
      assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
      Loop->getOperand(0).setMBB(TBB);
      // Add the ENDLOOP after the finding the LOOP0.
      BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
    } else if (isNewValueJump(Cond[0].getImm())) {
      assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
      // New value jump
      // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
      // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
      unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
      DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
      if (Cond[2].isReg()) {
        // Register-register form.
        unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
        BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
          addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
      } else if(Cond[2].isImm()) {
        // Register-immediate form.
        BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
          addImm(Cond[2].getImm()).addMBB(TBB);
      } else
        llvm_unreachable("Invalid condition for branching");
    } else {
      // Plain predicated jump: Cond = [opcode, predicate register].
      assert((Cond.size() == 2) && "Malformed cond vector");
      const MachineOperand &RO = Cond[1];
      unsigned Flags = getUndefRegState(RO.isUndef());
      BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
    }
    return 1;
  }
  assert((!Cond.empty()) &&
         "Cond. cannot be empty when multiple branchings are required");
  assert((!isNewValueJump(Cond[0].getImm())) &&
         "NV-jump cannot be inserted with another branch");
  // Special case for hardware loops. The condition is a basic block.
  if (isEndLoopN(Cond[0].getImm())) {
    int EndLoopOp = Cond[0].getImm();
    assert(Cond[1].isMBB());
    // Since we're adding an ENDLOOP, there better be a LOOP instruction.
    // Check for it, and change the BB target if needed.
    SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
    MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
    assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
    Loop->getOperand(0).setMBB(TBB);
    // Add the ENDLOOP after the finding the LOOP0.
    BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
  } else {
    const MachineOperand &RO = Cond[1];
    unsigned Flags = getUndefRegState(RO.isUndef());
    BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
  }
  // Unconditional jump to the false block completes the two-way branch.
  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);

  return 2;
}

/// Analyze the loop code to find the loop induction variable and compare used
/// to compute the number of iterations. Currently, we analyze loop that are
/// controlled using hardware loops. In this case, the induction variable
/// instruction is null. For all other cases, this function returns true, which
/// means we're unable to analyze it.
bool HexagonInstrInfo::analyzeLoop(MachineLoop &L,
                                   MachineInstr *&IndVarInst,
                                   MachineInstr *&CmpInst) const {

  MachineBasicBlock *LoopEnd = L.getBottomBlock();
  MachineBasicBlock::iterator I = LoopEnd->getFirstTerminator();
  // We really "analyze" only hardware loops right now.
  if (I != LoopEnd->end() && isEndLoopN(I->getOpcode())) {
    IndVarInst = nullptr;
    CmpInst = &*I;
    return false;
  }
  return true;
}

/// Generate code to reduce the loop iteration by one and check if the loop is
/// finished. Return the value/register of the new loop count. this function
/// assumes the nth iteration is peeled first.
unsigned HexagonInstrInfo::reduceLoopCount(MachineBasicBlock &MBB,
      MachineInstr *IndVar, MachineInstr *Cmp,
      SmallVectorImpl<MachineOperand> &Cond,
      SmallVectorImpl<MachineInstr *> &PrevInsts,
      unsigned Iter, unsigned MaxIter) const {
  // We expect a hardware loop currently. This means that IndVar is set
  // to null, and the compare is the ENDLOOP instruction.
  assert((!IndVar) && isEndLoopN(Cmp->getOpcode())
                   && "Expecting a hardware loop");
  MachineFunction *MF = MBB.getParent();
  DebugLoc DL = Cmp->getDebugLoc();
  SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
  MachineInstr *Loop = findLoopInstr(&MBB, Cmp->getOpcode(), VisitedBBs);
  if (!Loop)
    return 0;
  // If the loop trip count is a compile-time value, then just change the
  // value.
  if (Loop->getOpcode() == Hexagon::J2_loop0i ||
      Loop->getOpcode() == Hexagon::J2_loop1i) {
    int64_t Offset = Loop->getOperand(1).getImm();
    if (Offset <= 1)
      Loop->eraseFromParent();
    else
      Loop->getOperand(1).setImm(Offset - 1);
    return Offset - 1;
  }
  // The loop trip count is a run-time value. We generate code to subtract
  // one from the trip count, and update the loop instruction.
  assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction");
  unsigned LoopCount = Loop->getOperand(1).getReg();
  // Check if we're done with the loop.
  unsigned LoopEnd = createVR(MF, MVT::i1);
  MachineInstr *NewCmp = BuildMI(&MBB, DL, get(Hexagon::C2_cmpgtui), LoopEnd).
    addReg(LoopCount).addImm(1);
  unsigned NewLoopCount = createVR(MF, MVT::i32);
  MachineInstr *NewAdd = BuildMI(&MBB, DL, get(Hexagon::A2_addi), NewLoopCount).
    addReg(LoopCount).addImm(-1);
  // Update the previously generated instructions with the new loop counter.
  for (SmallVectorImpl<MachineInstr *>::iterator I = PrevInsts.begin(),
       E = PrevInsts.end(); I != E; ++I)
    (*I)->substituteRegister(LoopCount, NewLoopCount, 0, getRegisterInfo());
  PrevInsts.clear();
  PrevInsts.push_back(NewCmp);
  PrevInsts.push_back(NewAdd);
  // Insert the new loop instruction if this is the last time the loop is
  // decremented.
  if (Iter == MaxIter)
    BuildMI(&MBB, DL, get(Hexagon::J2_loop0r)).
731 addMBB(Loop->getOperand(0).getMBB()).addReg(NewLoopCount); 732 // Delete the old loop instruction. 733 if (Iter == 0) 734 Loop->eraseFromParent(); 735 Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf)); 736 Cond.push_back(NewCmp->getOperand(0)); 737 return NewLoopCount; 738 } 739 740 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB, 741 unsigned NumCycles, unsigned ExtraPredCycles, 742 BranchProbability Probability) const { 743 return nonDbgBBSize(&MBB) <= 3; 744 } 745 746 747 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB, 748 unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, 749 unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) 750 const { 751 return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3; 752 } 753 754 755 bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB, 756 unsigned NumInstrs, BranchProbability Probability) const { 757 return NumInstrs <= 4; 758 } 759 760 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 761 MachineBasicBlock::iterator I, 762 const DebugLoc &DL, unsigned DestReg, 763 unsigned SrcReg, bool KillSrc) const { 764 auto &HRI = getRegisterInfo(); 765 unsigned KillFlag = getKillRegState(KillSrc); 766 767 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) { 768 BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg) 769 .addReg(SrcReg, KillFlag); 770 return; 771 } 772 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) { 773 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg) 774 .addReg(SrcReg, KillFlag); 775 return; 776 } 777 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) { 778 // Map Pd = Ps to Pd = or(Ps, Ps). 
779 BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg) 780 .addReg(SrcReg).addReg(SrcReg, KillFlag); 781 return; 782 } 783 if (Hexagon::CtrRegsRegClass.contains(DestReg) && 784 Hexagon::IntRegsRegClass.contains(SrcReg)) { 785 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg) 786 .addReg(SrcReg, KillFlag); 787 return; 788 } 789 if (Hexagon::IntRegsRegClass.contains(DestReg) && 790 Hexagon::CtrRegsRegClass.contains(SrcReg)) { 791 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg) 792 .addReg(SrcReg, KillFlag); 793 return; 794 } 795 if (Hexagon::ModRegsRegClass.contains(DestReg) && 796 Hexagon::IntRegsRegClass.contains(SrcReg)) { 797 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg) 798 .addReg(SrcReg, KillFlag); 799 return; 800 } 801 if (Hexagon::PredRegsRegClass.contains(SrcReg) && 802 Hexagon::IntRegsRegClass.contains(DestReg)) { 803 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg) 804 .addReg(SrcReg, KillFlag); 805 return; 806 } 807 if (Hexagon::IntRegsRegClass.contains(SrcReg) && 808 Hexagon::PredRegsRegClass.contains(DestReg)) { 809 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg) 810 .addReg(SrcReg, KillFlag); 811 return; 812 } 813 if (Hexagon::PredRegsRegClass.contains(SrcReg) && 814 Hexagon::IntRegsRegClass.contains(DestReg)) { 815 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg) 816 .addReg(SrcReg, KillFlag); 817 return; 818 } 819 if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) { 820 BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg). 
821 addReg(SrcReg, KillFlag); 822 return; 823 } 824 if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) { 825 BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg) 826 .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag) 827 .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag); 828 return; 829 } 830 if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) { 831 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg) 832 .addReg(SrcReg) 833 .addReg(SrcReg, KillFlag); 834 return; 835 } 836 if (Hexagon::VecPredRegsRegClass.contains(SrcReg) && 837 Hexagon::VectorRegsRegClass.contains(DestReg)) { 838 llvm_unreachable("Unimplemented pred to vec"); 839 return; 840 } 841 if (Hexagon::VecPredRegsRegClass.contains(DestReg) && 842 Hexagon::VectorRegsRegClass.contains(SrcReg)) { 843 llvm_unreachable("Unimplemented vec to pred"); 844 return; 845 } 846 if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) { 847 unsigned DstHi = HRI.getSubReg(DestReg, Hexagon::subreg_hireg); 848 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstHi) 849 .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag); 850 unsigned DstLo = HRI.getSubReg(DestReg, Hexagon::subreg_loreg); 851 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstLo) 852 .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag); 853 return; 854 } 855 856 #ifndef NDEBUG 857 // Show the invalid registers to ease debugging. 
858 dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber() 859 << ": " << PrintReg(DestReg, &HRI) 860 << " = " << PrintReg(SrcReg, &HRI) << '\n'; 861 #endif 862 llvm_unreachable("Unimplemented"); 863 } 864 865 866 void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 867 MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI, 868 const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { 869 DebugLoc DL = MBB.findDebugLoc(I); 870 MachineFunction &MF = *MBB.getParent(); 871 MachineFrameInfo &MFI = *MF.getFrameInfo(); 872 unsigned Align = MFI.getObjectAlignment(FI); 873 unsigned KillFlag = getKillRegState(isKill); 874 875 MachineMemOperand *MMO = MF.getMachineMemOperand( 876 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, 877 MFI.getObjectSize(FI), Align); 878 879 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { 880 BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io)) 881 .addFrameIndex(FI).addImm(0) 882 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 883 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) { 884 BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io)) 885 .addFrameIndex(FI).addImm(0) 886 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 887 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) { 888 BuildMI(MBB, I, DL, get(Hexagon::STriw_pred)) 889 .addFrameIndex(FI).addImm(0) 890 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 891 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) { 892 BuildMI(MBB, I, DL, get(Hexagon::STriw_mod)) 893 .addFrameIndex(FI).addImm(0) 894 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 895 } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) { 896 BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6_128B)) 897 .addFrameIndex(FI).addImm(0) 898 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 899 } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) { 900 BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6)) 901 .addFrameIndex(FI).addImm(0) 902 .addReg(SrcReg, 
KillFlag).addMemOperand(MMO); 903 } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) { 904 DEBUG(dbgs() << "++Generating 128B vector spill"); 905 BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6_128B)) 906 .addFrameIndex(FI).addImm(0) 907 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 908 } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) { 909 DEBUG(dbgs() << "++Generating vector spill"); 910 BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6)) 911 .addFrameIndex(FI).addImm(0) 912 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 913 } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) { 914 DEBUG(dbgs() << "++Generating double vector spill"); 915 BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6)) 916 .addFrameIndex(FI).addImm(0) 917 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 918 } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) { 919 DEBUG(dbgs() << "++Generating 128B double vector spill"); 920 BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6_128B)) 921 .addFrameIndex(FI).addImm(0) 922 .addReg(SrcReg, KillFlag).addMemOperand(MMO); 923 } else { 924 llvm_unreachable("Unimplemented"); 925 } 926 } 927 928 void HexagonInstrInfo::loadRegFromStackSlot( 929 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, 930 int FI, const TargetRegisterClass *RC, 931 const TargetRegisterInfo *TRI) const { 932 DebugLoc DL = MBB.findDebugLoc(I); 933 MachineFunction &MF = *MBB.getParent(); 934 MachineFrameInfo &MFI = *MF.getFrameInfo(); 935 unsigned Align = MFI.getObjectAlignment(FI); 936 937 MachineMemOperand *MMO = MF.getMachineMemOperand( 938 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, 939 MFI.getObjectSize(FI), Align); 940 941 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { 942 BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg) 943 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 944 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) { 945 BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), 
DestReg) 946 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 947 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) { 948 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg) 949 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 950 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) { 951 BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg) 952 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 953 } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) { 954 BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6_128B), DestReg) 955 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 956 } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) { 957 BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6), DestReg) 958 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 959 } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) { 960 DEBUG(dbgs() << "++Generating 128B double vector restore"); 961 BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6_128B), DestReg) 962 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 963 } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) { 964 DEBUG(dbgs() << "++Generating 128B vector restore"); 965 BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6_128B), DestReg) 966 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 967 } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) { 968 DEBUG(dbgs() << "++Generating vector restore"); 969 BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6), DestReg) 970 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 971 } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) { 972 DEBUG(dbgs() << "++Generating double vector restore"); 973 BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6), DestReg) 974 .addFrameIndex(FI).addImm(0).addMemOperand(MMO); 975 } else { 976 llvm_unreachable("Can't store this register to stack slot"); 977 } 978 } 979 980 981 /// expandPostRAPseudo - This function is called for all pseudo instructions 982 /// that remain after register allocation. 
/// Many pseudo instructions are created to help register allocation.  This
/// is the place to convert them into real instructions.  The target can edit
/// MI in place, or it can insert new instructions and erase MI.  Returns
/// true if anything was changed.
bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const HexagonRegisterInfo &HRI = getRegisterInfo();
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  // Double-vector accesses below are split into two single-vector accesses;
  // the high half lives VecOffset vector-sized slots past the low half.
  const unsigned VecOffset = 1;
  // Set by the *_128B case labels that fall through into the common 64B
  // expansion code to select the 128B opcodes/offsets instead.
  bool Is128B = false;

  switch (Opc) {
    case TargetOpcode::COPY: {
      MachineOperand &MD = MI.getOperand(0);
      MachineOperand &MS = MI.getOperand(1);
      MachineBasicBlock::iterator MBBI = MI.getIterator();
      // A self-copy or a copy from undef needs no code; just delete it.
      if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
        copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
        // copyPhysReg inserted the real copy immediately before MI; carry
        // over any implicit operands from the pseudo onto it.
        std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
      }
      MBB.erase(MBBI);
      return true;
    }
    case Hexagon::ALIGNA:
      // Align the frame pointer down to the requested power-of-two boundary:
      // Rd = and(FP, -align).
      BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
          .addReg(HRI.getFrameRegister())
          .addImm(-MI.getOperand(1).getImm());
      MBB.erase(MI);
      return true;
    case Hexagon::HEXAGON_V6_vassignp_128B:
    case Hexagon::HEXAGON_V6_vassignp: {
      // Vector-pair assignment: becomes a plain register copy.
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
      if (SrcReg != DstReg)
        copyPhysReg(MBB, MI, DL, DstReg, SrcReg, MI.getOperand(1).isKill());
      MBB.erase(MI);
      return true;
    }
    case Hexagon::HEXAGON_V6_lo_128B:
    case Hexagon::HEXAGON_V6_lo: {
      // Extract the low vector of a vector pair: copy from the lo subreg.
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
      copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
      MBB.erase(MI);
      // The kill flag applied to the pair, not the half; drop stale flags.
      MRI.clearKillFlags(SrcSubLo);
      return true;
    }
    case Hexagon::HEXAGON_V6_hi_128B:
    case Hexagon::HEXAGON_V6_hi: {
      // Extract the high vector of a vector pair: copy from the hi subreg.
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
      copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
      MBB.erase(MI);
      MRI.clearKillFlags(SrcSubHi);
      return true;
    }
    case Hexagon::STrivv_indexed_128B:
      Is128B = true;
      // Fall through.
    case Hexagon::STrivv_indexed: {
      // Store of a vector pair: split into two single-vector stores.
      unsigned SrcReg = MI.getOperand(2).getReg();
      unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
      unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
      unsigned NewOpcd = Is128B ? Hexagon::V6_vS32b_ai_128B
                                : Hexagon::V6_vS32b_ai;
      unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
      MachineInstr *MI1New =
          BuildMI(MBB, MI, DL, get(NewOpcd))
              .addOperand(MI.getOperand(0))
              .addImm(MI.getOperand(1).getImm())
              .addReg(SrcSubLo)
              .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
      // The base register is still needed by the second store.
      MI1New->getOperand(0).setIsKill(false);
      BuildMI(MBB, MI, DL, get(NewOpcd))
          .addOperand(MI.getOperand(0))
          // The Vectors are indexed in multiples of vector size.
          .addImm(MI.getOperand(1).getImm() + Offset)
          .addReg(SrcSubHi)
          .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
      MBB.erase(MI);
      return true;
    }
    case Hexagon::LDrivv_pseudo_V6_128B:
    case Hexagon::LDrivv_indexed_128B:
      Is128B = true;
      // Fall through.
    case Hexagon::LDrivv_pseudo_V6:
    case Hexagon::LDrivv_indexed: {
      // Load of a vector pair: split into two single-vector loads into the
      // lo/hi subregisters of the destination pair.
      unsigned NewOpcd = Is128B ? Hexagon::V6_vL32b_ai_128B
                                : Hexagon::V6_vL32b_ai;
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
      MachineInstr *MI1New =
          BuildMI(MBB, MI, DL, get(NewOpcd),
                  HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
              .addOperand(MI.getOperand(1))
              .addImm(MI.getOperand(2).getImm());
      // The base register is still needed by the second load.
      MI1New->getOperand(1).setIsKill(false);
      BuildMI(MBB, MI, DL, get(NewOpcd),
              HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
          .addOperand(MI.getOperand(1))
          // The Vectors are indexed in multiples of vector size.
          .addImm(MI.getOperand(2).getImm() + Offset)
          .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
      MBB.erase(MI);
      return true;
    }
    case Hexagon::LDriv_pseudo_V6_128B:
      Is128B = true;
      // Fall through.
    case Hexagon::LDriv_pseudo_V6: {
      // Single-vector load pseudo: becomes a real vector load.
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
                               : Hexagon::V6_vL32b_ai;
      int32_t Off = MI.getOperand(2).getImm();
      BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
          .addOperand(MI.getOperand(1))
          .addImm(Off)
          .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
      MBB.erase(MI);
      return true;
    }
    case Hexagon::STriv_pseudo_V6_128B:
      Is128B = true;
      // Fall through.
    case Hexagon::STriv_pseudo_V6: {
      // Single-vector store pseudo: becomes a real vector store.
      unsigned NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
                               : Hexagon::V6_vS32b_ai;
      int32_t Off = MI.getOperand(1).getImm();
      BuildMI(MBB, MI, DL, get(NewOpc))
          .addOperand(MI.getOperand(0))
          .addImm(Off)
          .addOperand(MI.getOperand(2))
          .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
      MBB.erase(MI);
      return true;
    }
    case Hexagon::TFR_PdTrue: {
      // Materialize "true" in a predicate register: Pd = or(Pd, !Pd) is
      // always-ones regardless of the (undef) input value.
      unsigned Reg = MI.getOperand(0).getReg();
      BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
          .addReg(Reg, RegState::Undef)
          .addReg(Reg, RegState::Undef);
      MBB.erase(MI);
      return true;
    }
    case Hexagon::TFR_PdFalse: {
      // Materialize "false" in a predicate register: Pd = and(Pd, !Pd) is
      // always-zeros regardless of the (undef) input value.
      unsigned Reg = MI.getOperand(0).getReg();
      BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
          .addReg(Reg, RegState::Undef)
          .addReg(Reg, RegState::Undef);
      MBB.erase(MI);
      return true;
    }
    case Hexagon::VMULW: {
      // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned Src1Reg = MI.getOperand(1).getReg();
      unsigned Src2Reg = MI.getOperand(2).getReg();
      unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
      unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
      unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
      unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
              HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
          .addReg(Src1SubHi)
          .addReg(Src2SubHi);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
              HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
          .addReg(Src1SubLo)
          .addReg(Src2SubLo);
      MBB.erase(MI);
      // Kill flags applied to the whole pairs; drop them from the halves.
      MRI.clearKillFlags(Src1SubHi);
      MRI.clearKillFlags(Src1SubLo);
      MRI.clearKillFlags(Src2SubHi);
      MRI.clearKillFlags(Src2SubLo);
      return true;
    }
    case Hexagon::VMULW_ACC: {
      // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned Src1Reg = MI.getOperand(1).getReg();
      unsigned Src2Reg = MI.getOperand(2).getReg();
      unsigned Src3Reg = MI.getOperand(3).getReg();
      unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
      unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
      unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
      unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
      unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::subreg_hireg);
      unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::subreg_loreg);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
              HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
          .addReg(Src1SubHi)
          .addReg(Src2SubHi)
          .addReg(Src3SubHi);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
              HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
          .addReg(Src1SubLo)
          .addReg(Src2SubLo)
          .addReg(Src3SubLo);
      MBB.erase(MI);
      MRI.clearKillFlags(Src1SubHi);
      MRI.clearKillFlags(Src1SubLo);
      MRI.clearKillFlags(Src2SubHi);
      MRI.clearKillFlags(Src2SubLo);
      MRI.clearKillFlags(Src3SubHi);
      MRI.clearKillFlags(Src3SubLo);
      return true;
    }
    case Hexagon::Insert4: {
      // Build a 64-bit value from four 16-bit sources by inserting each
      // into its 16-bit field of the destination pair.
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned Src1Reg = MI.getOperand(1).getReg();
      unsigned Src2Reg = MI.getOperand(2).getReg();
      unsigned Src3Reg = MI.getOperand(3).getReg();
      unsigned Src4Reg = MI.getOperand(4).getReg();
      unsigned Src1RegIsKill = getKillRegState(MI.getOperand(1).isKill());
      unsigned Src2RegIsKill = getKillRegState(MI.getOperand(2).isKill());
      unsigned Src3RegIsKill = getKillRegState(MI.getOperand(3).isKill());
      unsigned Src4RegIsKill = getKillRegState(MI.getOperand(4).isKill());
      unsigned DstSubHi = HRI.getSubReg(DstReg, Hexagon::subreg_hireg);
      unsigned DstSubLo = HRI.getSubReg(DstReg, Hexagon::subreg_loreg);
      // Low half: Src1 into bits [15:0], Src2 into bits [31:16].
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
              HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
          .addReg(DstSubLo)
          .addReg(Src1Reg, Src1RegIsKill)
          .addImm(16)
          .addImm(0);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
              HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
          .addReg(DstSubLo)
          .addReg(Src2Reg, Src2RegIsKill)
          .addImm(16)
          .addImm(16);
      // High half: Src3 into bits [15:0], Src4 into bits [31:16].
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
              HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
          .addReg(DstSubHi)
          .addReg(Src3Reg, Src3RegIsKill)
          .addImm(16)
          .addImm(0);
      BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
              HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
          .addReg(DstSubHi)
          .addReg(Src4Reg, Src4RegIsKill)
          .addImm(16)
          .addImm(16);
      MBB.erase(MI);
      MRI.clearKillFlags(DstReg);
      MRI.clearKillFlags(DstSubHi);
      MRI.clearKillFlags(DstSubLo);
      return true;
    }
    case Hexagon::MUX64_rr: {
      // 64-bit mux: Rd = Pu ? Rs : Rt, expanded into up to two predicated
      // transfers; either transfer is omitted when Rd already aliases the
      // corresponding source.
      const MachineOperand &Op0 = MI.getOperand(0);
      const MachineOperand &Op1 = MI.getOperand(1);
      const MachineOperand &Op2 = MI.getOperand(2);
      const MachineOperand &Op3 = MI.getOperand(3);
      unsigned Rd = Op0.getReg();
      unsigned Pu = Op1.getReg();
      unsigned Rs = Op2.getReg();
      unsigned Rt = Op3.getReg();
      DebugLoc DL = MI.getDebugLoc();
      unsigned K1 = getKillRegState(Op1.isKill());
      unsigned K2 = getKillRegState(Op2.isKill());
      unsigned K3 = getKillRegState(Op3.isKill());
      if (Rd != Rs)
        BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
            // Kill Pu here only if the second transfer won't be emitted.
            .addReg(Pu, (Rd == Rt) ? K1 : 0)
            .addReg(Rs, K2);
      if (Rd != Rt)
        BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
            .addReg(Pu, K1)
            .addReg(Rt, K3);
      MBB.erase(MI);
      return true;
    }
    case Hexagon::VSelectPseudo_V6: {
      // Vector select: conditional move for the true value, negated
      // conditional move for the false value.
      const MachineOperand &Op0 = MI.getOperand(0);
      const MachineOperand &Op1 = MI.getOperand(1);
      const MachineOperand &Op2 = MI.getOperand(2);
      const MachineOperand &Op3 = MI.getOperand(3);
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
          .addOperand(Op0)
          .addOperand(Op1)
          .addOperand(Op2);
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
          .addOperand(Op0)
          .addOperand(Op1)
          .addOperand(Op3);
      MBB.erase(MI);
      return true;
    }
    case Hexagon::VSelectDblPseudo_V6: {
      // Vector-pair select: conditional combines of the hi/lo halves.
      MachineOperand &Op0 = MI.getOperand(0);
      MachineOperand &Op1 = MI.getOperand(1);
      MachineOperand &Op2 = MI.getOperand(2);
      MachineOperand &Op3 = MI.getOperand(3);
      unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_loreg);
      unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_hireg);
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
          .addOperand(Op0)
          .addOperand(Op1)
          .addReg(SrcHi)
          .addReg(SrcLo);
      SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_loreg);
      SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_hireg);
      BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
          .addOperand(Op0)
          .addOperand(Op1)
          .addReg(SrcHi)
          .addReg(SrcLo);
      MBB.erase(MI);
      return true;
    }
    case Hexagon::TCRETURNi:
      // Tail calls become plain jumps; operands are already correct.
      MI.setDesc(get(Hexagon::J2_jump));
      return true;
    case Hexagon::TCRETURNr:
      MI.setDesc(get(Hexagon::J2_jumpr));
      return true;
    case Hexagon::TFRI_f:
    case Hexagon::TFRI_cPt_f:
    case Hexagon::TFRI_cNotPt_f: {
      // FP-immediate transfers: rewrite in place as (possibly predicated)
      // integer transfers of the bit pattern.  The predicated forms carry
      // the predicate in operand 1, so their FP immediate is operand 2.
      unsigned Opx = (Opc == Hexagon::TFRI_f) ? 1 : 2;
      APFloat FVal = MI.getOperand(Opx).getFPImm()->getValueAPF();
      APInt IVal = FVal.bitcastToAPInt();
      MI.RemoveOperand(Opx);
      unsigned NewOpc = (Opc == Hexagon::TFRI_f)     ? Hexagon::A2_tfrsi :
                        (Opc == Hexagon::TFRI_cPt_f) ? Hexagon::C2_cmoveit :
                                                       Hexagon::C2_cmoveif;
      MI.setDesc(get(NewOpc));
      MI.addOperand(MachineOperand::CreateImm(IVal.getZExtValue()));
      return true;
    }
  }

  return false;
}


// We indicate that we want to reverse the branch by
// inserting the reversed branching opcode.  Returns true when the
// condition cannot be reversed (endloop, or no condition at all).
bool HexagonInstrInfo::ReverseBranchCondition(
      SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.empty())
    return true;
  assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
  unsigned opcode = Cond[0].getImm();
  assert(get(opcode).isBranch() && "Should be a branching condition.");
  // Hardware-loop "endloop" branches have no invertible form.
  if (isEndLoopN(opcode))
    return true;
  unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
  Cond[0].setImm(NewOpcode);
  return false;
}


// Insert an architectural no-op before MI.
void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator MI) const {
  DebugLoc DL;
  BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
}


// Returns true if an instruction is predicated irrespective of the predicate
// sense. For example, all of the following will return true.
// if (p0) R1 = add(R2, R3)
// if (!p0) R1 = add(R2, R3)
// if (p0.new) R1 = add(R2, R3)
// if (!p0.new) R1 = add(R2, R3)
// Note: New-value stores are not included here as in the current
// implementation, we don't need to check their predicate sense.
1354 bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const { 1355 const uint64_t F = MI.getDesc().TSFlags; 1356 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask; 1357 } 1358 1359 1360 bool HexagonInstrInfo::PredicateInstruction( 1361 MachineInstr &MI, ArrayRef<MachineOperand> Cond) const { 1362 if (Cond.empty() || isNewValueJump(Cond[0].getImm()) || 1363 isEndLoopN(Cond[0].getImm())) { 1364 DEBUG(dbgs() << "\nCannot predicate:"; MI.dump();); 1365 return false; 1366 } 1367 int Opc = MI.getOpcode(); 1368 assert (isPredicable(MI) && "Expected predicable instruction"); 1369 bool invertJump = predOpcodeHasNot(Cond); 1370 1371 // We have to predicate MI "in place", i.e. after this function returns, 1372 // MI will need to be transformed into a predicated form. To avoid com- 1373 // plicated manipulations with the operands (handling tied operands, 1374 // etc.), build a new temporary instruction, then overwrite MI with it. 1375 1376 MachineBasicBlock &B = *MI.getParent(); 1377 DebugLoc DL = MI.getDebugLoc(); 1378 unsigned PredOpc = getCondOpcode(Opc, invertJump); 1379 MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc)); 1380 unsigned NOp = 0, NumOps = MI.getNumOperands(); 1381 while (NOp < NumOps) { 1382 MachineOperand &Op = MI.getOperand(NOp); 1383 if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) 1384 break; 1385 T.addOperand(Op); 1386 NOp++; 1387 } 1388 1389 unsigned PredReg, PredRegPos, PredRegFlags; 1390 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags); 1391 (void)GotPredReg; 1392 assert(GotPredReg); 1393 T.addReg(PredReg, PredRegFlags); 1394 while (NOp < NumOps) 1395 T.addOperand(MI.getOperand(NOp++)); 1396 1397 MI.setDesc(get(PredOpc)); 1398 while (unsigned n = MI.getNumOperands()) 1399 MI.RemoveOperand(n-1); 1400 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i) 1401 MI.addOperand(T->getOperand(i)); 1402 1403 MachineBasicBlock::instr_iterator TI = T->getIterator(); 1404 B.erase(TI); 1405 1406 
MachineRegisterInfo &MRI = B.getParent()->getRegInfo(); 1407 MRI.clearKillFlags(PredReg); 1408 return true; 1409 } 1410 1411 1412 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1, 1413 ArrayRef<MachineOperand> Pred2) const { 1414 // TODO: Fix this 1415 return false; 1416 } 1417 1418 1419 bool HexagonInstrInfo::DefinesPredicate( 1420 MachineInstr &MI, std::vector<MachineOperand> &Pred) const { 1421 auto &HRI = getRegisterInfo(); 1422 for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) { 1423 MachineOperand MO = MI.getOperand(oper); 1424 if (MO.isReg() && MO.isDef()) { 1425 const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg()); 1426 if (RC == &Hexagon::PredRegsRegClass) { 1427 Pred.push_back(MO); 1428 return true; 1429 } 1430 } 1431 } 1432 return false; 1433 } 1434 1435 1436 bool HexagonInstrInfo::isPredicable(MachineInstr &MI) const { 1437 return MI.getDesc().isPredicable(); 1438 } 1439 1440 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 1441 const MachineBasicBlock *MBB, 1442 const MachineFunction &MF) const { 1443 // Debug info is never a scheduling boundary. It's necessary to be explicit 1444 // due to the special treatment of IT instructions below, otherwise a 1445 // dbg_value followed by an IT will result in the IT instruction being 1446 // considered a scheduling hazard, which is wrong. It should be the actual 1447 // instruction preceding the dbg_value instruction(s), just like it is 1448 // when debug info is not present. 1449 if (MI.isDebugValue()) 1450 return false; 1451 1452 // Throwing call is a boundary. 1453 if (MI.isCall()) { 1454 // If any of the block's successors is a landing pad, this could be a 1455 // throwing call. 1456 for (auto I : MBB->successors()) 1457 if (I->isEHPad()) 1458 return true; 1459 } 1460 1461 // Don't mess around with no return calls. 
1462 if (MI.getOpcode() == Hexagon::CALLv3nr) 1463 return true; 1464 1465 // Terminators and labels can't be scheduled around. 1466 if (MI.getDesc().isTerminator() || MI.isPosition()) 1467 return true; 1468 1469 if (MI.isInlineAsm() && !ScheduleInlineAsm) 1470 return true; 1471 1472 return false; 1473 } 1474 1475 1476 /// Measure the specified inline asm to determine an approximation of its 1477 /// length. 1478 /// Comments (which run till the next SeparatorString or newline) do not 1479 /// count as an instruction. 1480 /// Any other non-whitespace text is considered an instruction, with 1481 /// multiple instructions separated by SeparatorString or newlines. 1482 /// Variable-length instructions are not handled here; this function 1483 /// may be overloaded in the target code to do that. 1484 /// Hexagon counts the number of ##'s and adjust for that many 1485 /// constant exenders. 1486 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str, 1487 const MCAsmInfo &MAI) const { 1488 StringRef AStr(Str); 1489 // Count the number of instructions in the asm. 1490 bool atInsnStart = true; 1491 unsigned Length = 0; 1492 for (; *Str; ++Str) { 1493 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(), 1494 strlen(MAI.getSeparatorString())) == 0) 1495 atInsnStart = true; 1496 if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) { 1497 Length += MAI.getMaxInstLength(); 1498 atInsnStart = false; 1499 } 1500 if (atInsnStart && strncmp(Str, MAI.getCommentString(), 1501 strlen(MAI.getCommentString())) == 0) 1502 atInsnStart = false; 1503 } 1504 1505 // Add to size number of constant extenders seen * 4. 
1506 StringRef Occ("##"); 1507 Length += AStr.count(Occ)*4; 1508 return Length; 1509 } 1510 1511 1512 ScheduleHazardRecognizer* 1513 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer( 1514 const InstrItineraryData *II, const ScheduleDAG *DAG) const { 1515 return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG); 1516 } 1517 1518 1519 /// \brief For a comparison instruction, return the source registers in 1520 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it 1521 /// compares against in CmpValue. Return true if the comparison instruction 1522 /// can be analyzed. 1523 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, 1524 unsigned &SrcReg2, int &Mask, 1525 int &Value) const { 1526 unsigned Opc = MI.getOpcode(); 1527 1528 // Set mask and the first source register. 1529 switch (Opc) { 1530 case Hexagon::C2_cmpeq: 1531 case Hexagon::C2_cmpeqp: 1532 case Hexagon::C2_cmpgt: 1533 case Hexagon::C2_cmpgtp: 1534 case Hexagon::C2_cmpgtu: 1535 case Hexagon::C2_cmpgtup: 1536 case Hexagon::C4_cmpneq: 1537 case Hexagon::C4_cmplte: 1538 case Hexagon::C4_cmplteu: 1539 case Hexagon::C2_cmpeqi: 1540 case Hexagon::C2_cmpgti: 1541 case Hexagon::C2_cmpgtui: 1542 case Hexagon::C4_cmpneqi: 1543 case Hexagon::C4_cmplteui: 1544 case Hexagon::C4_cmpltei: 1545 SrcReg = MI.getOperand(1).getReg(); 1546 Mask = ~0; 1547 break; 1548 case Hexagon::A4_cmpbeq: 1549 case Hexagon::A4_cmpbgt: 1550 case Hexagon::A4_cmpbgtu: 1551 case Hexagon::A4_cmpbeqi: 1552 case Hexagon::A4_cmpbgti: 1553 case Hexagon::A4_cmpbgtui: 1554 SrcReg = MI.getOperand(1).getReg(); 1555 Mask = 0xFF; 1556 break; 1557 case Hexagon::A4_cmpheq: 1558 case Hexagon::A4_cmphgt: 1559 case Hexagon::A4_cmphgtu: 1560 case Hexagon::A4_cmpheqi: 1561 case Hexagon::A4_cmphgti: 1562 case Hexagon::A4_cmphgtui: 1563 SrcReg = MI.getOperand(1).getReg(); 1564 Mask = 0xFFFF; 1565 break; 1566 } 1567 1568 // Set the value/second source register. 
1569 switch (Opc) { 1570 case Hexagon::C2_cmpeq: 1571 case Hexagon::C2_cmpeqp: 1572 case Hexagon::C2_cmpgt: 1573 case Hexagon::C2_cmpgtp: 1574 case Hexagon::C2_cmpgtu: 1575 case Hexagon::C2_cmpgtup: 1576 case Hexagon::A4_cmpbeq: 1577 case Hexagon::A4_cmpbgt: 1578 case Hexagon::A4_cmpbgtu: 1579 case Hexagon::A4_cmpheq: 1580 case Hexagon::A4_cmphgt: 1581 case Hexagon::A4_cmphgtu: 1582 case Hexagon::C4_cmpneq: 1583 case Hexagon::C4_cmplte: 1584 case Hexagon::C4_cmplteu: 1585 SrcReg2 = MI.getOperand(2).getReg(); 1586 return true; 1587 1588 case Hexagon::C2_cmpeqi: 1589 case Hexagon::C2_cmpgtui: 1590 case Hexagon::C2_cmpgti: 1591 case Hexagon::C4_cmpneqi: 1592 case Hexagon::C4_cmplteui: 1593 case Hexagon::C4_cmpltei: 1594 case Hexagon::A4_cmpbeqi: 1595 case Hexagon::A4_cmpbgti: 1596 case Hexagon::A4_cmpbgtui: 1597 case Hexagon::A4_cmpheqi: 1598 case Hexagon::A4_cmphgti: 1599 case Hexagon::A4_cmphgtui: 1600 SrcReg2 = 0; 1601 Value = MI.getOperand(2).getImm(); 1602 return true; 1603 } 1604 1605 return false; 1606 } 1607 1608 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 1609 const MachineInstr &MI, 1610 unsigned *PredCost) const { 1611 return getInstrTimingClassLatency(ItinData, &MI); 1612 } 1613 1614 1615 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState( 1616 const TargetSubtargetInfo &STI) const { 1617 const InstrItineraryData *II = STI.getInstrItineraryData(); 1618 return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II); 1619 } 1620 1621 1622 // Inspired by this pair: 1623 // %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0] 1624 // S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1] 1625 // Currently AA considers the addresses in these instructions to be aliasing. 
1626 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint( 1627 MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const { 1628 int OffsetA = 0, OffsetB = 0; 1629 unsigned SizeA = 0, SizeB = 0; 1630 1631 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() || 1632 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 1633 return false; 1634 1635 // Instructions that are pure loads, not loads and stores like memops are not 1636 // dependent. 1637 if (MIa.mayLoad() && !isMemOp(&MIa) && MIb.mayLoad() && !isMemOp(&MIb)) 1638 return true; 1639 1640 // Get base, offset, and access size in MIa. 1641 unsigned BaseRegA = getBaseAndOffset(&MIa, OffsetA, SizeA); 1642 if (!BaseRegA || !SizeA) 1643 return false; 1644 1645 // Get base, offset, and access size in MIb. 1646 unsigned BaseRegB = getBaseAndOffset(&MIb, OffsetB, SizeB); 1647 if (!BaseRegB || !SizeB) 1648 return false; 1649 1650 if (BaseRegA != BaseRegB) 1651 return false; 1652 1653 // This is a mem access with the same base register and known offsets from it. 1654 // Reason about it. 1655 if (OffsetA > OffsetB) { 1656 uint64_t offDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB); 1657 return (SizeB <= offDiff); 1658 } else if (OffsetA < OffsetB) { 1659 uint64_t offDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA); 1660 return (SizeA <= offDiff); 1661 } 1662 1663 return false; 1664 } 1665 1666 1667 /// If the instruction is an increment of a constant value, return the amount. 
1668 bool HexagonInstrInfo::getIncrementValue(const MachineInstr *MI, 1669 int &Value) const { 1670 if (isPostIncrement(MI)) { 1671 unsigned AccessSize; 1672 return getBaseAndOffset(MI, Value, AccessSize); 1673 } 1674 if (MI->getOpcode() == Hexagon::A2_addi) { 1675 Value = MI->getOperand(2).getImm(); 1676 return true; 1677 } 1678 1679 return false; 1680 } 1681 1682 1683 unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { 1684 MachineRegisterInfo &MRI = MF->getRegInfo(); 1685 const TargetRegisterClass *TRC; 1686 if (VT == MVT::i1) { 1687 TRC = &Hexagon::PredRegsRegClass; 1688 } else if (VT == MVT::i32 || VT == MVT::f32) { 1689 TRC = &Hexagon::IntRegsRegClass; 1690 } else if (VT == MVT::i64 || VT == MVT::f64) { 1691 TRC = &Hexagon::DoubleRegsRegClass; 1692 } else { 1693 llvm_unreachable("Cannot handle this register class"); 1694 } 1695 1696 unsigned NewReg = MRI.createVirtualRegister(TRC); 1697 return NewReg; 1698 } 1699 1700 1701 bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr* MI) const { 1702 return (getAddrMode(MI) == HexagonII::AbsoluteSet); 1703 } 1704 1705 1706 bool HexagonInstrInfo::isAccumulator(const MachineInstr *MI) const { 1707 const uint64_t F = MI->getDesc().TSFlags; 1708 return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask); 1709 } 1710 1711 1712 bool HexagonInstrInfo::isComplex(const MachineInstr *MI) const { 1713 const MachineFunction *MF = MI->getParent()->getParent(); 1714 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 1715 const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; 1716 1717 if (!(isTC1(MI)) 1718 && !(QII->isTC2Early(MI)) 1719 && !(MI->getDesc().mayLoad()) 1720 && !(MI->getDesc().mayStore()) 1721 && (MI->getDesc().getOpcode() != Hexagon::S2_allocframe) 1722 && (MI->getDesc().getOpcode() != Hexagon::L2_deallocframe) 1723 && !(QII->isMemOp(MI)) 1724 && !(MI->isBranch()) 1725 && !(MI->isReturn()) 1726 && !MI->isCall()) 1727 return true; 1728 1729 return false; 1730 } 


// Return true if the instruction is a compound branch instruction.
bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr *MI) const {
  return (getType(MI) == HexagonII::TypeCOMPOUND && MI->isBranch());
}


// Returns true if MI is conditional in any form: a predicated branch, a
// conditional transfer/ALU32/load, or a predicated store that is not (and
// will not become) a new-value form.
bool HexagonInstrInfo::isCondInst(const MachineInstr *MI) const {
  return (MI->isBranch() && isPredicated(*MI)) ||
          isConditionalTransfer(MI) ||
          isConditionalALU32(MI)    ||
          isConditionalLoad(MI)     ||
          // Predicated stores which don't have a .new on any operands.
          (MI->mayStore() && isPredicated(*MI) && !isNewValueStore(MI) &&
           !isPredicatedNew(*MI));
}


// Returns true for the predicated ALU32 instructions (add/and/or/sub/xor,
// shifts, extends and conditional combines), in all four predicate forms
// (t/f, plain/.new).
bool HexagonInstrInfo::isConditionalALU32(const MachineInstr* MI) const {
  switch (MI->getOpcode()) {
    case Hexagon::A2_paddf:
    case Hexagon::A2_paddfnew:
    case Hexagon::A2_paddif:
    case Hexagon::A2_paddifnew:
    case Hexagon::A2_paddit:
    case Hexagon::A2_padditnew:
    case Hexagon::A2_paddt:
    case Hexagon::A2_paddtnew:
    case Hexagon::A2_pandf:
    case Hexagon::A2_pandfnew:
    case Hexagon::A2_pandt:
    case Hexagon::A2_pandtnew:
    case Hexagon::A2_porf:
    case Hexagon::A2_porfnew:
    case Hexagon::A2_port:
    case Hexagon::A2_portnew:
    case Hexagon::A2_psubf:
    case Hexagon::A2_psubfnew:
    case Hexagon::A2_psubt:
    case Hexagon::A2_psubtnew:
    case Hexagon::A2_pxorf:
    case Hexagon::A2_pxorfnew:
    case Hexagon::A2_pxort:
    case Hexagon::A2_pxortnew:
    case Hexagon::A4_paslhf:
    case Hexagon::A4_paslhfnew:
    case Hexagon::A4_paslht:
    case Hexagon::A4_paslhtnew:
    case Hexagon::A4_pasrhf:
    case Hexagon::A4_pasrhfnew:
    case Hexagon::A4_pasrht:
    case Hexagon::A4_pasrhtnew:
    case Hexagon::A4_psxtbf:
    case Hexagon::A4_psxtbfnew:
    case Hexagon::A4_psxtbt:
    case Hexagon::A4_psxtbtnew:
    case Hexagon::A4_psxthf:
    case Hexagon::A4_psxthfnew:
    case Hexagon::A4_psxtht:
    case Hexagon::A4_psxthtnew:
    case Hexagon::A4_pzxtbf:
    case Hexagon::A4_pzxtbfnew:
    case Hexagon::A4_pzxtbt:
    case Hexagon::A4_pzxtbtnew:
    case Hexagon::A4_pzxthf:
    case Hexagon::A4_pzxthfnew:
    case Hexagon::A4_pzxtht:
    case Hexagon::A4_pzxthtnew:
    case Hexagon::C2_ccombinewf:
    case Hexagon::C2_ccombinewt:
      return true;
  }
  return false;
}


// FIXME - Function name and it's functionality don't match.
// It should be renamed to hasPredNewOpcode()
// Returns true for a predicated load that has a valid .new form to which it
// could be promoted.
bool HexagonInstrInfo::isConditionalLoad(const MachineInstr* MI) const {
  if (!MI->getDesc().mayLoad() || !isPredicated(*MI))
    return false;

  int PNewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
  // Instruction with valid predicated-new opcode can be promoted to .new.
  return PNewOpcode >= 0;
}


// Returns true if an instruction is a conditional store.
//
// Note: It doesn't include conditional new-value stores as they can't be
// converted to .new predicate.
1824 bool HexagonInstrInfo::isConditionalStore(const MachineInstr* MI) const { 1825 switch (MI->getOpcode()) { 1826 default: return false; 1827 case Hexagon::S4_storeirbt_io: 1828 case Hexagon::S4_storeirbf_io: 1829 case Hexagon::S4_pstorerbt_rr: 1830 case Hexagon::S4_pstorerbf_rr: 1831 case Hexagon::S2_pstorerbt_io: 1832 case Hexagon::S2_pstorerbf_io: 1833 case Hexagon::S2_pstorerbt_pi: 1834 case Hexagon::S2_pstorerbf_pi: 1835 case Hexagon::S2_pstorerdt_io: 1836 case Hexagon::S2_pstorerdf_io: 1837 case Hexagon::S4_pstorerdt_rr: 1838 case Hexagon::S4_pstorerdf_rr: 1839 case Hexagon::S2_pstorerdt_pi: 1840 case Hexagon::S2_pstorerdf_pi: 1841 case Hexagon::S2_pstorerht_io: 1842 case Hexagon::S2_pstorerhf_io: 1843 case Hexagon::S4_storeirht_io: 1844 case Hexagon::S4_storeirhf_io: 1845 case Hexagon::S4_pstorerht_rr: 1846 case Hexagon::S4_pstorerhf_rr: 1847 case Hexagon::S2_pstorerht_pi: 1848 case Hexagon::S2_pstorerhf_pi: 1849 case Hexagon::S2_pstorerit_io: 1850 case Hexagon::S2_pstorerif_io: 1851 case Hexagon::S4_storeirit_io: 1852 case Hexagon::S4_storeirif_io: 1853 case Hexagon::S4_pstorerit_rr: 1854 case Hexagon::S4_pstorerif_rr: 1855 case Hexagon::S2_pstorerit_pi: 1856 case Hexagon::S2_pstorerif_pi: 1857 1858 // V4 global address store before promoting to dot new. 1859 case Hexagon::S4_pstorerdt_abs: 1860 case Hexagon::S4_pstorerdf_abs: 1861 case Hexagon::S4_pstorerbt_abs: 1862 case Hexagon::S4_pstorerbf_abs: 1863 case Hexagon::S4_pstorerht_abs: 1864 case Hexagon::S4_pstorerhf_abs: 1865 case Hexagon::S4_pstorerit_abs: 1866 case Hexagon::S4_pstorerif_abs: 1867 return true; 1868 1869 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded 1870 // from the "Conditional Store" list. Because a predicated new value store 1871 // would NOT be promoted to a double dot new store. 1872 // This function returns yes for those stores that are predicated but not 1873 // yet promoted to predicate dot new instructions. 
1874 } 1875 } 1876 1877 1878 bool HexagonInstrInfo::isConditionalTransfer(const MachineInstr *MI) const { 1879 switch (MI->getOpcode()) { 1880 case Hexagon::A2_tfrt: 1881 case Hexagon::A2_tfrf: 1882 case Hexagon::C2_cmoveit: 1883 case Hexagon::C2_cmoveif: 1884 case Hexagon::A2_tfrtnew: 1885 case Hexagon::A2_tfrfnew: 1886 case Hexagon::C2_cmovenewit: 1887 case Hexagon::C2_cmovenewif: 1888 case Hexagon::A2_tfrpt: 1889 case Hexagon::A2_tfrpf: 1890 return true; 1891 1892 default: 1893 return false; 1894 } 1895 return false; 1896 } 1897 1898 1899 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle 1900 // isFPImm and later getFPImm as well. 1901 bool HexagonInstrInfo::isConstExtended(const MachineInstr *MI) const { 1902 const uint64_t F = MI->getDesc().TSFlags; 1903 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask; 1904 if (isExtended) // Instruction must be extended. 1905 return true; 1906 1907 unsigned isExtendable = 1908 (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask; 1909 if (!isExtendable) 1910 return false; 1911 1912 if (MI->isCall()) 1913 return false; 1914 1915 short ExtOpNum = getCExtOpNum(MI); 1916 const MachineOperand &MO = MI->getOperand(ExtOpNum); 1917 // Use MO operand flags to determine if MO 1918 // has the HMOTF_ConstExtended flag set. 1919 if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended) 1920 return true; 1921 // If this is a Machine BB address we are talking about, and it is 1922 // not marked as extended, say so. 1923 if (MO.isMBB()) 1924 return false; 1925 1926 // We could be using an instruction with an extendable immediate and shoehorn 1927 // a global address into it. If it is a global address it will be constant 1928 // extended. We do this for COMBINE. 1929 // We currently only handle isGlobal() because it is the only kind of 1930 // object we are going to end up with here for now. 1931 // In the future we probably should add isSymbol(), etc. 
1932 if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() || 1933 MO.isJTI() || MO.isCPI()) 1934 return true; 1935 1936 // If the extendable operand is not 'Immediate' type, the instruction should 1937 // have 'isExtended' flag set. 1938 assert(MO.isImm() && "Extendable operand must be Immediate type"); 1939 1940 int MinValue = getMinValue(MI); 1941 int MaxValue = getMaxValue(MI); 1942 int ImmValue = MO.getImm(); 1943 1944 return (ImmValue < MinValue || ImmValue > MaxValue); 1945 } 1946 1947 1948 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const { 1949 switch (MI->getOpcode()) { 1950 case Hexagon::L4_return : 1951 case Hexagon::L4_return_t : 1952 case Hexagon::L4_return_f : 1953 case Hexagon::L4_return_tnew_pnt : 1954 case Hexagon::L4_return_fnew_pnt : 1955 case Hexagon::L4_return_tnew_pt : 1956 case Hexagon::L4_return_fnew_pt : 1957 return true; 1958 } 1959 return false; 1960 } 1961 1962 1963 // Return true when ConsMI uses a register defined by ProdMI. 1964 bool HexagonInstrInfo::isDependent(const MachineInstr *ProdMI, 1965 const MachineInstr *ConsMI) const { 1966 const MCInstrDesc &ProdMCID = ProdMI->getDesc(); 1967 if (!ProdMCID.getNumDefs()) 1968 return false; 1969 1970 auto &HRI = getRegisterInfo(); 1971 1972 SmallVector<unsigned, 4> DefsA; 1973 SmallVector<unsigned, 4> DefsB; 1974 SmallVector<unsigned, 8> UsesA; 1975 SmallVector<unsigned, 8> UsesB; 1976 1977 parseOperands(ProdMI, DefsA, UsesA); 1978 parseOperands(ConsMI, DefsB, UsesB); 1979 1980 for (auto &RegA : DefsA) 1981 for (auto &RegB : UsesB) { 1982 // True data dependency. 
1983 if (RegA == RegB) 1984 return true; 1985 1986 if (Hexagon::DoubleRegsRegClass.contains(RegA)) 1987 for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs) 1988 if (RegB == *SubRegs) 1989 return true; 1990 1991 if (Hexagon::DoubleRegsRegClass.contains(RegB)) 1992 for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs) 1993 if (RegA == *SubRegs) 1994 return true; 1995 } 1996 1997 return false; 1998 } 1999 2000 2001 // Returns true if the instruction is alread a .cur. 2002 bool HexagonInstrInfo::isDotCurInst(const MachineInstr* MI) const { 2003 switch (MI->getOpcode()) { 2004 case Hexagon::V6_vL32b_cur_pi: 2005 case Hexagon::V6_vL32b_cur_ai: 2006 case Hexagon::V6_vL32b_cur_pi_128B: 2007 case Hexagon::V6_vL32b_cur_ai_128B: 2008 return true; 2009 } 2010 return false; 2011 } 2012 2013 2014 // Returns true, if any one of the operands is a dot new 2015 // insn, whether it is predicated dot new or register dot new. 2016 bool HexagonInstrInfo::isDotNewInst(const MachineInstr* MI) const { 2017 if (isNewValueInst(MI) || (isPredicated(*MI) && isPredicatedNew(*MI))) 2018 return true; 2019 2020 return false; 2021 } 2022 2023 2024 /// Symmetrical. See if these two instructions are fit for duplex pair. 
bool HexagonInstrInfo::isDuplexPair(const MachineInstr *MIa,
      const MachineInstr *MIb) const {
  // Classify each instruction into its sub-instruction group, then check
  // pairing in both orders (the relation is symmetric).
  HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
  HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
  return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
}


// True for instructions whose sources are needed early in the pipeline:
// loads, stores, compares, and multiplies (M_tc_3or4x scheduling class).
bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr *MI) const {
  if (!MI)
    return false;

  if (MI->mayLoad() || MI->mayStore() || MI->isCompare())
    return true;

  // Multiply
  unsigned SchedClass = MI->getDesc().getSchedClass();
  if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
    return true;
  return false;
}


// True if Opcode is one of the hardware-loop end pseudos.
bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
  return (Opcode == Hexagon::ENDLOOP0 ||
          Opcode == Hexagon::ENDLOOP1);
}


// True if the machine-operand type is symbolic (an address expression
// rather than a plain register/immediate).
bool HexagonInstrInfo::isExpr(unsigned OpType) const {
  switch(OpType) {
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_JumpTableIndex:
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_BlockAddress:
    return true;
  default:
    return false;
  }
}


// True if MI has an extendable operand (per TSFlags), plus the TFR_FI
// pseudo which is handled specially.
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
  const MCInstrDesc &MID = MI->getDesc();
  const uint64_t F = MID.TSFlags;
  if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
    return true;

  // TODO: This is largely obsolete now. Will need to be removed
  // in consecutive patches.
  switch(MI->getOpcode()) {
    // TFR_FI Remains a special case.
    case Hexagon::TFR_FI:
      return true;
    default:
      return false;
  }
  return false;
}


// This returns true in two cases:
// - The OP code itself indicates that this is an extended instruction.
2090 // - One of MOs has been marked with HMOTF_ConstExtended flag. 2091 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const { 2092 // First check if this is permanently extended op code. 2093 const uint64_t F = MI->getDesc().TSFlags; 2094 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask) 2095 return true; 2096 // Use MO operand flags to determine if one of MI's operands 2097 // has HMOTF_ConstExtended flag set. 2098 for (MachineInstr::const_mop_iterator I = MI->operands_begin(), 2099 E = MI->operands_end(); I != E; ++I) { 2100 if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended) 2101 return true; 2102 } 2103 return false; 2104 } 2105 2106 2107 bool HexagonInstrInfo::isFloat(const MachineInstr *MI) const { 2108 unsigned Opcode = MI->getOpcode(); 2109 const uint64_t F = get(Opcode).TSFlags; 2110 return (F >> HexagonII::FPPos) & HexagonII::FPMask; 2111 } 2112 2113 2114 // No V60 HVX VMEM with A_INDIRECT. 2115 bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr *I, 2116 const MachineInstr *J) const { 2117 if (!isV60VectorInstruction(I)) 2118 return false; 2119 if (!I->mayLoad() && !I->mayStore()) 2120 return false; 2121 return J->isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J); 2122 } 2123 2124 2125 bool HexagonInstrInfo::isIndirectCall(const MachineInstr *MI) const { 2126 switch (MI->getOpcode()) { 2127 case Hexagon::J2_callr : 2128 case Hexagon::J2_callrf : 2129 case Hexagon::J2_callrt : 2130 return true; 2131 } 2132 return false; 2133 } 2134 2135 2136 bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr *MI) const { 2137 switch (MI->getOpcode()) { 2138 case Hexagon::L4_return : 2139 case Hexagon::L4_return_t : 2140 case Hexagon::L4_return_f : 2141 case Hexagon::L4_return_fnew_pnt : 2142 case Hexagon::L4_return_fnew_pt : 2143 case Hexagon::L4_return_tnew_pnt : 2144 case Hexagon::L4_return_tnew_pt : 2145 return true; 2146 } 2147 return false; 2148 } 2149 2150 2151 bool HexagonInstrInfo::isJumpR(const 
MachineInstr *MI) const { 2152 switch (MI->getOpcode()) { 2153 case Hexagon::J2_jumpr : 2154 case Hexagon::J2_jumprt : 2155 case Hexagon::J2_jumprf : 2156 case Hexagon::J2_jumprtnewpt : 2157 case Hexagon::J2_jumprfnewpt : 2158 case Hexagon::J2_jumprtnew : 2159 case Hexagon::J2_jumprfnew : 2160 return true; 2161 } 2162 return false; 2163 } 2164 2165 2166 // Return true if a given MI can accomodate given offset. 2167 // Use abs estimate as oppose to the exact number. 2168 // TODO: This will need to be changed to use MC level 2169 // definition of instruction extendable field size. 2170 bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr *MI, 2171 unsigned offset) const { 2172 // This selection of jump instructions matches to that what 2173 // AnalyzeBranch can parse, plus NVJ. 2174 if (isNewValueJump(MI)) // r9:2 2175 return isInt<11>(offset); 2176 2177 switch (MI->getOpcode()) { 2178 // Still missing Jump to address condition on register value. 2179 default: 2180 return false; 2181 case Hexagon::J2_jump: // bits<24> dst; // r22:2 2182 case Hexagon::J2_call: 2183 case Hexagon::CALLv3nr: 2184 return isInt<24>(offset); 2185 case Hexagon::J2_jumpt: //bits<17> dst; // r15:2 2186 case Hexagon::J2_jumpf: 2187 case Hexagon::J2_jumptnew: 2188 case Hexagon::J2_jumptnewpt: 2189 case Hexagon::J2_jumpfnew: 2190 case Hexagon::J2_jumpfnewpt: 2191 case Hexagon::J2_callt: 2192 case Hexagon::J2_callf: 2193 return isInt<17>(offset); 2194 case Hexagon::J2_loop0i: 2195 case Hexagon::J2_loop0iext: 2196 case Hexagon::J2_loop0r: 2197 case Hexagon::J2_loop0rext: 2198 case Hexagon::J2_loop1i: 2199 case Hexagon::J2_loop1iext: 2200 case Hexagon::J2_loop1r: 2201 case Hexagon::J2_loop1rext: 2202 return isInt<9>(offset); 2203 // TODO: Add all the compound branches here. Can we do this in Relation model? 
2204 case Hexagon::J4_cmpeqi_tp0_jump_nt: 2205 case Hexagon::J4_cmpeqi_tp1_jump_nt: 2206 return isInt<11>(offset); 2207 } 2208 } 2209 2210 2211 bool HexagonInstrInfo::isLateInstrFeedsEarlyInstr(const MachineInstr *LRMI, 2212 const MachineInstr *ESMI) const { 2213 if (!LRMI || !ESMI) 2214 return false; 2215 2216 bool isLate = isLateResultInstr(LRMI); 2217 bool isEarly = isEarlySourceInstr(ESMI); 2218 2219 DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- ")); 2220 DEBUG(LRMI->dump()); 2221 DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- ")); 2222 DEBUG(ESMI->dump()); 2223 2224 if (isLate && isEarly) { 2225 DEBUG(dbgs() << "++Is Late Result feeding Early Source\n"); 2226 return true; 2227 } 2228 2229 return false; 2230 } 2231 2232 2233 bool HexagonInstrInfo::isLateResultInstr(const MachineInstr *MI) const { 2234 if (!MI) 2235 return false; 2236 2237 switch (MI->getOpcode()) { 2238 case TargetOpcode::EXTRACT_SUBREG: 2239 case TargetOpcode::INSERT_SUBREG: 2240 case TargetOpcode::SUBREG_TO_REG: 2241 case TargetOpcode::REG_SEQUENCE: 2242 case TargetOpcode::IMPLICIT_DEF: 2243 case TargetOpcode::COPY: 2244 case TargetOpcode::INLINEASM: 2245 case TargetOpcode::PHI: 2246 return false; 2247 default: 2248 break; 2249 } 2250 2251 unsigned SchedClass = MI->getDesc().getSchedClass(); 2252 2253 switch (SchedClass) { 2254 case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123: 2255 case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123: 2256 case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123: 2257 case Hexagon::Sched::ALU64_tc_1_SLOT23: 2258 case Hexagon::Sched::EXTENDER_tc_1_SLOT0123: 2259 case Hexagon::Sched::S_2op_tc_1_SLOT23: 2260 case Hexagon::Sched::S_3op_tc_1_SLOT23: 2261 case Hexagon::Sched::V2LDST_tc_ld_SLOT01: 2262 case Hexagon::Sched::V2LDST_tc_st_SLOT0: 2263 case Hexagon::Sched::V2LDST_tc_st_SLOT01: 2264 case Hexagon::Sched::V4LDST_tc_ld_SLOT01: 2265 case Hexagon::Sched::V4LDST_tc_st_SLOT0: 2266 case Hexagon::Sched::V4LDST_tc_st_SLOT01: 2267 return false; 2268 } 2269 return true; 2270 } 2271 

// True when MI can receive all of its sources late, like an ALU
// instruction, even though it occupies a multiply resource.
bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr *MI) const {
  if (!MI)
    return false;

  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
  // resource, but all operands can be received late like an ALU instruction.
  return MI->getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
}


// True for any hardware-loop setup instruction (loop0/loop1, immediate or
// register trip count, with or without extension).
bool HexagonInstrInfo::isLoopN(const MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  return Opcode == Hexagon::J2_loop0i    ||
         Opcode == Hexagon::J2_loop0r    ||
         Opcode == Hexagon::J2_loop0iext ||
         Opcode == Hexagon::J2_loop0rext ||
         Opcode == Hexagon::J2_loop1i    ||
         Opcode == Hexagon::J2_loop1r    ||
         Opcode == Hexagon::J2_loop1iext ||
         Opcode == Hexagon::J2_loop1rext;
}


// Opcode table: true for read-modify-write memory operations (memop
// add/sub/and/or, immediate and register forms, byte/half/word).
bool HexagonInstrInfo::isMemOp(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
    default: return false;
    case Hexagon::L4_iadd_memopw_io :
    case Hexagon::L4_isub_memopw_io :
    case Hexagon::L4_add_memopw_io :
    case Hexagon::L4_sub_memopw_io :
    case Hexagon::L4_and_memopw_io :
    case Hexagon::L4_or_memopw_io :
    case Hexagon::L4_iadd_memoph_io :
    case Hexagon::L4_isub_memoph_io :
    case Hexagon::L4_add_memoph_io :
    case Hexagon::L4_sub_memoph_io :
    case Hexagon::L4_and_memoph_io :
    case Hexagon::L4_or_memoph_io :
    case Hexagon::L4_iadd_memopb_io :
    case Hexagon::L4_isub_memopb_io :
    case Hexagon::L4_add_memopb_io :
    case Hexagon::L4_sub_memopb_io :
    case Hexagon::L4_and_memopb_io :
    case Hexagon::L4_or_memopb_io :
    case Hexagon::L4_ior_memopb_io:
    case Hexagon::L4_ior_memoph_io:
    case Hexagon::L4_ior_memopw_io:
    case Hexagon::L4_iand_memopb_io:
    case Hexagon::L4_iand_memoph_io:
    case Hexagon::L4_iand_memopw_io:
      return true;
  }
  return false;
}


// True if the NewValue bit is set in MI's TSFlags.
bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
}


// True if the NewValue bit is set in the opcode's TSFlags.
bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
}


// A new-value instruction is either a new-value jump or a new-value store.
bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
  return isNewValueJump(MI) || isNewValueStore(MI);
}


// New-value jump: a branch carrying the NewValue TSFlags bit.
bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
  return isNewValue(MI) && MI->isBranch();
}


// Opcode form: additionally requires the opcode to be predicated.
bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
  return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
}


// True if the NVStore bit is set in MI's TSFlags.
bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
}


// True if the NVStore bit is set in the opcode's TSFlags.
bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
}


// Returns true if a particular operand is extendable for an instruction.
bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
      unsigned OperandNum) const {
  // The TSFlags field records which operand index is the extendable one.
  const uint64_t F = MI->getDesc().TSFlags;
  return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
          == OperandNum;
}


// True if MI uses the post-increment addressing mode.
bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
  return getAddrMode(MI) == HexagonII::PostInc;
}


// True if the PredicatedNew bit is set. Only valid on predicated
// instructions (asserted).
bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
  const uint64_t F = MI.getDesc().TSFlags;
  assert(isPredicated(MI));
  return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
}


// Opcode form of isPredicatedNew.
bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  assert(isPredicated(Opcode));
  return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
}


// True when the instruction executes on a true predicate (i.e. the
// PredicatedFalse bit is clear).
bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
  const uint64_t F = MI.getDesc().TSFlags;
  return !((F >> HexagonII::PredicatedFalsePos) &
           HexagonII::PredicatedFalseMask);
}


// Opcode form of isPredicatedTrue.
bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  // Make sure that the instruction is predicated.
  assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
  return !((F >> HexagonII::PredicatedFalsePos) &
           HexagonII::PredicatedFalseMask);
}


// True if the Predicated bit is set in the opcode's TSFlags.
bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
}


// NOTE(review): the bitwise NOT means this returns true when the
// PredicateLate bit is *clear* -- verify against the TSFlags encoding
// that this inversion is intended.
bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return ~(F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
}


// True if the branch is statically predicted taken. Only meaningful for
// .new-predicated or new-value branches (asserted).
bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  assert(get(Opcode).isBranch() &&
         (isPredicatedNew(Opcode) || isNewValue(Opcode)));
  return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
}


// True for the pseudo calls that save callee-saved registers.
bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
  return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
         MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
         MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
         MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
}

// Opcode table: true for loads that sign-extend their result (signed
// byte/half loads in every addressing and predication form).
bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // Byte
  case Hexagon::L2_loadrb_io:
  case Hexagon::L4_loadrb_ur:
  case Hexagon::L4_loadrb_ap:
  case Hexagon::L2_loadrb_pr:
  case Hexagon::L2_loadrb_pbr:
  case Hexagon::L2_loadrb_pi:
  case Hexagon::L2_loadrb_pci:
  case Hexagon::L2_loadrb_pcr:
  case Hexagon::L2_loadbsw2_io:
  case Hexagon::L4_loadbsw2_ur:
  case Hexagon::L4_loadbsw2_ap:
  case Hexagon::L2_loadbsw2_pr:
  case Hexagon::L2_loadbsw2_pbr:
  case Hexagon::L2_loadbsw2_pi:
  case Hexagon::L2_loadbsw2_pci:
  case Hexagon::L2_loadbsw2_pcr:
  case Hexagon::L2_loadbsw4_io:
  case Hexagon::L4_loadbsw4_ur:
  case Hexagon::L4_loadbsw4_ap:
  case Hexagon::L2_loadbsw4_pr:
  case Hexagon::L2_loadbsw4_pbr:
  case Hexagon::L2_loadbsw4_pi:
  case Hexagon::L2_loadbsw4_pci:
  case Hexagon::L2_loadbsw4_pcr:
  case Hexagon::L4_loadrb_rr:
  case Hexagon::L2_ploadrbt_io:
  case Hexagon::L2_ploadrbt_pi:
  case Hexagon::L2_ploadrbf_io:
  case Hexagon::L2_ploadrbf_pi:
  case Hexagon::L2_ploadrbtnew_io:
  case Hexagon::L2_ploadrbfnew_io:
  case Hexagon::L4_ploadrbt_rr:
  case Hexagon::L4_ploadrbf_rr:
  case Hexagon::L4_ploadrbtnew_rr:
  case Hexagon::L4_ploadrbfnew_rr:
  case Hexagon::L2_ploadrbtnew_pi:
  case Hexagon::L2_ploadrbfnew_pi:
  case Hexagon::L4_ploadrbt_abs:
  case Hexagon::L4_ploadrbf_abs:
  case Hexagon::L4_ploadrbtnew_abs:
  case Hexagon::L4_ploadrbfnew_abs:
  case Hexagon::L2_loadrbgp:
  // Half
  case Hexagon::L2_loadrh_io:
  case Hexagon::L4_loadrh_ur:
  case Hexagon::L4_loadrh_ap:
  case Hexagon::L2_loadrh_pr:
  case Hexagon::L2_loadrh_pbr:
  case Hexagon::L2_loadrh_pi:
  case Hexagon::L2_loadrh_pci:
  case Hexagon::L2_loadrh_pcr:
  case Hexagon::L4_loadrh_rr:
  case Hexagon::L2_ploadrht_io:
  case Hexagon::L2_ploadrht_pi:
  case Hexagon::L2_ploadrhf_io:
  case Hexagon::L2_ploadrhf_pi:
  case Hexagon::L2_ploadrhtnew_io:
  case Hexagon::L2_ploadrhfnew_io:
  case Hexagon::L4_ploadrht_rr:
  case Hexagon::L4_ploadrhf_rr:
  case Hexagon::L4_ploadrhtnew_rr:
  case Hexagon::L4_ploadrhfnew_rr:
  case Hexagon::L2_ploadrhtnew_pi:
  case Hexagon::L2_ploadrhfnew_pi:
  case Hexagon::L4_ploadrht_abs:
  case Hexagon::L4_ploadrhf_abs:
  case Hexagon::L4_ploadrhtnew_abs:
  case Hexagon::L4_ploadrhfnew_abs:
  case Hexagon::L2_loadrhgp:
    return true;
  default:
    return false;
  }
}


// True if the Solo bit is set: MI must be the only instruction in a packet.
bool HexagonInstrInfo::isSolo(const MachineInstr* MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
}


// True for the predicate-register spill/restore pseudos.
bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
    case Hexagon::STriw_pred :
    case Hexagon::LDriw_pred :
      return true;
    default:
      return false;
  }
}


// Heuristic: a branch with a global/symbol operand is treated as a tail
// call (tail calls branch to a symbol rather than a basic block).
bool HexagonInstrInfo::isTailCall(const MachineInstr *MI) const {
  if (!MI->isBranch())
    return false;

  for (auto &Op : MI->operands())
    if (Op.isGlobal() || Op.isSymbol())
      return true;
  return false;
}


// Returns true when SU has a timing class TC1.
bool HexagonInstrInfo::isTC1(const MachineInstr *MI) const {
  unsigned SchedClass = MI->getDesc().getSchedClass();
  switch (SchedClass) {
  case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
  case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
  case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
  case Hexagon::Sched::ALU64_tc_1_SLOT23:
  case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
  //case Hexagon::Sched::M_tc_1_SLOT23:
  case Hexagon::Sched::S_2op_tc_1_SLOT23:
  case Hexagon::Sched::S_3op_tc_1_SLOT23:
    return true;

  default:
    return false;
  }
}


// Returns true when MI has a timing class TC2.
bool HexagonInstrInfo::isTC2(const MachineInstr *MI) const {
  unsigned SchedClass = MI->getDesc().getSchedClass();
  switch (SchedClass) {
  case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
  case Hexagon::Sched::ALU64_tc_2_SLOT23:
  case Hexagon::Sched::CR_tc_2_SLOT3:
  case Hexagon::Sched::M_tc_2_SLOT23:
  case Hexagon::Sched::S_2op_tc_2_SLOT23:
  case Hexagon::Sched::S_3op_tc_2_SLOT23:
    return true;

  default:
    return false;
  }
}


// Returns true when MI has a timing class TC2-early.
bool HexagonInstrInfo::isTC2Early(const MachineInstr *MI) const {
  unsigned SchedClass = MI->getDesc().getSchedClass();
  switch (SchedClass) {
  case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
  case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
  case Hexagon::Sched::ALU64_tc_2early_SLOT23:
  case Hexagon::Sched::CR_tc_2early_SLOT23:
  case Hexagon::Sched::CR_tc_2early_SLOT3:
  case Hexagon::Sched::J_tc_2early_SLOT0123:
  case Hexagon::Sched::J_tc_2early_SLOT2:
  case Hexagon::Sched::J_tc_2early_SLOT23:
  case Hexagon::Sched::S_2op_tc_2early_SLOT23:
  case Hexagon::Sched::S_3op_tc_2early_SLOT23:
    return true;

  default:
    return false;
  }
}


// Returns true when MI has a timing class TC3x/TC4x (multiply resource).
bool HexagonInstrInfo::isTC4x(const MachineInstr *MI) const {
  if (!MI)
    return false;

  unsigned SchedClass = MI->getDesc().getSchedClass();
  return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
}


// True if MI's instruction type falls in the CVI (V60 HVX vector) range.
bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr *MI) const {
  if (!MI)
    return false;

  const uint64_t V = getType(MI);
  return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
}


// Check if the Offset is a valid auto-inc imm by Load/Store Type.
//
// For each memory value type, the auto-increment immediate must be within
// the type's signed range and a multiple of the access size.
bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, const int Offset) const {
  if (VT == MVT::v16i32 || VT == MVT::v8i64 ||
      VT == MVT::v32i16 || VT == MVT::v64i8) {
    // 64-byte HVX vector: multiple of 64, within #s3 range.
    return (Offset >= Hexagon_MEMV_AUTOINC_MIN &&
            Offset <= Hexagon_MEMV_AUTOINC_MAX &&
            (Offset & 0x3f) == 0);
  }
  // 128B
  if (VT == MVT::v32i32 || VT == MVT::v16i64 ||
      VT == MVT::v64i16 || VT == MVT::v128i8) {
    // 128-byte HVX vector: multiple of 128, within #s3 range.
    return (Offset >= Hexagon_MEMV_AUTOINC_MIN_128B &&
            Offset <= Hexagon_MEMV_AUTOINC_MAX_128B &&
            (Offset & 0x7f) == 0);
  }
  if (VT == MVT::i64) {
    return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
            Offset <= Hexagon_MEMD_AUTOINC_MAX &&
            (Offset & 0x7) == 0);
  }
  if (VT == MVT::i32) {
    return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
            Offset <= Hexagon_MEMW_AUTOINC_MAX &&
            (Offset & 0x3) == 0);
  }
  if (VT == MVT::i16) {
    return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
            Offset <= Hexagon_MEMH_AUTOINC_MAX &&
            (Offset & 0x1) == 0);
  }
  if (VT == MVT::i8) {
    return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
            Offset <= Hexagon_MEMB_AUTOINC_MAX);
  }
  llvm_unreachable("Not an auto-inc opc!");
}


bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
      bool Extend) const {
  // This function is to check whether the "Offset" is in the correct range of
  // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
  // inserted to calculate the final address. Due to this reason, the function
  // assumes that the "Offset" has correct alignment.
  // We used to assert if the offset was not properly aligned, however,
  // there are cases where a misaligned pointer recast can cause this
  // problem, and we need to allow for it. The front end warns of such
  // misaligns with respect to load size.

  switch (Opcode) {
  // 64-byte HVX vector loads/stores and their pseudos.
  case Hexagon::STriq_pred_V6:
  case Hexagon::STriq_pred_vec_V6:
  case Hexagon::STriv_pseudo_V6:
  case Hexagon::STrivv_pseudo_V6:
  case Hexagon::LDriq_pred_V6:
  case Hexagon::LDriq_pred_vec_V6:
  case Hexagon::LDriv_pseudo_V6:
  case Hexagon::LDrivv_pseudo_V6:
  case Hexagon::LDrivv_indexed:
  case Hexagon::STrivv_indexed:
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vS32b_ai:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::V6_vS32Ub_ai:
    return (Offset >= Hexagon_MEMV_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMV_OFFSET_MAX);

  // 128-byte HVX vector loads/stores and their pseudos.
  case Hexagon::STriq_pred_V6_128B:
  case Hexagon::STriq_pred_vec_V6_128B:
  case Hexagon::STriv_pseudo_V6_128B:
  case Hexagon::STrivv_pseudo_V6_128B:
  case Hexagon::LDriq_pred_V6_128B:
  case Hexagon::LDriq_pred_vec_V6_128B:
  case Hexagon::LDriv_pseudo_V6_128B:
  case Hexagon::LDrivv_pseudo_V6_128B:
  case Hexagon::LDrivv_indexed_128B:
  case Hexagon::STrivv_indexed_128B:
  case Hexagon::V6_vL32b_ai_128B:
  case Hexagon::V6_vS32b_ai_128B:
  case Hexagon::V6_vL32Ub_ai_128B:
  case Hexagon::V6_vS32Ub_ai_128B:
    return (Offset >= Hexagon_MEMV_OFFSET_MIN_128B) &&
      (Offset <= Hexagon_MEMV_OFFSET_MAX_128B);

  case Hexagon::J2_loop0i:
  case Hexagon::J2_loop1i:
    return isUInt<10>(Offset);
  }

  // Any other opcode with an extendable operand accepts any offset once
  // constant-extended.
  if (Extend)
    return true;

  switch (Opcode) {
  case Hexagon::L2_loadri_io:
  case Hexagon::S2_storeri_io:
    return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMW_OFFSET_MAX);

  case Hexagon::L2_loadrd_io:
  case Hexagon::S2_storerd_io:
    return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMD_OFFSET_MAX);

  case Hexagon::L2_loadrh_io:
  case Hexagon::L2_loadruh_io:
  case Hexagon::S2_storerh_io:
    return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMH_OFFSET_MAX);

  case Hexagon::L2_loadrb_io:
  case Hexagon::L2_loadrub_io:
  case Hexagon::S2_storerb_io:
    return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
      (Offset <= Hexagon_MEMB_OFFSET_MAX);

  case Hexagon::A2_addi:
    return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
      (Offset <= Hexagon_ADDI_OFFSET_MAX);

  // Memops take an unsigned offset scaled by the access size.
  case Hexagon::L4_iadd_memopw_io :
  case Hexagon::L4_isub_memopw_io :
  case Hexagon::L4_add_memopw_io :
  case Hexagon::L4_sub_memopw_io :
  case Hexagon::L4_and_memopw_io :
  case Hexagon::L4_or_memopw_io :
    return (0 <= Offset && Offset <= 255);

  case Hexagon::L4_iadd_memoph_io :
  case Hexagon::L4_isub_memoph_io :
  case Hexagon::L4_add_memoph_io :
  case Hexagon::L4_sub_memoph_io :
  case Hexagon::L4_and_memoph_io :
  case Hexagon::L4_or_memoph_io :
    return (0 <= Offset && Offset <= 127);

  case Hexagon::L4_iadd_memopb_io :
  case Hexagon::L4_isub_memopb_io :
  case Hexagon::L4_add_memopb_io :
  case Hexagon::L4_sub_memopb_io :
  case Hexagon::L4_and_memopb_io :
  case Hexagon::L4_or_memopb_io :
    return (0 <= Offset && Offset <= 63);

  // LDriw_xxx and STriw_xxx are pseudo operations, so it has to take offset of
  // any size. Later pass knows how to handle it.
  case Hexagon::STriw_pred:
  case Hexagon::LDriw_pred:
  case Hexagon::STriw_mod:
  case Hexagon::LDriw_mod:
    return true;

  case Hexagon::TFR_FI:
  case Hexagon::TFR_FIA:
  case Hexagon::INLINEASM:
    return true;

  // Predicated/immediate stores and predicated loads: unsigned 6-bit
  // field, scaled by the access size (byte/half/word/double).
  case Hexagon::L2_ploadrbt_io:
  case Hexagon::L2_ploadrbf_io:
  case Hexagon::L2_ploadrubt_io:
  case Hexagon::L2_ploadrubf_io:
  case Hexagon::S2_pstorerbt_io:
  case Hexagon::S2_pstorerbf_io:
  case Hexagon::S4_storeirb_io:
  case Hexagon::S4_storeirbt_io:
  case Hexagon::S4_storeirbf_io:
    return isUInt<6>(Offset);

  case Hexagon::L2_ploadrht_io:
  case Hexagon::L2_ploadrhf_io:
  case Hexagon::L2_ploadruht_io:
  case Hexagon::L2_ploadruhf_io:
  case Hexagon::S2_pstorerht_io:
  case Hexagon::S2_pstorerhf_io:
  case Hexagon::S4_storeirh_io:
  case Hexagon::S4_storeirht_io:
  case Hexagon::S4_storeirhf_io:
    return isShiftedUInt<6,1>(Offset);

  case Hexagon::L2_ploadrit_io:
  case Hexagon::L2_ploadrif_io:
  case Hexagon::S2_pstorerit_io:
  case Hexagon::S2_pstorerif_io:
  case Hexagon::S4_storeiri_io:
  case Hexagon::S4_storeirit_io:
  case Hexagon::S4_storeirif_io:
    return isShiftedUInt<6,2>(Offset);

  case Hexagon::L2_ploadrdt_io:
  case Hexagon::L2_ploadrdf_io:
  case Hexagon::S2_pstorerdt_io:
  case Hexagon::S2_pstorerdf_io:
    return isShiftedUInt<6,3>(Offset);
  } // switch

  llvm_unreachable("No offset range is defined for this opcode. "
                   "Please define it in the above switch statement!");
}


// Vector accumulator: a V60 vector instruction with the Accumulator bit.
bool HexagonInstrInfo::isVecAcc(const MachineInstr *MI) const {
  return MI && isV60VectorInstruction(MI) && isAccumulator(MI);
}


// True if MI is a vector ALU instruction (CVI_VA / CVI_VA_DV type).
bool HexagonInstrInfo::isVecALU(const MachineInstr *MI) const {
  if (!MI)
    return false;
  const uint64_t F = get(MI->getOpcode()).TSFlags;
  const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
  return
    V == HexagonII::TypeCVI_VA ||
    V == HexagonII::TypeCVI_VA_DV;
}


// True when ConsMI can use ProdMI's vector result in the next packet,
// via accumulator forwarding, ALU forwarding, or new-value store promotion.
bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr *ProdMI,
      const MachineInstr *ConsMI) const {
  if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
    return true;

  if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
    return true;

  if (mayBeNewStore(ConsMI))
    return true;

  return false;
}

// Opcode table: true for loads that zero-extend their result.
// (Continues beyond this chunk.)
bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  // Byte
  case Hexagon::L2_loadrub_io:
  case Hexagon::L4_loadrub_ur:
  case Hexagon::L4_loadrub_ap:
  case Hexagon::L2_loadrub_pr:
  case Hexagon::L2_loadrub_pbr:
  case Hexagon::L2_loadrub_pi:
  case Hexagon::L2_loadrub_pci:
  case Hexagon::L2_loadrub_pcr:
  case Hexagon::L2_loadbzw2_io:
  case Hexagon::L4_loadbzw2_ur:
  case Hexagon::L4_loadbzw2_ap:
  case Hexagon::L2_loadbzw2_pr:
  case Hexagon::L2_loadbzw2_pbr:
  case Hexagon::L2_loadbzw2_pi:
  case Hexagon::L2_loadbzw2_pci:
  case Hexagon::L2_loadbzw2_pcr:
  case Hexagon::L2_loadbzw4_io:
  case Hexagon::L4_loadbzw4_ur:
  case Hexagon::L4_loadbzw4_ap:
  case Hexagon::L2_loadbzw4_pr:
  case Hexagon::L2_loadbzw4_pbr:
  case Hexagon::L2_loadbzw4_pi:
  case Hexagon::L2_loadbzw4_pci:
  case Hexagon::L2_loadbzw4_pcr:
  case Hexagon::L4_loadrub_rr:
  case Hexagon::L2_ploadrubt_io:
2881 case Hexagon::L2_ploadrubt_pi: 2882 case Hexagon::L2_ploadrubf_io: 2883 case Hexagon::L2_ploadrubf_pi: 2884 case Hexagon::L2_ploadrubtnew_io: 2885 case Hexagon::L2_ploadrubfnew_io: 2886 case Hexagon::L4_ploadrubt_rr: 2887 case Hexagon::L4_ploadrubf_rr: 2888 case Hexagon::L4_ploadrubtnew_rr: 2889 case Hexagon::L4_ploadrubfnew_rr: 2890 case Hexagon::L2_ploadrubtnew_pi: 2891 case Hexagon::L2_ploadrubfnew_pi: 2892 case Hexagon::L4_ploadrubt_abs: 2893 case Hexagon::L4_ploadrubf_abs: 2894 case Hexagon::L4_ploadrubtnew_abs: 2895 case Hexagon::L4_ploadrubfnew_abs: 2896 case Hexagon::L2_loadrubgp: 2897 // Half 2898 case Hexagon::L2_loadruh_io: 2899 case Hexagon::L4_loadruh_ur: 2900 case Hexagon::L4_loadruh_ap: 2901 case Hexagon::L2_loadruh_pr: 2902 case Hexagon::L2_loadruh_pbr: 2903 case Hexagon::L2_loadruh_pi: 2904 case Hexagon::L2_loadruh_pci: 2905 case Hexagon::L2_loadruh_pcr: 2906 case Hexagon::L4_loadruh_rr: 2907 case Hexagon::L2_ploadruht_io: 2908 case Hexagon::L2_ploadruht_pi: 2909 case Hexagon::L2_ploadruhf_io: 2910 case Hexagon::L2_ploadruhf_pi: 2911 case Hexagon::L2_ploadruhtnew_io: 2912 case Hexagon::L2_ploadruhfnew_io: 2913 case Hexagon::L4_ploadruht_rr: 2914 case Hexagon::L4_ploadruhf_rr: 2915 case Hexagon::L4_ploadruhtnew_rr: 2916 case Hexagon::L4_ploadruhfnew_rr: 2917 case Hexagon::L2_ploadruhtnew_pi: 2918 case Hexagon::L2_ploadruhfnew_pi: 2919 case Hexagon::L4_ploadruht_abs: 2920 case Hexagon::L4_ploadruhf_abs: 2921 case Hexagon::L4_ploadruhtnew_abs: 2922 case Hexagon::L4_ploadruhfnew_abs: 2923 case Hexagon::L2_loadruhgp: 2924 return true; 2925 default: 2926 return false; 2927 } 2928 } 2929 2930 2931 // Add latency to instruction. 
2932 bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr *MI1, 2933 const MachineInstr *MI2) const { 2934 if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2)) 2935 if (!isVecUsableNextPacket(MI1, MI2)) 2936 return true; 2937 return false; 2938 } 2939 2940 2941 /// \brief Get the base register and byte offset of a load/store instr. 2942 bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, 2943 unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI) 2944 const { 2945 unsigned AccessSize = 0; 2946 int OffsetVal = 0; 2947 BaseReg = getBaseAndOffset(&LdSt, OffsetVal, AccessSize); 2948 Offset = OffsetVal; 2949 return BaseReg != 0; 2950 } 2951 2952 2953 /// \brief Can these instructions execute at the same time in a bundle. 2954 bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr *First, 2955 const MachineInstr *Second) const { 2956 if (DisableNVSchedule) 2957 return false; 2958 if (mayBeNewStore(Second)) { 2959 // Make sure the definition of the first instruction is the value being 2960 // stored. 2961 const MachineOperand &Stored = 2962 Second->getOperand(Second->getNumOperands() - 1); 2963 if (!Stored.isReg()) 2964 return false; 2965 for (unsigned i = 0, e = First->getNumOperands(); i < e; ++i) { 2966 const MachineOperand &Op = First->getOperand(i); 2967 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg()) 2968 return true; 2969 } 2970 } 2971 return false; 2972 } 2973 2974 2975 bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const { 2976 for (auto &I : *B) 2977 if (I.isEHLabel()) 2978 return true; 2979 return false; 2980 } 2981 2982 2983 // Returns true if an instruction can be converted into a non-extended 2984 // equivalent instruction. 
2985 bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr *MI) const { 2986 short NonExtOpcode; 2987 // Check if the instruction has a register form that uses register in place 2988 // of the extended operand, if so return that as the non-extended form. 2989 if (Hexagon::getRegForm(MI->getOpcode()) >= 0) 2990 return true; 2991 2992 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) { 2993 // Check addressing mode and retrieve non-ext equivalent instruction. 2994 2995 switch (getAddrMode(MI)) { 2996 case HexagonII::Absolute : 2997 // Load/store with absolute addressing mode can be converted into 2998 // base+offset mode. 2999 NonExtOpcode = Hexagon::getBaseWithImmOffset(MI->getOpcode()); 3000 break; 3001 case HexagonII::BaseImmOffset : 3002 // Load/store with base+offset addressing mode can be converted into 3003 // base+register offset addressing mode. However left shift operand should 3004 // be set to 0. 3005 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode()); 3006 break; 3007 case HexagonII::BaseLongOffset: 3008 NonExtOpcode = Hexagon::getRegShlForm(MI->getOpcode()); 3009 break; 3010 default: 3011 return false; 3012 } 3013 if (NonExtOpcode < 0) 3014 return false; 3015 return true; 3016 } 3017 return false; 3018 } 3019 3020 3021 bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr *MI) const { 3022 return Hexagon::getRealHWInstr(MI->getOpcode(), 3023 Hexagon::InstrType_Pseudo) >= 0; 3024 } 3025 3026 3027 bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B) 3028 const { 3029 MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end(); 3030 while (I != E) { 3031 if (I->isBarrier()) 3032 return true; 3033 ++I; 3034 } 3035 return false; 3036 } 3037 3038 3039 // Returns true, if a LD insn can be promoted to a cur load. 
bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr *MI) const {
  auto &HST = MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>();
  const uint64_t F = MI->getDesc().TSFlags;
  // The mayCVLoad TSFlags bit marks candidates; .cur is only available on
  // V60 and later subtargets.
  return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
         HST.hasV60TOps();
}


// Returns true, if a ST insn can be promoted to a new-value store.
bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
}


// Returns true when scheduling ConsMI immediately after ProdMI would stall:
// ProdMI is a V60 vector instruction, ConsMI depends on it, and the value
// cannot be forwarded into the next packet.
bool HexagonInstrInfo::producesStall(const MachineInstr *ProdMI,
      const MachineInstr *ConsMI) const {
  // There is no stall when ProdMI is not a V60 vector.
  if (!isV60VectorInstruction(ProdMI))
    return false;

  // There is no stall when ProdMI and ConsMI are not dependent.
  if (!isDependent(ProdMI, ConsMI))
    return false;

  // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
  // are scheduled in consecutive packets.
  if (isVecUsableNextPacket(ProdMI, ConsMI))
    return false;

  return true;
}


// Returns true when MI would stall if placed after the instruction (or the
// bundle of instructions) at BII.
bool HexagonInstrInfo::producesStall(const MachineInstr *MI,
      MachineBasicBlock::const_instr_iterator BII) const {
  // There is no stall when I is not a V60 vector.
  if (!isV60VectorInstruction(MI))
    return false;

  MachineBasicBlock::const_instr_iterator MII = BII;
  MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();

  // Not a bundle: check the single preceding instruction directly.
  if (!(*MII).isBundle()) {
    const MachineInstr *J = &*MII;
    if (!isV60VectorInstruction(J))
      return false;
    else if (isVecUsableNextPacket(J, MI))
      return false;
    return true;
  }

  // Bundle header: check each instruction inside the bundle.
  for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
    const MachineInstr *J = &*MII;
    if (producesStall(J, MI))
      return true;
  }
  return false;
}


// Returns true when PredReg may be used by MI as a .new predicate source.
bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr *MI,
      unsigned PredReg) const {
  for (unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
    const MachineOperand &MO = MI->getOperand(opNum);
    if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
      return false; // Predicate register must be explicitly defined.
  }

  // Hexagon Programmer's Reference says that decbin, memw_locked, and
  // memd_locked cannot be used as .new as well,
  // but we don't seem to have these instructions defined.
  return MI->getOpcode() != Hexagon::A4_tlbmatch;
}


// Returns true for the conditional-jump opcodes (including the .new and
// predicted-taken variants).
bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
  return (Opcode == Hexagon::J2_jumpt)      ||
         (Opcode == Hexagon::J2_jumpf)      ||
         (Opcode == Hexagon::J2_jumptnew)   ||
         (Opcode == Hexagon::J2_jumpfnew)   ||
         (Opcode == Hexagon::J2_jumptnewpt) ||
         (Opcode == Hexagon::J2_jumpfnewpt);
}


// Returns true when the condition carried in Cond is a negated predicate.
bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
  if (Cond.empty() || !isPredicated(Cond[0].getImm()))
    return false;
  return !isPredicatedTrue(Cond[0].getImm());
}


short HexagonInstrInfo::getAbsoluteForm(const MachineInstr *MI) const {
  return Hexagon::getAbsoluteForm(MI->getOpcode());
}


// Extracts the addressing-mode field from the instruction's TSFlags.
unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
}


// Returns the base register in a memory access (load/store). The offset is
// returned in Offset and the access size is returned in AccessSize.
unsigned HexagonInstrInfo::getBaseAndOffset(const MachineInstr *MI,
      int &Offset, unsigned &AccessSize) const {
  // Return if it is not a base+offset type instruction or a MemOp.
  if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
      getAddrMode(MI) != HexagonII::BaseLongOffset &&
      !isMemOp(MI) && !isPostIncrement(MI))
    return 0;

  // Since it is a memory access instruction, getMemAccessSize() should never
  // return 0.
  assert (getMemAccessSize(MI) &&
          "BaseImmOffset or BaseLongOffset or MemOp without accessSize");

  // Return Values of getMemAccessSize() are
  // 0 - Checked in the assert above.
  // 1, 2, 3, 4 & 7, 8 - The statement below is correct for all these.
  // MemAccessSize is represented as 1+log2(N) where N is size in bits.
  AccessSize = (1U << (getMemAccessSize(MI) - 1));

  unsigned basePos = 0, offsetPos = 0;
  if (!getBaseAndOffsetPosition(MI, basePos, offsetPos))
    return 0;

  // Post increment updates its EA after the mem access,
  // so we need to treat its offset as zero.
  if (isPostIncrement(MI))
    Offset = 0;
  else {
    Offset = MI->getOperand(offsetPos).getImm();
  }

  return MI->getOperand(basePos).getReg();
}


/// Return the position of the base and offset operands for this instruction.
bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr *MI,
      unsigned &BasePos, unsigned &OffsetPos) const {
  // Deal with memops first.
  if (isMemOp(MI)) {
    assert (MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
            "Bad Memop.");
    BasePos = 0;
    OffsetPos = 1;
  } else if (MI->mayStore()) {
    BasePos = 0;
    OffsetPos = 1;
  } else if (MI->mayLoad()) {
    BasePos = 1;
    OffsetPos = 2;
  } else
    return false;

  // Predicated and post-increment forms carry extra leading operands, which
  // shift the base/offset positions.
  if (isPredicated(*MI)) {
    BasePos++;
    OffsetPos++;
  }
  if (isPostIncrement(MI)) {
    BasePos++;
    OffsetPos++;
  }

  if (!MI->getOperand(BasePos).isReg() || !MI->getOperand(OffsetPos).isImm())
    return false;

  return true;
}


// Inserts branching instructions in reverse order of their occurence.
// e.g. jump_t t1 (i1)
//      jump t2 (i2)
// Jumpers = {i2, i1}
SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
      MachineBasicBlock& MBB) const {
  SmallVector<MachineInstr*, 2> Jumpers;
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
    return Jumpers;

  // A basic block may look like this:
  //
  //  [   insn
  //     EH_LABEL
  //      insn
  //      insn
  //      insn
  //     EH_LABEL
  //      insn     ]
  //
  // It has two succs but does not have a terminator
  // Don't know how to handle it.
  do {
    --I;
    if (I->isEHLabel())
      return Jumpers;
  } while (I != MBB.instr_begin());

  I = MBB.instr_end();
  --I;

  // Skip trailing debug values to find the real last instruction.
  while (I->isDebugValue()) {
    if (I == MBB.instr_begin())
      return Jumpers;
    --I;
  }
  if (!isUnpredicatedTerminator(*I))
    return Jumpers;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  Jumpers.push_back(LastInst);
  MachineInstr *SecondLastInst = nullptr;
  // Find one more terminator if present.
  do {
    if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
      if (!SecondLastInst) {
        SecondLastInst = &*I;
        Jumpers.push_back(SecondLastInst);
      } else // This is a third branch.
        return Jumpers;
    }
    if (I == MBB.instr_begin())
      break;
    --I;
  } while (true);
  return Jumpers;
}


// Negative opcodes (no mapping) propagate through as -1.
short HexagonInstrInfo::getBaseWithLongOffset(short Opcode) const {
  if (Opcode < 0)
    return -1;
  return Hexagon::getBaseWithLongOffset(Opcode);
}


short HexagonInstrInfo::getBaseWithLongOffset(const MachineInstr *MI) const {
  return Hexagon::getBaseWithLongOffset(MI->getOpcode());
}


short HexagonInstrInfo::getBaseWithRegOffset(const MachineInstr *MI) const {
  return Hexagon::getBaseWithRegOffset(MI->getOpcode());
}


// Returns Operand Index for the constant extended instruction.
unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
}

// See if instruction could potentially be a compound candidate.
// If so, return its group. Zero otherwise.
HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
      const MachineInstr *MI) const {
  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;

  switch (MI->getOpcode()) {
  default:
    return HexagonII::HCG_None;
  //
  // Compound pairs.
  // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
  // "Rd16=#U6 ; jump #r9:2"
  // "Rd16=Rs16 ; jump #r9:2"
  //
  case Hexagon::C2_cmpeq:
  case Hexagon::C2_cmpgt:
  case Hexagon::C2_cmpgtu:
    // Compare of two sub-instruction registers producing P0 or P1.
    DstReg = MI->getOperand(0).getReg();
    Src1Reg = MI->getOperand(1).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
        isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
      return HexagonII::HCG_A;
    break;
  case Hexagon::C2_cmpeqi:
  case Hexagon::C2_cmpgti:
  case Hexagon::C2_cmpgtui:
    // P0 = cmp.eq(Rs,#u2)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
        isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
        ((isUInt<5>(MI->getOperand(2).getImm())) ||
         (MI->getOperand(2).getImm() == -1)))
      return HexagonII::HCG_A;
    break;
  case Hexagon::A2_tfr:
    // Rd = Rs
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
      return HexagonII::HCG_A;
    break;
  case Hexagon::A2_tfrsi:
    // Rd = #u6
    // Do not test for #u6 size since the const is getting extended
    // regardless and compound could be formed.
    DstReg = MI->getOperand(0).getReg();
    if (isIntRegForSubInst(DstReg))
      return HexagonII::HCG_A;
    break;
  case Hexagon::S2_tstbit_i:
    // Only tstbit of bit 0 into P0/P1 qualifies.
    DstReg = MI->getOperand(0).getReg();
    Src1Reg = MI->getOperand(1).getReg();
    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
        MI->getOperand(2).isImm() &&
        isIntRegForSubInst(Src1Reg) && (MI->getOperand(2).getImm() == 0))
      return HexagonII::HCG_A;
    break;
  // The fact that .new form is used pretty much guarantees
  // that predicate register will match. Nevertheless,
  // there could be some false positives without additional
  // checking.
  case Hexagon::J2_jumptnew:
  case Hexagon::J2_jumpfnew:
  case Hexagon::J2_jumptnewpt:
  case Hexagon::J2_jumpfnewpt:
    Src1Reg = MI->getOperand(0).getReg();
    if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
        (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
      return HexagonII::HCG_B;
    break;
  // Transfer and jump:
  // Rd=#U6 ; jump #r9:2
  // Rd=Rs ; jump #r9:2
  // Do not test for jump range here.
  case Hexagon::J2_jump:
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
    return HexagonII::HCG_C;
    break;
  }

  return HexagonII::HCG_None;
}


// Returns -1 (wrapped to unsigned) when there is no opcode found.
// Combines a compare (group A) and a .new jump (group B) into a single
// compound jump opcode; currently only cmp.eq-immediate with jumptnew on
// P0/P1 is handled.
unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr *GA,
      const MachineInstr *GB) const {
  assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
  assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
  if ((GA->getOpcode() != Hexagon::C2_cmpeqi) ||
      (GB->getOpcode() != Hexagon::J2_jumptnew))
    return -1;
  unsigned DestReg = GA->getOperand(0).getReg();
  // The jump must actually consume the predicate produced by the compare.
  if (!GB->readsRegister(DestReg))
    return -1;
  if (DestReg == Hexagon::P0)
    return Hexagon::J4_cmpeqi_tp0_jump_nt;
  if (DestReg == Hexagon::P1)
    return Hexagon::J4_cmpeqi_tp1_jump_nt;
  return -1;
}


// Returns the predicated form of Opc, with the requested predicate sense.
int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
  enum Hexagon::PredSense inPredSense;
  inPredSense = invertPredicate ? Hexagon::PredSense_false :
                                  Hexagon::PredSense_true;
  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
  if (CondOpcode >= 0) // Valid Conditional opcode/instruction
    return CondOpcode;

  // This switch case will be removed once all the instructions have been
  // modified to use relation maps.
  switch(Opc) {
  case Hexagon::TFRI_f:
    return !invertPredicate ? Hexagon::TFRI_cPt_f :
                              Hexagon::TFRI_cNotPt_f;
  }

  llvm_unreachable("Unexpected predicable instruction");
}


// Return the cur value instruction for a given load.
int HexagonInstrInfo::getDotCurOp(const MachineInstr* MI) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unknown .cur type");
  case Hexagon::V6_vL32b_pi:
    return Hexagon::V6_vL32b_cur_pi;
  case Hexagon::V6_vL32b_ai:
    return Hexagon::V6_vL32b_cur_ai;
  //128B
  case Hexagon::V6_vL32b_pi_128B:
    return Hexagon::V6_vL32b_cur_pi_128B;
  case Hexagon::V6_vL32b_ai_128B:
    return Hexagon::V6_vL32b_cur_ai_128B;
  }
  // Unreachable: the switch above returns on every case; kept to satisfy
  // compilers that do not see llvm_unreachable as noreturn.
  return 0;
}



// The diagram below shows the steps involved in the conversion of a predicated
// store instruction to its .new predicated new-value form.
//
// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
//                ^           ^
//               /             \ (not OK. it will cause new-value store to be
//              /               X conditional on p0.new while R2 producer is
//             /                 \ on p0)
//            /                   \.
//     p.new store                 p.old NV store
// [if(p0.new)memw(R0+#0)=R2]  [if(p0)memw(R0+#0)=R2.new]
//            ^                  ^
//             \                /
//              \              /
//               \            /
//                 p.old store
//             [if (p0)memw(R0+#0)=R2]
//
//
// The following set of instructions further explains the scenario where
// conditional new-value store becomes invalid when promoted to .new predicate
// form.
//
// { 1) if (p0) r0 = add(r1, r2)
//   2) p0 = cmp.eq(r3, #0) }
//
//   3) if (p0) memb(r1+#0) = r0  --> this instruction can't be grouped with
// the first two instructions because in instr 1, r0 is conditional on old value
// of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
// is not valid for new-value stores.
// Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
// from the "Conditional Store" list. Because a predicated new value store
// would NOT be promoted to a double dot new store. See diagram below:
// This function returns yes for those stores that are predicated but not
// yet promoted to predicate dot new instructions.
//
//                          +---------------------+
//                    /-----| if (p0) memw(..)=r0 |---------\~
//                   ||     +---------------------+         ||
//          promote  ||       /\       /\                   ||  promote
//                   ||      /||\     /||\                  ||
//                  \||/    demote     ||                  \||/
//                   \/       ||       ||                   \/
//       +-------------------------+   ||   +-------------------------+
//       | if (p0.new) memw(..)=r0 |   ||   | if (p0) memw(..)=r0.new |
//       +-------------------------+   ||   +-------------------------+
//                        ||           ||         ||
//                        ||         demote      \||/
//                      promote        ||         \/ NOT possible
//                        ||           ||         /\~
//                       \||/          ||        /||\~
//                        \/           ||         ||
//                      +-----------------------------+
//                      | if (p0.new) memw(..)=r0.new |
//                      +-----------------------------+
//                           Double Dot New Store
//
// Returns the most basic instruction for the .new predicated instructions and
// new-value stores.
// For example, all of the following instructions will be converted back to the
// same instruction:
// 1) if (p0.new) memw(R0+#0) = R1.new  --->
// 2) if (p0) memw(R0+#0)= R1.new      -------> if (p0) memw(R0+#0) = R1
// 3) if (p0.new) memw(R0+#0) = R1     --->
//
// To understand the translation of instruction 1 to its original form, consider
// a packet with 3 instructions.
// { p0 = cmp.eq(R0,R1)
//   if (p0.new) R2 = add(R3, R4)
//   R5 = add (R3, R1)
// }
// if (p0) memw(R5+#0) = R2   <--- trying to include it in the previous packet
//
// This instruction can be part of the previous packet only if both p0 and R2
// are promoted to .new values. This promotion happens in steps, first
// predicate register is promoted to .new and in the next iteration R2 is
// promoted. Therefore, in case of dependence check failure (due to R5) during
// next iteration, it should be converted back to its most basic form.
// Return the new value instruction for a given store.
int HexagonInstrInfo::getDotNewOp(const MachineInstr* MI) const {
  int NVOpcode = Hexagon::getNewValueOpcode(MI->getOpcode());
  if (NVOpcode >= 0) // Valid new-value store instruction.
    return NVOpcode;

  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unknown .new type");
  case Hexagon::S4_storerb_ur:
    return Hexagon::S4_storerbnew_ur;

  // NOTE(review): the circular-addressing (*_pci) cases below map to
  // themselves — presumably there is no distinct new-value opcode for these
  // forms; confirm this is intentional.
  case Hexagon::S2_storerb_pci:
    return Hexagon::S2_storerb_pci;

  case Hexagon::S2_storeri_pci:
    return Hexagon::S2_storeri_pci;

  case Hexagon::S2_storerh_pci:
    return Hexagon::S2_storerh_pci;

  case Hexagon::S2_storerd_pci:
    return Hexagon::S2_storerd_pci;

  case Hexagon::S2_storerf_pci:
    return Hexagon::S2_storerf_pci;

  case Hexagon::V6_vS32b_ai:
    return Hexagon::V6_vS32b_new_ai;

  case Hexagon::V6_vS32b_pi:
    return Hexagon::V6_vS32b_new_pi;

  // 128B
  case Hexagon::V6_vS32b_ai_128B:
    return Hexagon::V6_vS32b_new_ai_128B;

  case Hexagon::V6_vS32b_pi_128B:
    return Hexagon::V6_vS32b_new_pi_128B;
  }
  // Unreachable: every case above returns; kept for compilers that do not
  // treat llvm_unreachable as noreturn.
  return 0;
}


// Returns the opcode to use when converting MI, which is a conditional jump,
// into a conditional instruction which uses the .new value of the predicate.
// We also use branch probabilities to add a hint to the jump.
int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr *MI,
      const MachineBranchProbabilityInfo *MBPI) const {
  // We assume that block can have at most two successors.
  bool taken = false;
  const MachineBasicBlock *Src = MI->getParent();
  const MachineOperand *BrTarget = &MI->getOperand(1);
  const MachineBasicBlock *Dst = BrTarget->getMBB();

  // Predict taken when the edge probability is at least one half; taken
  // branches get the :t (pt) hint.
  const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
  if (Prediction >= BranchProbability(1,2))
    taken = true;

  switch (MI->getOpcode()) {
  case Hexagon::J2_jumpt:
    return taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
  case Hexagon::J2_jumpf:
    return taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;

  default:
    llvm_unreachable("Unexpected jump instruction.");
  }
}


// Return .new predicate version for an instruction.
int HexagonInstrInfo::getDotNewPredOp(const MachineInstr *MI,
      const MachineBranchProbabilityInfo *MBPI) const {
  int NewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
  if (NewOpcode >= 0) // Valid predicate new instruction
    return NewOpcode;

  switch (MI->getOpcode()) {
  // Conditional Jumps
  case Hexagon::J2_jumpt:
  case Hexagon::J2_jumpf:
    return getDotNewPredJumpOp(MI, MBPI);

  default:
    assert(0 && "Unknown .new type");
  }
  return 0;
}


// Maps a .new-form opcode back to its "old" form: predicate-new becomes
// predicate-old, and new-value stores become regular stores.
int HexagonInstrInfo::getDotOldOp(const int opc) const {
  int NewOp = opc;
  if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
    NewOp = Hexagon::getPredOldOpcode(NewOp);
    assert(NewOp >= 0 &&
           "Couldn't change predicate new instruction to its old form.");
  }

  if (isNewValueStore(NewOp)) { // Convert into non-new-value format
    NewOp = Hexagon::getNonNVStore(NewOp);
    assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
  }
  return NewOp;
}


// See if instruction could potentially be a duplex candidate.
// If so, return its group. Zero otherwise.
// Classify \p MI into the 16-bit sub-instruction duplex group it can be
// encoded in (HSIG_L1/L2 loads, HSIG_S1/S2 stores, HSIG_A ALU), or
// HSIG_None when no compact form exists. Each case checks the register
// and immediate constraints of the corresponding compact encoding; the
// asm-style comments (e.g. "Rd = memw(Rs+#u4:2)") name the encoding
// being matched.
HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
    const MachineInstr *MI) const {
  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
  auto &HRI = getRegisterInfo();

  switch (MI->getOpcode()) {
  default:
    return HexagonII::HSIG_None;
  //
  // Group L1:
  //
  // Rd = memw(Rs+#u4:2)
  // Rd = memub(Rs+#u4:0)
  case Hexagon::L2_loadri_io:
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    // Special case this one from Group L2.
    // Rd = memw(r29+#u5:2)
    if (isIntRegForSubInst(DstReg)) {
      // Stack-relative loads get the wider u5:2 offset of Group L2.
      if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
          HRI.getStackRegister() == SrcReg &&
          MI->getOperand(2).isImm() &&
          isShiftedUInt<5,2>(MI->getOperand(2).getImm()))
        return HexagonII::HSIG_L2;
      // Rd = memw(Rs+#u4:2)
      if (isIntRegForSubInst(SrcReg) &&
          (MI->getOperand(2).isImm() &&
          isShiftedUInt<4,2>(MI->getOperand(2).getImm())))
        return HexagonII::HSIG_L1;
    }
    break;
  case Hexagon::L2_loadrub_io:
    // Rd = memub(Rs+#u4:0)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
        MI->getOperand(2).isImm() && isUInt<4>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_L1;
    break;
  //
  // Group L2:
  //
  // Rd = memh/memuh(Rs+#u3:1)
  // Rd = memb(Rs+#u3:0)
  // Rd = memw(r29+#u5:2) - Handled above.
  // Rdd = memd(r29+#u5:3)
  // deallocframe
  // [if ([!]p0[.new])] dealloc_return
  // [if ([!]p0[.new])] jumpr r31
  case Hexagon::L2_loadrh_io:
  case Hexagon::L2_loadruh_io:
    // Rd = memh/memuh(Rs+#u3:1)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
        MI->getOperand(2).isImm() &&
        isShiftedUInt<3,1>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_L2;
    break;
  case Hexagon::L2_loadrb_io:
    // Rd = memb(Rs+#u3:0)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
        MI->getOperand(2).isImm() &&
        isUInt<3>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_L2;
    break;
  case Hexagon::L2_loadrd_io:
    // Rdd = memd(r29+#u5:3) -- double-register load, stack pointer base only.
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isDblRegForSubInst(DstReg, HRI) &&
        Hexagon::IntRegsRegClass.contains(SrcReg) &&
        HRI.getStackRegister() == SrcReg &&
        MI->getOperand(2).isImm() &&
        isShiftedUInt<5,3>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_L2;
    break;
  // dealloc_return is not documented in Hexagon Manual, but marked
  // with A_SUBINSN attribute in iset_v4classic.py.
  case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
  case Hexagon::L4_return:
  case Hexagon::L2_deallocframe:
    return HexagonII::HSIG_L2;
  case Hexagon::EH_RETURN_JMPR:
  case Hexagon::JMPret :
    // jumpr r31
    // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
    DstReg = MI->getOperand(0).getReg();
    if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
      return HexagonII::HSIG_L2;
    break;
  case Hexagon::JMPrett:
  case Hexagon::JMPretf:
  case Hexagon::JMPrettnewpt:
  case Hexagon::JMPretfnewpt :
  case Hexagon::JMPrettnew :
  case Hexagon::JMPretfnew :
    // Predicated return: operand 0 is the predicate, operand 1 the target.
    DstReg = MI->getOperand(1).getReg();
    SrcReg = MI->getOperand(0).getReg();
    // [if ([!]p0[.new])] jumpr r31
    if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
        (Hexagon::P0 == SrcReg)) &&
        (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
      return HexagonII::HSIG_L2;
    break;
  case Hexagon::L4_return_t :
  case Hexagon::L4_return_f :
  case Hexagon::L4_return_tnew_pnt :
  case Hexagon::L4_return_fnew_pnt :
  case Hexagon::L4_return_tnew_pt :
  case Hexagon::L4_return_fnew_pt :
    // [if ([!]p0[.new])] dealloc_return -- only P0 is encodable in a duplex.
    SrcReg = MI->getOperand(0).getReg();
    if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
      return HexagonII::HSIG_L2;
    break;
  //
  // Group S1:
  //
  // memw(Rs+#u4:2) = Rt
  // memb(Rs+#u4:0) = Rt
  case Hexagon::S2_storeri_io:
    // Special case this one from Group S2.
    // memw(r29+#u5:2) = Rt
    Src1Reg = MI->getOperand(0).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
        isIntRegForSubInst(Src2Reg) &&
        HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
        isShiftedUInt<5,2>(MI->getOperand(1).getImm()))
      return HexagonII::HSIG_S2;
    // memw(Rs+#u4:2) = Rt
    if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
        MI->getOperand(1).isImm() &&
        isShiftedUInt<4,2>(MI->getOperand(1).getImm()))
      return HexagonII::HSIG_S1;
    break;
  case Hexagon::S2_storerb_io:
    // memb(Rs+#u4:0) = Rt
    Src1Reg = MI->getOperand(0).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
        MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()))
      return HexagonII::HSIG_S1;
    break;
  //
  // Group S2:
  //
  // memh(Rs+#u3:1) = Rt
  // memw(r29+#u5:2) = Rt
  // memd(r29+#s6:3) = Rtt
  // memw(Rs+#u4:2) = #U1
  // memb(Rs+#u4) = #U1
  // allocframe(#u5:3)
  case Hexagon::S2_storerh_io:
    // memh(Rs+#u3:1) = Rt
    // NOTE(review): listed under the Group S2 banner above, but classified
    // as HSIG_S1 here -- confirm against the packetizer's duplex pairing
    // rules whether S1 is the intended (more permissive) slot class.
    Src1Reg = MI->getOperand(0).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
        MI->getOperand(1).isImm() &&
        isShiftedUInt<3,1>(MI->getOperand(1).getImm()))
      return HexagonII::HSIG_S1;
    break;
  case Hexagon::S2_storerd_io:
    // memd(r29+#s6:3) = Rtt -- note the signed s6:3 offset, unlike the
    // unsigned offsets elsewhere in this switch.
    Src1Reg = MI->getOperand(0).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (isDblRegForSubInst(Src2Reg, HRI) &&
        Hexagon::IntRegsRegClass.contains(Src1Reg) &&
        HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
        isShiftedInt<6,3>(MI->getOperand(1).getImm()))
      return HexagonII::HSIG_S2;
    break;
  case Hexagon::S4_storeiri_io:
    // memw(Rs+#u4:2) = #U1
    Src1Reg = MI->getOperand(0).getReg();
    if (isIntRegForSubInst(Src1Reg) && MI->getOperand(1).isImm() &&
        isShiftedUInt<4,2>(MI->getOperand(1).getImm()) &&
        MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_S2;
    break;
  case Hexagon::S4_storeirb_io:
    // memb(Rs+#u4) = #U1
    Src1Reg = MI->getOperand(0).getReg();
    if (isIntRegForSubInst(Src1Reg) &&
        MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()) &&
        MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_S2;
    break;
  case Hexagon::S2_allocframe:
    // allocframe(#u5:3)
    if (MI->getOperand(0).isImm() &&
        isShiftedUInt<5,3>(MI->getOperand(0).getImm()))
      return HexagonII::HSIG_S1;
    break;
  //
  // Group A:
  //
  // Rx = add(Rx,#s7)
  // Rd = Rs
  // Rd = #u6
  // Rd = #-1
  // if ([!]P0[.new]) Rd = #0
  // Rd = add(r29,#u6:2)
  // Rx = add(Rx,Rs)
  // P0 = cmp.eq(Rs,#u2)
  // Rdd = combine(#0,Rs)
  // Rdd = combine(Rs,#0)
  // Rdd = combine(#u2,#U2)
  // Rd = add(Rs,#1)
  // Rd = add(Rs,#-1)
  // Rd = sxth/sxtb/zxtb/zxth(Rs)
  // Rd = and(Rs,#1)
  case Hexagon::A2_addi:
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg)) {
      // Rd = add(r29,#u6:2)
      if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
          HRI.getStackRegister() == SrcReg && MI->getOperand(2).isImm() &&
          isShiftedUInt<6,2>(MI->getOperand(2).getImm()))
        return HexagonII::HSIG_A;
      // Rx = add(Rx,#s7)
      if ((DstReg == SrcReg) && MI->getOperand(2).isImm() &&
          isInt<7>(MI->getOperand(2).getImm()))
        return HexagonII::HSIG_A;
      // Rd = add(Rs,#1)
      // Rd = add(Rs,#-1)
      if (isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
          ((MI->getOperand(2).getImm() == 1) ||
          (MI->getOperand(2).getImm() == -1)))
        return HexagonII::HSIG_A;
    }
    break;
  case Hexagon::A2_add:
    // Rx = add(Rx,Rs) -- destination must also be the first source.
    DstReg = MI->getOperand(0).getReg();
    Src1Reg = MI->getOperand(1).getReg();
    Src2Reg = MI->getOperand(2).getReg();
    if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
        isIntRegForSubInst(Src2Reg))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A2_andir:
    // Same as zxtb.
    // Rd16=and(Rs16,#255)
    // Rd16=and(Rs16,#1)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
        MI->getOperand(2).isImm() &&
        ((MI->getOperand(2).getImm() == 1) ||
        (MI->getOperand(2).getImm() == 255)))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A2_tfr:
    // Rd = Rs
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A2_tfrsi:
    // Rd = #u6
    // Do not test for #u6 size since the const is getting extended
    // regardless and compound could be formed.
    // Rd = #-1
    DstReg = MI->getOperand(0).getReg();
    if (isIntRegForSubInst(DstReg))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::C2_cmoveit:
  case Hexagon::C2_cmovenewit:
  case Hexagon::C2_cmoveif:
  case Hexagon::C2_cmovenewif:
    // if ([!]P0[.new]) Rd = #0
    // Actual form:
    // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) &&
        Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0)
      return HexagonII::HSIG_A;
    break;
  case Hexagon::C2_cmpeqi:
    // P0 = cmp.eq(Rs,#u2)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (Hexagon::PredRegsRegClass.contains(DstReg) &&
        Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
        MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm()))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A2_combineii:
  case Hexagon::A4_combineii:
    // Rdd = combine(#u2,#U2)
    // Either immediate may instead be a global whose (small) offset is
    // checked against the same 2-bit range.
    DstReg = MI->getOperand(0).getReg();
    if (isDblRegForSubInst(DstReg, HRI) &&
        ((MI->getOperand(1).isImm() && isUInt<2>(MI->getOperand(1).getImm())) ||
        (MI->getOperand(1).isGlobal() &&
        isUInt<2>(MI->getOperand(1).getOffset()))) &&
        ((MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm())) ||
        (MI->getOperand(2).isGlobal() &&
        isUInt<2>(MI->getOperand(2).getOffset()))))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A4_combineri:
    // Rdd = combine(Rs,#0)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
        ((MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) ||
        (MI->getOperand(2).isGlobal() && MI->getOperand(2).getOffset() == 0)))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A4_combineir:
    // Rdd = combine(#0,Rs)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(2).getReg();
    if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
        ((MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) ||
        (MI->getOperand(1).isGlobal() && MI->getOperand(1).getOffset() == 0)))
      return HexagonII::HSIG_A;
    break;
  case Hexagon::A2_sxtb:
  case Hexagon::A2_sxth:
  case Hexagon::A2_zxtb:
  case Hexagon::A2_zxth:
    // Rd = sxth/sxtb/zxtb/zxth(Rs)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();
    if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
      return HexagonII::HSIG_A;
    break;
  }

  return HexagonII::HSIG_None;
}


// Map a pseudo opcode to its real hardware instruction via the tablegen
// instruction mapping.
short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr *MI) const {
  return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Real);
}


// Return first non-debug instruction in the basic block.
MachineInstr *HexagonInstrInfo::getFirstNonDbgInst(MachineBasicBlock *BB)
      const {
  for (auto MII = BB->instr_begin(), End = BB->instr_end(); MII != End; MII++) {
    MachineInstr *MI = &*MII;
    if (MI->isDebugValue())
      continue;
    return MI;
  }
  return nullptr;
}


// Return the latency of \p MI derived from its timing class. Transient
// (no-code) instructions cost 0 cycles; unless timing-class latencies are
// enabled and BSB scheduling is off, the latency is capped at 1.
unsigned HexagonInstrInfo::getInstrTimingClassLatency(
      const InstrItineraryData *ItinData, const MachineInstr *MI) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return getInstrLatency(ItinData, *MI);

  // Get the latency embedded in the itinerary. If we're not using timing class
  // latencies or if we using BSB scheduling, then restrict the maximum latency
  // to 1 (that is, either 0 or 1).
  if (MI->isTransient())
    return 0;
  unsigned Latency = ItinData->getStageLatency(MI->getDesc().getSchedClass());
  if (!EnableTimingClassLatency ||
      MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>().
      useBSBScheduling())
    if (Latency > 1)
      Latency = 1;
  return Latency;
}


// inverts the predication logic.
// p -> NotP
// NotP -> P
bool HexagonInstrInfo::getInvertedPredSense(
      SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.empty())
    return false;
  // Cond[0] holds the (pseudo-)opcode; rewrite it in place.
  unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
  Cond[0].setImm(Opc);
  return true;
}


// Return the opcode with the opposite predicate sense (true <-> false).
// Asserts if \p Opc has no inverted form in the tablegen mapping.
unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
  int InvPredOpcode;
  InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
                                        : Hexagon::getTruePredOpcode(Opc);
  if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
    return InvPredOpcode;

  llvm_unreachable("Unexpected predicated instruction");
}


// Returns the max value that doesn't need to be extended.
int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
                    & HexagonII::ExtentSignedMask;
  unsigned bits =  (F >> HexagonII::ExtentBitsPos)
                 & HexagonII::ExtentBitsMask;

  // NOTE(review): assumes 0 < bits < 32; a 32-bit extent would make the
  // shift below undefined -- confirm the encodings never reach that.
  if (isSigned) // if value is signed
    return ~(-1U << (bits - 1));
  else
    return ~(-1U << bits);
}


// Decode the memory access size field from the instruction's TSFlags.
unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr* MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::MemAccessSizePos) & HexagonII::MemAccesSizeMask;
}


// Returns the min value that doesn't need to be extended.
int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
                    & HexagonII::ExtentSignedMask;
  unsigned bits =  (F >> HexagonII::ExtentBitsPos)
                 & HexagonII::ExtentBitsMask;

  if (isSigned) // if value is signed
    return -1U << (bits - 1);
  else
    return 0;
}


// Returns opcode of the non-extended equivalent instruction.
short HexagonInstrInfo::getNonExtOpcode(const MachineInstr *MI) const {
  // Check if the instruction has a register form that uses register in place
  // of the extended operand, if so return that as the non-extended form.
  short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
  if (NonExtOpcode >= 0)
    return NonExtOpcode;

  if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
    // Check addressing mode and retrieve non-ext equivalent instruction.
    switch (getAddrMode(MI)) {
    case HexagonII::Absolute :
      return Hexagon::getBaseWithImmOffset(MI->getOpcode());
    case HexagonII::BaseImmOffset :
      return Hexagon::getBaseWithRegOffset(MI->getOpcode());
    case HexagonII::BaseLongOffset:
      return Hexagon::getRegShlForm(MI->getOpcode());

    default:
      return -1;
    }
  }
  return -1;
}


// Extract the predicate register from a branch condition \p Cond, along
// with its operand position and register-state flags. Returns false for
// empty conditions, new-value jumps, and endloop conditions (which carry
// an MBB operand instead of a predicate).
bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
      unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
  if (Cond.empty())
    return false;
  assert(Cond.size() == 2);
  if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
    DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
    return false;
  }
  PredReg = Cond[1].getReg();
  PredRegPos = 1;
  // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
  PredRegFlags = 0;
  if (Cond[1].isImplicit())
    PredRegFlags = RegState::Implicit;
  if (Cond[1].isUndef())
    PredRegFlags |= RegState::Undef;
  return true;
}


// Map a real opcode to its pseudo-instruction pair via the tablegen mapping.
short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr *MI) const {
  return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Pseudo);
}


// Thin wrapper over the tablegen-generated register-form mapping.
short HexagonInstrInfo::getRegForm(const MachineInstr *MI) const {
  return Hexagon::getRegForm(MI->getOpcode());
}


// Return the number of bytes required to encode the instruction.
// Hexagon instructions are fixed length, 4 bytes, unless they
// use a constant extender, which requires another 4 bytes.
// For debug instructions and prolog labels, return 0.
unsigned HexagonInstrInfo::getSize(const MachineInstr *MI) const {
  if (MI->isDebugValue() || MI->isPosition())
    return 0;

  unsigned Size = MI->getDesc().getSize();
  if (!Size)
    // Assume the default insn size in case it cannot be determined
    // for whatever reason.
    Size = HEXAGON_INSTR_SIZE;

  if (isConstExtended(MI) || isExtended(MI))
    Size += HEXAGON_INSTR_SIZE;

  // Try and compute number of instructions in asm.
  if (BranchRelaxAsmLarge && MI->getOpcode() == Hexagon::INLINEASM) {
    const MachineBasicBlock &MBB = *MI->getParent();
    const MachineFunction *MF = MBB.getParent();
    const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

    // Count the number of register definitions to find the asm string.
    unsigned NumDefs = 0;
    for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
         ++NumDefs)
      assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");

    assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
    // Disassemble the AsmStr and approximate number of instructions.
    const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
    Size = getInlineAsmLength(AsmStr, *MAI);
  }

  return Size;
}


// Decode the instruction-type field from TSFlags.
uint64_t HexagonInstrInfo::getType(const MachineInstr* MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
}


// Return the functional-unit mask of \p MI's first itinerary stage.
unsigned HexagonInstrInfo::getUnits(const MachineInstr* MI) const {
  const TargetSubtargetInfo &ST = MI->getParent()->getParent()->getSubtarget();
  const InstrItineraryData &II = *ST.getInstrItineraryData();
  const InstrStage &IS = *II.beginStage(MI->getDesc().getSchedClass());

  return IS.getUnits();
}


// Decode the valid-subtarget mask for \p Opcode from TSFlags.
unsigned HexagonInstrInfo::getValidSubTargets(const unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return (F >> HexagonII::validSubTargetPos) & HexagonII::validSubTargetMask;
}


// Calculate size of the basic block without debug instructions.
unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
  return nonDbgMICount(BB->instr_begin(), BB->instr_end());
}


// Count the non-debug instructions inside a bundle (excluding the header).
unsigned HexagonInstrInfo::nonDbgBundleSize(
      MachineBasicBlock::const_iterator BundleHead) const {
  assert(BundleHead->isBundle() && "Not a bundle header");
  auto MII = BundleHead.getInstrIterator();
  // Skip the bundle header.
  return nonDbgMICount(++MII, getBundleEnd(*BundleHead));
}


/// immediateExtend - Changes the instruction in place to one using an immediate
/// extender.
void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
  assert((isExtendable(MI)||isConstExtended(MI)) &&
          "Instruction must be extendable");
  // Find which operand is extendable.
  short ExtOpNum = getCExtOpNum(MI);
  MachineOperand &MO = MI->getOperand(ExtOpNum);
  // This needs to be something we understand.
  assert((MO.isMBB() || MO.isImm()) &&
         "Branch with unknown extendable field type");
  // Mark given operand as extended.
  MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
}


// Invert the branch's predicate sense and retarget it at \p NewTarget.
// When branch prediction is enabled and the branch uses a .new predicate,
// the prediction bit is flipped along with the sense.
bool HexagonInstrInfo::invertAndChangeJumpTarget(
      MachineInstr* MI, MachineBasicBlock* NewTarget) const {
  DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
               << NewTarget->getNumber(); MI->dump(););
  assert(MI->isBranch());
  unsigned NewOpcode = getInvertedPredicatedOpcode(MI->getOpcode());
  int TargetPos = MI->getNumOperands() - 1;
  // In general branch target is the last operand,
  // but some implicit defs added at the end might change it.
  while ((TargetPos > -1) && !MI->getOperand(TargetPos).isMBB())
    --TargetPos;
  assert((TargetPos >= 0) && MI->getOperand(TargetPos).isMBB());
  MI->getOperand(TargetPos).setMBB(NewTarget);
  if (EnableBranchPrediction && isPredicatedNew(*MI)) {
    NewOpcode = reversePrediction(NewOpcode);
  }
  MI->setDesc(get(NewOpcode));
  return true;
}


// Debugging aid: materialize (and immediately erase) one instance of every
// Hexagon instruction so their scheduling classes can be dumped.
void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
  /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
  MachineFunction::iterator A = MF.begin();
  MachineBasicBlock &B = *A;
  MachineBasicBlock::iterator I = B.begin();
  MachineInstr *MI = &*I;
  DebugLoc DL = MI->getDebugLoc();
  MachineInstr *NewMI;

  for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
       insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
    NewMI = BuildMI(B, MI, DL, get(insn));
    DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
          "  Class: " << NewMI->getDesc().getSchedClass());
    NewMI->eraseFromParent();
  }
  /* --- The code above is used to generate complete set of Hexagon Insn --- */
}


// inverts the predication logic.
4268 // p -> NotP 4269 // NotP -> P 4270 bool HexagonInstrInfo::reversePredSense(MachineInstr* MI) const { 4271 DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI->dump()); 4272 MI->setDesc(get(getInvertedPredicatedOpcode(MI->getOpcode()))); 4273 return true; 4274 } 4275 4276 4277 // Reverse the branch prediction. 4278 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const { 4279 int PredRevOpcode = -1; 4280 if (isPredictedTaken(Opcode)) 4281 PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode); 4282 else 4283 PredRevOpcode = Hexagon::takenBranchPrediction(Opcode); 4284 assert(PredRevOpcode > 0); 4285 return PredRevOpcode; 4286 } 4287 4288 4289 // TODO: Add more rigorous validation. 4290 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond) 4291 const { 4292 return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1)); 4293 } 4294 4295 4296 short HexagonInstrInfo::xformRegToImmOffset(const MachineInstr *MI) const { 4297 return Hexagon::xformRegToImmOffset(MI->getOpcode()); 4298 } 4299