//===--- HexagonBitSimplify.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "hexbit"

#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"

using namespace llvm;

namespace llvm {
  void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
  FunctionPass *createHexagonBitSimplify();
}

namespace {
  // Set of virtual registers, based on BitVector.
  struct RegisterSet : private BitVector {
    RegisterSet() : BitVector() {}
    explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
    RegisterSet(const RegisterSet &RS) : BitVector(RS) {}

    using BitVector::clear;
    using BitVector::count;

    unsigned find_first() const {
      int First = BitVector::find_first();
      if (First < 0)
        return 0;
      return x2v(First);
    }

    unsigned find_next(unsigned Prev) const {
      int Next = BitVector::find_next(v2x(Prev));
      if (Next < 0)
        return 0;
      return x2v(Next);
    }

    RegisterSet &insert(unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return static_cast<RegisterSet&>(BitVector::set(Idx));
    }
    RegisterSet &remove(unsigned R) {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return *this;
      return static_cast<RegisterSet&>(BitVector::reset(Idx));
    }

    RegisterSet &insert(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
    }
    RegisterSet &remove(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::reset(Rs));
    }

    reference operator[](unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return BitVector::operator[](Idx);
    }
    bool operator[](unsigned R) const {
      unsigned Idx = v2x(R);
      assert(Idx < size());
      return BitVector::operator[](Idx);
    }
    bool has(unsigned R) const {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return false;
      return BitVector::test(Idx);
    }

    bool empty() const {
      return !BitVector::any();
    }
    bool includes(const RegisterSet &Rs) const {
      // A.BitVector::test(B) <=> A-B != {}
      return !Rs.BitVector::test(*this);
    }
    bool intersects(const RegisterSet &Rs) const {
      return BitVector::anyCommon(Rs);
    }

  private:
    void ensure(unsigned Idx) {
      if (size() <= Idx)
        resize(std::max(Idx+1, 32U));
    }
    static inline unsigned v2x(unsigned v) {
      return TargetRegisterInfo::virtReg2Index(v);
    }
    static inline unsigned x2v(unsigned x) {
      return TargetRegisterInfo::index2VirtReg(x);
    }
  };


  struct PrintRegSet {
    PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
      : RS(S), TRI(RI) {}
    friend raw_ostream &operator<< (raw_ostream &OS,
          const PrintRegSet &P);
  private:
    const RegisterSet &RS;
    const TargetRegisterInfo *TRI;
  };

  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
      LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
    OS << '{';
    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
      OS << ' ' << PrintReg(R, P.TRI);
    OS << " }";
    return OS;
  }
}


namespace {
  class Transformation;

  class HexagonBitSimplify : public MachineFunctionPass {
  public:
    static char ID;
    HexagonBitSimplify() : MachineFunctionPass(ID), MDT(0) {
      initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry());
    }
    virtual const char *getPassName() const {
      return "Hexagon bit simplification";
    }
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
    virtual bool runOnMachineFunction(MachineFunction &MF);

    static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
    static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
    static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
        const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
    static bool isConst(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W);
    static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W);
    static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W, uint64_t &U);
    static bool replaceReg(unsigned OldR, unsigned NewR,
        MachineRegisterInfo &MRI);
    static bool getSubregMask(const BitTracker::RegisterRef &RR,
        unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
    static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
        unsigned NewSR, MachineRegisterInfo &MRI);
    static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
        unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
    static bool parseRegSequence(const MachineInstr &I,
        BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH);

    static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
        uint16_t Begin);
    static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
        uint16_t Begin, const HexagonInstrInfo &HII);

    static const TargetRegisterClass *getFinalVRegClass(
        const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
    static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
        const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

  private:
    MachineDominatorTree *MDT;

    bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
  };

  char HexagonBitSimplify::ID = 0;
  typedef HexagonBitSimplify HBS;


  // The purpose of this class is to provide a common facility to traverse
  // the function top-down or bottom-up via the dominator tree, and keep
  // track of the available registers.
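  //
  // Illustrative sketch (not verbatim from this file): a transformation is
  // typically driven as
  //
  //   RegisterSet AVs;                // registers available on entry
  //   SomeTransformation T(/*...*/);  // a subclass of Transformation
  //   Changed |= visitBlock(EntryBlock, T, AVs);
  //
  // where visitBlock (defined below) walks the dominator tree and calls
  // T.processBlock() either before visiting the dominated blocks (TopDown)
  // or after them (bottom-up), passing in the set of available registers.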
202 class Transformation { 203 public: 204 bool TopDown; 205 Transformation(bool TD) : TopDown(TD) {} 206 virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0; 207 virtual ~Transformation() {} 208 }; 209 } 210 211 INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit", 212 "Hexagon bit simplification", false, false) 213 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) 214 INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit", 215 "Hexagon bit simplification", false, false) 216 217 218 bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T, 219 RegisterSet &AVs) { 220 MachineDomTreeNode *N = MDT->getNode(&B); 221 typedef GraphTraits<MachineDomTreeNode*> GTN; 222 bool Changed = false; 223 224 if (T.TopDown) 225 Changed = T.processBlock(B, AVs); 226 227 RegisterSet Defs; 228 for (auto &I : B) 229 getInstrDefs(I, Defs); 230 RegisterSet NewAVs = AVs; 231 NewAVs.insert(Defs); 232 233 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) { 234 MachineBasicBlock *SB = (*I)->getBlock(); 235 Changed |= visitBlock(*SB, T, NewAVs); 236 } 237 if (!T.TopDown) 238 Changed |= T.processBlock(B, AVs); 239 240 return Changed; 241 } 242 243 // 244 // Utility functions: 245 // 246 void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI, 247 RegisterSet &Defs) { 248 for (auto &Op : MI.operands()) { 249 if (!Op.isReg() || !Op.isDef()) 250 continue; 251 unsigned R = Op.getReg(); 252 if (!TargetRegisterInfo::isVirtualRegister(R)) 253 continue; 254 Defs.insert(R); 255 } 256 } 257 258 void HexagonBitSimplify::getInstrUses(const MachineInstr &MI, 259 RegisterSet &Uses) { 260 for (auto &Op : MI.operands()) { 261 if (!Op.isReg() || !Op.isUse()) 262 continue; 263 unsigned R = Op.getReg(); 264 if (!TargetRegisterInfo::isVirtualRegister(R)) 265 continue; 266 Uses.insert(R); 267 } 268 } 269 270 // Check if all the bits in range [B, E) in both cells are equal. 271 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1, 272 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2, 273 uint16_t W) { 274 for (uint16_t i = 0; i < W; ++i) { 275 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i]. 276 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0) 277 return false; 278 // Same for RC2[i]. 
279 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0) 280 return false; 281 if (RC1[B1+i] != RC2[B2+i]) 282 return false; 283 } 284 return true; 285 } 286 287 288 bool HexagonBitSimplify::isConst(const BitTracker::RegisterCell &RC, 289 uint16_t B, uint16_t W) { 290 assert(B < RC.width() && B+W <= RC.width()); 291 for (uint16_t i = B; i < B+W; ++i) 292 if (!RC[i].num()) 293 return false; 294 return true; 295 } 296 297 298 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC, 299 uint16_t B, uint16_t W) { 300 assert(B < RC.width() && B+W <= RC.width()); 301 for (uint16_t i = B; i < B+W; ++i) 302 if (!RC[i].is(0)) 303 return false; 304 return true; 305 } 306 307 308 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC, 309 uint16_t B, uint16_t W, uint64_t &U) { 310 assert(B < RC.width() && B+W <= RC.width()); 311 int64_t T = 0; 312 for (uint16_t i = B+W; i > B; --i) { 313 const BitTracker::BitValue &BV = RC[i-1]; 314 T <<= 1; 315 if (BV.is(1)) 316 T |= 1; 317 else if (!BV.is(0)) 318 return false; 319 } 320 U = T; 321 return true; 322 } 323 324 325 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR, 326 MachineRegisterInfo &MRI) { 327 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 328 !TargetRegisterInfo::isVirtualRegister(NewR)) 329 return false; 330 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 331 decltype(End) NextI; 332 for (auto I = Begin; I != End; I = NextI) { 333 NextI = std::next(I); 334 I->setReg(NewR); 335 } 336 return Begin != End; 337 } 338 339 340 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR, 341 unsigned NewSR, MachineRegisterInfo &MRI) { 342 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 343 !TargetRegisterInfo::isVirtualRegister(NewR)) 344 return false; 345 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 346 decltype(End) NextI; 347 for (auto I = Begin; I != End; I = NextI) { 348 NextI = std::next(I); 349 I->setReg(NewR); 350 I->setSubReg(NewSR); 351 } 352 return Begin != End; 353 } 354 355 356 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR, 357 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) { 358 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 359 !TargetRegisterInfo::isVirtualRegister(NewR)) 360 return false; 361 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 362 decltype(End) NextI; 363 for (auto I = Begin; I != End; I = NextI) { 364 NextI = std::next(I); 365 if (I->getSubReg() != OldSR) 366 continue; 367 I->setReg(NewR); 368 I->setSubReg(NewSR); 369 } 370 return Begin != End; 371 } 372 373 374 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB 375 // of Sub in Reg, and set Width to the size of Sub in bits. Return true, 376 // if this succeeded, otherwise return false. 377 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR, 378 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) { 379 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg); 380 if (RC == &Hexagon::IntRegsRegClass) { 381 assert(RR.Sub == 0); 382 Begin = 0; 383 Width = 32; 384 return true; 385 } 386 if (RC == &Hexagon::DoubleRegsRegClass) { 387 if (RR.Sub == 0) { 388 Begin = 0; 389 Width = 64; 390 return true; 391 } 392 assert(RR.Sub == Hexagon::subreg_loreg || RR.Sub == Hexagon::subreg_hireg); 393 Width = 32; 394 Begin = (RR.Sub == Hexagon::subreg_loreg ? 
0 : 32); 395 return true; 396 } 397 return false; 398 } 399 400 401 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high 402 // subregister. 403 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I, 404 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH) { 405 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE); 406 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm(); 407 assert(Sub1 != Sub2); 408 if (Sub1 == Hexagon::subreg_loreg && Sub2 == Hexagon::subreg_hireg) { 409 SL = I.getOperand(1); 410 SH = I.getOperand(3); 411 return true; 412 } 413 if (Sub1 == Hexagon::subreg_hireg && Sub2 == Hexagon::subreg_loreg) { 414 SH = I.getOperand(1); 415 SL = I.getOperand(3); 416 return true; 417 } 418 return false; 419 } 420 421 422 // All stores (except 64-bit stores) take a 32-bit register as the source 423 // of the value to be stored. If the instruction stores into a location 424 // that is shorter than 32 bits, some bits of the source register are not 425 // used. For each store instruction, calculate the set of used bits in 426 // the source register, and set appropriate bits in Bits. Return true if 427 // the bits are calculated, false otherwise. 428 bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits, 429 uint16_t Begin) { 430 using namespace Hexagon; 431 432 switch (Opc) { 433 // Store byte 434 case S2_storerb_io: // memb(Rs32+#s11:0)=Rt32 435 case S2_storerbnew_io: // memb(Rs32+#s11:0)=Nt8.new 436 case S2_pstorerbt_io: // if (Pv4) memb(Rs32+#u6:0)=Rt32 437 case S2_pstorerbf_io: // if (!Pv4) memb(Rs32+#u6:0)=Rt32 438 case S4_pstorerbtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Rt32 439 case S4_pstorerbfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32 440 case S2_pstorerbnewt_io: // if (Pv4) memb(Rs32+#u6:0)=Nt8.new 441 case S2_pstorerbnewf_io: // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new 442 case S4_pstorerbnewtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new 443 case S4_pstorerbnewfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new 444 case S2_storerb_pi: // memb(Rx32++#s4:0)=Rt32 445 case S2_storerbnew_pi: // memb(Rx32++#s4:0)=Nt8.new 446 case S2_pstorerbt_pi: // if (Pv4) memb(Rx32++#s4:0)=Rt32 447 case S2_pstorerbf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Rt32 448 case S2_pstorerbtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Rt32 449 case S2_pstorerbfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32 450 case S2_pstorerbnewt_pi: // if (Pv4) memb(Rx32++#s4:0)=Nt8.new 451 case S2_pstorerbnewf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new 452 case S2_pstorerbnewtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new 453 case S2_pstorerbnewfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new 454 case S4_storerb_ap: // memb(Re32=#U6)=Rt32 455 case S4_storerbnew_ap: // memb(Re32=#U6)=Nt8.new 456 case S2_storerb_pr: // memb(Rx32++Mu2)=Rt32 457 case S2_storerbnew_pr: // memb(Rx32++Mu2)=Nt8.new 458 case S4_storerb_ur: // memb(Ru32<<#u2+#U6)=Rt32 459 case S4_storerbnew_ur: // memb(Ru32<<#u2+#U6)=Nt8.new 460 case S2_storerb_pbr: // memb(Rx32++Mu2:brev)=Rt32 461 case S2_storerbnew_pbr: // memb(Rx32++Mu2:brev)=Nt8.new 462 case S2_storerb_pci: // memb(Rx32++#s4:0:circ(Mu2))=Rt32 463 case S2_storerbnew_pci: // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new 464 case S2_storerb_pcr: // memb(Rx32++I:circ(Mu2))=Rt32 465 case S2_storerbnew_pcr: // memb(Rx32++I:circ(Mu2))=Nt8.new 466 case S4_storerb_rr: // memb(Rs32+Ru32<<#u2)=Rt32 467 case S4_storerbnew_rr: // memb(Rs32+Ru32<<#u2)=Nt8.new 468 case S4_pstorerbt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32 469 case 
S4_pstorerbf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32 470 case S4_pstorerbtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32 471 case S4_pstorerbfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32 472 case S4_pstorerbnewt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new 473 case S4_pstorerbnewf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new 474 case S4_pstorerbnewtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new 475 case S4_pstorerbnewfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new 476 case S2_storerbgp: // memb(gp+#u16:0)=Rt32 477 case S2_storerbnewgp: // memb(gp+#u16:0)=Nt8.new 478 case S4_pstorerbt_abs: // if (Pv4) memb(#u6)=Rt32 479 case S4_pstorerbf_abs: // if (!Pv4) memb(#u6)=Rt32 480 case S4_pstorerbtnew_abs: // if (Pv4.new) memb(#u6)=Rt32 481 case S4_pstorerbfnew_abs: // if (!Pv4.new) memb(#u6)=Rt32 482 case S4_pstorerbnewt_abs: // if (Pv4) memb(#u6)=Nt8.new 483 case S4_pstorerbnewf_abs: // if (!Pv4) memb(#u6)=Nt8.new 484 case S4_pstorerbnewtnew_abs: // if (Pv4.new) memb(#u6)=Nt8.new 485 case S4_pstorerbnewfnew_abs: // if (!Pv4.new) memb(#u6)=Nt8.new 486 Bits.set(Begin, Begin+8); 487 return true; 488 489 // Store low half 490 case S2_storerh_io: // memh(Rs32+#s11:1)=Rt32 491 case S2_storerhnew_io: // memh(Rs32+#s11:1)=Nt8.new 492 case S2_pstorerht_io: // if (Pv4) memh(Rs32+#u6:1)=Rt32 493 case S2_pstorerhf_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt32 494 case S4_pstorerhtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt32 495 case S4_pstorerhfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32 496 case S2_pstorerhnewt_io: // if (Pv4) memh(Rs32+#u6:1)=Nt8.new 497 case S2_pstorerhnewf_io: // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new 498 case S4_pstorerhnewtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new 499 case S4_pstorerhnewfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new 500 case S2_storerh_pi: // memh(Rx32++#s4:1)=Rt32 501 case S2_storerhnew_pi: // memh(Rx32++#s4:1)=Nt8.new 502 case S2_pstorerht_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt32 503 case S2_pstorerhf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt32 504 case S2_pstorerhtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt32 505 case S2_pstorerhfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32 506 case S2_pstorerhnewt_pi: // if (Pv4) memh(Rx32++#s4:1)=Nt8.new 507 case S2_pstorerhnewf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new 508 case S2_pstorerhnewtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new 509 case S2_pstorerhnewfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new 510 case S4_storerh_ap: // memh(Re32=#U6)=Rt32 511 case S4_storerhnew_ap: // memh(Re32=#U6)=Nt8.new 512 case S2_storerh_pr: // memh(Rx32++Mu2)=Rt32 513 case S2_storerhnew_pr: // memh(Rx32++Mu2)=Nt8.new 514 case S4_storerh_ur: // memh(Ru32<<#u2+#U6)=Rt32 515 case S4_storerhnew_ur: // memh(Ru32<<#u2+#U6)=Nt8.new 516 case S2_storerh_pbr: // memh(Rx32++Mu2:brev)=Rt32 517 case S2_storerhnew_pbr: // memh(Rx32++Mu2:brev)=Nt8.new 518 case S2_storerh_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt32 519 case S2_storerhnew_pci: // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new 520 case S2_storerh_pcr: // memh(Rx32++I:circ(Mu2))=Rt32 521 case S2_storerhnew_pcr: // memh(Rx32++I:circ(Mu2))=Nt8.new 522 case S4_storerh_rr: // memh(Rs32+Ru32<<#u2)=Rt32 523 case S4_pstorerht_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32 524 case S4_pstorerhf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32 525 case S4_pstorerhtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32 526 case S4_pstorerhfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32 527 case S4_storerhnew_rr: // memh(Rs32+Ru32<<#u2)=Nt8.new 528 case S4_pstorerhnewt_rr: // if (Pv4) 
                                  // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:      // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:   // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:   // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:            // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:         // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:        // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:        // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:     // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:     // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:     // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:     // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:           // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:           // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:           // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:           // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:           // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:          // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:          // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:           // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:            // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:        // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:        // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:     // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:     // if (!Pv4.new) memh(#u6)=Rt.H32
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}


// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
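// For example (illustrative; the exact bits follow from the cases below):
// for an instruction like
//   vreg1 = A2_sxtb vreg2
// calling getUsedBits(A2_sxtb, /*OpN=*/1, Bits, /*Begin=*/0, HII) sets
// Bits[0..7], since sign-extending a byte reads only the low 8 bits of the
// source operand.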
588 bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN, 589 BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) { 590 using namespace Hexagon; 591 592 const MCInstrDesc &D = HII.get(Opc); 593 if (D.mayStore()) { 594 if (OpN == D.getNumOperands()-1) 595 return getUsedBitsInStore(Opc, Bits, Begin); 596 return false; 597 } 598 599 switch (Opc) { 600 // One register source. Used bits: R1[0-7]. 601 case A2_sxtb: 602 case A2_zxtb: 603 case A4_cmpbeqi: 604 case A4_cmpbgti: 605 case A4_cmpbgtui: 606 if (OpN == 1) { 607 Bits.set(Begin, Begin+8); 608 return true; 609 } 610 break; 611 612 // One register source. Used bits: R1[0-15]. 613 case A2_aslh: 614 case A2_sxth: 615 case A2_zxth: 616 case A4_cmpheqi: 617 case A4_cmphgti: 618 case A4_cmphgtui: 619 if (OpN == 1) { 620 Bits.set(Begin, Begin+16); 621 return true; 622 } 623 break; 624 625 // One register source. Used bits: R1[16-31]. 626 case A2_asrh: 627 if (OpN == 1) { 628 Bits.set(Begin+16, Begin+32); 629 return true; 630 } 631 break; 632 633 // Two register sources. Used bits: R1[0-7], R2[0-7]. 634 case A4_cmpbeq: 635 case A4_cmpbgt: 636 case A4_cmpbgtu: 637 if (OpN == 1) { 638 Bits.set(Begin, Begin+8); 639 return true; 640 } 641 break; 642 643 // Two register sources. Used bits: R1[0-15], R2[0-15]. 644 case A4_cmpheq: 645 case A4_cmphgt: 646 case A4_cmphgtu: 647 case A2_addh_h16_ll: 648 case A2_addh_h16_sat_ll: 649 case A2_addh_l16_ll: 650 case A2_addh_l16_sat_ll: 651 case A2_combine_ll: 652 case A2_subh_h16_ll: 653 case A2_subh_h16_sat_ll: 654 case A2_subh_l16_ll: 655 case A2_subh_l16_sat_ll: 656 case M2_mpy_acc_ll_s0: 657 case M2_mpy_acc_ll_s1: 658 case M2_mpy_acc_sat_ll_s0: 659 case M2_mpy_acc_sat_ll_s1: 660 case M2_mpy_ll_s0: 661 case M2_mpy_ll_s1: 662 case M2_mpy_nac_ll_s0: 663 case M2_mpy_nac_ll_s1: 664 case M2_mpy_nac_sat_ll_s0: 665 case M2_mpy_nac_sat_ll_s1: 666 case M2_mpy_rnd_ll_s0: 667 case M2_mpy_rnd_ll_s1: 668 case M2_mpy_sat_ll_s0: 669 case M2_mpy_sat_ll_s1: 670 case M2_mpy_sat_rnd_ll_s0: 671 case M2_mpy_sat_rnd_ll_s1: 672 case M2_mpyd_acc_ll_s0: 673 case M2_mpyd_acc_ll_s1: 674 case M2_mpyd_ll_s0: 675 case M2_mpyd_ll_s1: 676 case M2_mpyd_nac_ll_s0: 677 case M2_mpyd_nac_ll_s1: 678 case M2_mpyd_rnd_ll_s0: 679 case M2_mpyd_rnd_ll_s1: 680 case M2_mpyu_acc_ll_s0: 681 case M2_mpyu_acc_ll_s1: 682 case M2_mpyu_ll_s0: 683 case M2_mpyu_ll_s1: 684 case M2_mpyu_nac_ll_s0: 685 case M2_mpyu_nac_ll_s1: 686 case M2_mpyud_acc_ll_s0: 687 case M2_mpyud_acc_ll_s1: 688 case M2_mpyud_ll_s0: 689 case M2_mpyud_ll_s1: 690 case M2_mpyud_nac_ll_s0: 691 case M2_mpyud_nac_ll_s1: 692 if (OpN == 1 || OpN == 2) { 693 Bits.set(Begin, Begin+16); 694 return true; 695 } 696 break; 697 698 // Two register sources. Used bits: R1[0-15], R2[16-31]. 
699 case A2_addh_h16_lh: 700 case A2_addh_h16_sat_lh: 701 case A2_combine_lh: 702 case A2_subh_h16_lh: 703 case A2_subh_h16_sat_lh: 704 case M2_mpy_acc_lh_s0: 705 case M2_mpy_acc_lh_s1: 706 case M2_mpy_acc_sat_lh_s0: 707 case M2_mpy_acc_sat_lh_s1: 708 case M2_mpy_lh_s0: 709 case M2_mpy_lh_s1: 710 case M2_mpy_nac_lh_s0: 711 case M2_mpy_nac_lh_s1: 712 case M2_mpy_nac_sat_lh_s0: 713 case M2_mpy_nac_sat_lh_s1: 714 case M2_mpy_rnd_lh_s0: 715 case M2_mpy_rnd_lh_s1: 716 case M2_mpy_sat_lh_s0: 717 case M2_mpy_sat_lh_s1: 718 case M2_mpy_sat_rnd_lh_s0: 719 case M2_mpy_sat_rnd_lh_s1: 720 case M2_mpyd_acc_lh_s0: 721 case M2_mpyd_acc_lh_s1: 722 case M2_mpyd_lh_s0: 723 case M2_mpyd_lh_s1: 724 case M2_mpyd_nac_lh_s0: 725 case M2_mpyd_nac_lh_s1: 726 case M2_mpyd_rnd_lh_s0: 727 case M2_mpyd_rnd_lh_s1: 728 case M2_mpyu_acc_lh_s0: 729 case M2_mpyu_acc_lh_s1: 730 case M2_mpyu_lh_s0: 731 case M2_mpyu_lh_s1: 732 case M2_mpyu_nac_lh_s0: 733 case M2_mpyu_nac_lh_s1: 734 case M2_mpyud_acc_lh_s0: 735 case M2_mpyud_acc_lh_s1: 736 case M2_mpyud_lh_s0: 737 case M2_mpyud_lh_s1: 738 case M2_mpyud_nac_lh_s0: 739 case M2_mpyud_nac_lh_s1: 740 // These four are actually LH. 741 case A2_addh_l16_hl: 742 case A2_addh_l16_sat_hl: 743 case A2_subh_l16_hl: 744 case A2_subh_l16_sat_hl: 745 if (OpN == 1) { 746 Bits.set(Begin, Begin+16); 747 return true; 748 } 749 if (OpN == 2) { 750 Bits.set(Begin+16, Begin+32); 751 return true; 752 } 753 break; 754 755 // Two register sources, used bits: R1[16-31], R2[0-15]. 756 case A2_addh_h16_hl: 757 case A2_addh_h16_sat_hl: 758 case A2_combine_hl: 759 case A2_subh_h16_hl: 760 case A2_subh_h16_sat_hl: 761 case M2_mpy_acc_hl_s0: 762 case M2_mpy_acc_hl_s1: 763 case M2_mpy_acc_sat_hl_s0: 764 case M2_mpy_acc_sat_hl_s1: 765 case M2_mpy_hl_s0: 766 case M2_mpy_hl_s1: 767 case M2_mpy_nac_hl_s0: 768 case M2_mpy_nac_hl_s1: 769 case M2_mpy_nac_sat_hl_s0: 770 case M2_mpy_nac_sat_hl_s1: 771 case M2_mpy_rnd_hl_s0: 772 case M2_mpy_rnd_hl_s1: 773 case M2_mpy_sat_hl_s0: 774 case M2_mpy_sat_hl_s1: 775 case M2_mpy_sat_rnd_hl_s0: 776 case M2_mpy_sat_rnd_hl_s1: 777 case M2_mpyd_acc_hl_s0: 778 case M2_mpyd_acc_hl_s1: 779 case M2_mpyd_hl_s0: 780 case M2_mpyd_hl_s1: 781 case M2_mpyd_nac_hl_s0: 782 case M2_mpyd_nac_hl_s1: 783 case M2_mpyd_rnd_hl_s0: 784 case M2_mpyd_rnd_hl_s1: 785 case M2_mpyu_acc_hl_s0: 786 case M2_mpyu_acc_hl_s1: 787 case M2_mpyu_hl_s0: 788 case M2_mpyu_hl_s1: 789 case M2_mpyu_nac_hl_s0: 790 case M2_mpyu_nac_hl_s1: 791 case M2_mpyud_acc_hl_s0: 792 case M2_mpyud_acc_hl_s1: 793 case M2_mpyud_hl_s0: 794 case M2_mpyud_hl_s1: 795 case M2_mpyud_nac_hl_s0: 796 case M2_mpyud_nac_hl_s1: 797 if (OpN == 1) { 798 Bits.set(Begin+16, Begin+32); 799 return true; 800 } 801 if (OpN == 2) { 802 Bits.set(Begin, Begin+16); 803 return true; 804 } 805 break; 806 807 // Two register sources, used bits: R1[16-31], R2[16-31]. 
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}


// Calculate the register class that matches Reg:Sub. For example, if
// vreg1 is a double register, then vreg1:subreg_hireg would match the
// 32-bit (IntRegs) register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0)
    return RC;

  auto VerifySR = [] (unsigned Sub) -> void {
    assert(Sub == Hexagon::subreg_hireg || Sub == Hexagon::subreg_loreg);
  };

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RR.Sub);
      return &Hexagon::IntRegsRegClass;
  }
  return nullptr;
}


// Check if RD could be replaced with RS at any possible use of RD.
// For example, a predicate register cannot be replaced with an integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
      !TargetRegisterInfo::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
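  // (getFinalVRegClass yields nullptr for non-virtual registers and for
  // class/subregister combinations it does not handle, e.g. a predicate
  // register referenced with a subregister.)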
894 auto *DRC = getFinalVRegClass(RD, MRI); 895 if (!DRC) 896 return false; 897 898 return DRC == getFinalVRegClass(RS, MRI); 899 } 900 901 902 // 903 // Dead code elimination 904 // 905 namespace { 906 class DeadCodeElimination { 907 public: 908 DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt) 909 : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()), 910 MDT(mdt), MRI(mf.getRegInfo()) {} 911 912 bool run() { 913 return runOnNode(MDT.getRootNode()); 914 } 915 916 private: 917 bool isDead(unsigned R) const; 918 bool runOnNode(MachineDomTreeNode *N); 919 920 MachineFunction &MF; 921 const HexagonInstrInfo &HII; 922 MachineDominatorTree &MDT; 923 MachineRegisterInfo &MRI; 924 }; 925 } 926 927 928 bool DeadCodeElimination::isDead(unsigned R) const { 929 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) { 930 MachineInstr *UseI = I->getParent(); 931 if (UseI->isDebugValue()) 932 continue; 933 if (UseI->isPHI()) { 934 assert(!UseI->getOperand(0).getSubReg()); 935 unsigned DR = UseI->getOperand(0).getReg(); 936 if (DR == R) 937 continue; 938 } 939 return false; 940 } 941 return true; 942 } 943 944 945 bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) { 946 bool Changed = false; 947 typedef GraphTraits<MachineDomTreeNode*> GTN; 948 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) 949 Changed |= runOnNode(*I); 950 951 MachineBasicBlock *B = N->getBlock(); 952 std::vector<MachineInstr*> Instrs; 953 for (auto I = B->rbegin(), E = B->rend(); I != E; ++I) 954 Instrs.push_back(&*I); 955 956 for (auto MI : Instrs) { 957 unsigned Opc = MI->getOpcode(); 958 // Do not touch lifetime markers. This is why the target-independent DCE 959 // cannot be used. 960 if (Opc == TargetOpcode::LIFETIME_START || 961 Opc == TargetOpcode::LIFETIME_END) 962 continue; 963 bool Store = false; 964 if (MI->isInlineAsm()) 965 continue; 966 // Delete PHIs if possible. 967 if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store)) 968 continue; 969 970 bool AllDead = true; 971 SmallVector<unsigned,2> Regs; 972 for (auto &Op : MI->operands()) { 973 if (!Op.isReg() || !Op.isDef()) 974 continue; 975 unsigned R = Op.getReg(); 976 if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) { 977 AllDead = false; 978 break; 979 } 980 Regs.push_back(R); 981 } 982 if (!AllDead) 983 continue; 984 985 B->erase(MI); 986 for (unsigned i = 0, n = Regs.size(); i != n; ++i) 987 MRI.markUsesInDebugValueAsUndef(Regs[i]); 988 Changed = true; 989 } 990 991 return Changed; 992 } 993 994 995 // 996 // Eliminate redundant instructions 997 // 998 // This transformation will identify instructions where the output register 999 // is the same as one of its input registers. This only works on instructions 1000 // that define a single register (unlike post-increment loads, for example). 1001 // The equality check is actually more detailed: the code calculates which 1002 // bits of the output are used, and only compares these bits with the input 1003 // registers. 1004 // If the output matches an input, the instruction is replaced with COPY. 1005 // The copies will be removed by another transformation. 
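//
// For example (illustrative): if the bit tracker knows that bits 0-15 of
// vreg2 are all ones, then in
//   vreg1 = A2_and vreg0, vreg2
// bits 0-15 of vreg1 are the same as bits 0-15 of vreg0. If all users of
// vreg1 read only those low 16 bits, the A2_and is redundant and can be
// rewritten as a COPY of vreg0.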
1006 namespace { 1007 class RedundantInstrElimination : public Transformation { 1008 public: 1009 RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii, 1010 MachineRegisterInfo &mri) 1011 : Transformation(true), HII(hii), MRI(mri), BT(bt) {} 1012 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1013 private: 1014 bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN, 1015 unsigned &LostB, unsigned &LostE); 1016 bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN, 1017 unsigned &LostB, unsigned &LostE); 1018 bool computeUsedBits(unsigned Reg, BitVector &Bits); 1019 bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits, 1020 uint16_t Begin); 1021 bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS); 1022 1023 const HexagonInstrInfo &HII; 1024 MachineRegisterInfo &MRI; 1025 BitTracker &BT; 1026 }; 1027 } 1028 1029 1030 // Check if the instruction is a lossy shift left, where the input being 1031 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range 1032 // of bit indices that are lost. 1033 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI, 1034 unsigned OpN, unsigned &LostB, unsigned &LostE) { 1035 using namespace Hexagon; 1036 unsigned Opc = MI.getOpcode(); 1037 unsigned ImN, RegN, Width; 1038 switch (Opc) { 1039 case S2_asl_i_p: 1040 ImN = 2; 1041 RegN = 1; 1042 Width = 64; 1043 break; 1044 case S2_asl_i_p_acc: 1045 case S2_asl_i_p_and: 1046 case S2_asl_i_p_nac: 1047 case S2_asl_i_p_or: 1048 case S2_asl_i_p_xacc: 1049 ImN = 3; 1050 RegN = 2; 1051 Width = 64; 1052 break; 1053 case S2_asl_i_r: 1054 ImN = 2; 1055 RegN = 1; 1056 Width = 32; 1057 break; 1058 case S2_addasl_rrri: 1059 case S4_andi_asl_ri: 1060 case S4_ori_asl_ri: 1061 case S4_addi_asl_ri: 1062 case S4_subi_asl_ri: 1063 case S2_asl_i_r_acc: 1064 case S2_asl_i_r_and: 1065 case S2_asl_i_r_nac: 1066 case S2_asl_i_r_or: 1067 case S2_asl_i_r_sat: 1068 case S2_asl_i_r_xacc: 1069 ImN = 3; 1070 RegN = 2; 1071 Width = 32; 1072 break; 1073 default: 1074 return false; 1075 } 1076 1077 if (RegN != OpN) 1078 return false; 1079 1080 assert(MI.getOperand(ImN).isImm()); 1081 unsigned S = MI.getOperand(ImN).getImm(); 1082 if (S == 0) 1083 return false; 1084 LostB = Width-S; 1085 LostE = Width; 1086 return true; 1087 } 1088 1089 1090 // Check if the instruction is a lossy shift right, where the input being 1091 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range 1092 // of bit indices that are lost. 
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;
  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN;
  switch (Opc) {
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      ImN = 3;
      RegN = 2;
      break;
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  LostB = 0;
  LostE = S;
  return true;
}


// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size as Reg, in bits. If the calculation
// fails (i.e. the used bits are unknown), it returns false. Otherwise, it
// returns true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        unsigned DefR = UseI.getOperand(0).getReg();
        if (!TargetRegisterInfo::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  Bits |= Used;
  return true;
}


// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If it succeeds, set
// the used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   vreg1 = S2_lsr_i_r vreg2:subreg_hireg, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register vreg2, and that subregister starts at position 32.
1202 // In this case Begin=32, since Bits[32] would be the lowest-significant bit 1203 // of vreg2:subreg_hireg. 1204 bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI, 1205 unsigned OpN, BitVector &Bits, uint16_t Begin) { 1206 unsigned Opc = MI.getOpcode(); 1207 BitVector T(Bits.size()); 1208 bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII); 1209 // Even if we don't have bits yet, we could still provide some information 1210 // if the instruction is a lossy shift: the lost bits will be marked as 1211 // not used. 1212 unsigned LB, LE; 1213 if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) { 1214 assert(MI.getOperand(OpN).isReg()); 1215 BitTracker::RegisterRef RR = MI.getOperand(OpN); 1216 const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI); 1217 uint16_t Width = RC->getSize()*8; 1218 1219 if (!GotBits) 1220 T.set(Begin, Begin+Width); 1221 assert(LB <= LE && LB < Width && LE <= Width); 1222 T.reset(Begin+LB, Begin+LE); 1223 GotBits = true; 1224 } 1225 if (GotBits) 1226 Bits |= T; 1227 return GotBits; 1228 } 1229 1230 1231 // Calculates the used bits in RD ("defined register"), and checks if these 1232 // bits in RS ("used register") and RD are identical. 1233 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD, 1234 BitTracker::RegisterRef RS) { 1235 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1236 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1237 1238 unsigned DB, DW; 1239 if (!HBS::getSubregMask(RD, DB, DW, MRI)) 1240 return false; 1241 unsigned SB, SW; 1242 if (!HBS::getSubregMask(RS, SB, SW, MRI)) 1243 return false; 1244 if (SW != DW) 1245 return false; 1246 1247 BitVector Used(DC.width()); 1248 if (!computeUsedBits(RD.Reg, Used)) 1249 return false; 1250 1251 for (unsigned i = 0; i != DW; ++i) 1252 if (Used[i+DB] && DC[DB+i] != SC[SB+i]) 1253 return false; 1254 return true; 1255 } 1256 1257 1258 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B, 1259 const RegisterSet&) { 1260 bool Changed = false; 1261 1262 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) { 1263 NextI = std::next(I); 1264 MachineInstr *MI = &*I; 1265 1266 if (MI->getOpcode() == TargetOpcode::COPY) 1267 continue; 1268 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 1269 continue; 1270 unsigned NumD = MI->getDesc().getNumDefs(); 1271 if (NumD != 1) 1272 continue; 1273 1274 BitTracker::RegisterRef RD = MI->getOperand(0); 1275 if (!BT.has(RD.Reg)) 1276 continue; 1277 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1278 1279 // Find a source operand that is equal to the result. 1280 for (auto &Op : MI->uses()) { 1281 if (!Op.isReg()) 1282 continue; 1283 BitTracker::RegisterRef RS = Op; 1284 if (!BT.has(RS.Reg)) 1285 continue; 1286 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1287 continue; 1288 1289 unsigned BN, BW; 1290 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 1291 continue; 1292 1293 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1294 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW)) 1295 continue; 1296 1297 // If found, replace the instruction with a COPY. 
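      // (The COPY below reads RS with its original subregister; every use of
      // RD is then redirected to the new register, and the register cell of
      // the source is recorded for the new register in the bit tracker.)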
1298 DebugLoc DL = MI->getDebugLoc(); 1299 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 1300 unsigned NewR = MRI.createVirtualRegister(FRC); 1301 BuildMI(B, I, DL, HII.get(TargetOpcode::COPY), NewR) 1302 .addReg(RS.Reg, 0, RS.Sub); 1303 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1304 BT.put(BitTracker::RegisterRef(NewR), SC); 1305 Changed = true; 1306 break; 1307 } 1308 } 1309 1310 return Changed; 1311 } 1312 1313 1314 // 1315 // Const generation 1316 // 1317 // Recognize instructions that produce constant values known at compile-time. 1318 // Replace them with register definitions that load these constants directly. 1319 namespace { 1320 class ConstGeneration : public Transformation { 1321 public: 1322 ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii, 1323 MachineRegisterInfo &mri) 1324 : Transformation(true), HII(hii), MRI(mri), BT(bt) {} 1325 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1326 private: 1327 bool isTfrConst(const MachineInstr *MI) const; 1328 bool isConst(unsigned R, int64_t &V) const; 1329 unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C, 1330 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL); 1331 1332 const HexagonInstrInfo &HII; 1333 MachineRegisterInfo &MRI; 1334 BitTracker &BT; 1335 }; 1336 } 1337 1338 bool ConstGeneration::isConst(unsigned R, int64_t &C) const { 1339 if (!BT.has(R)) 1340 return false; 1341 const BitTracker::RegisterCell &RC = BT.lookup(R); 1342 int64_t T = 0; 1343 for (unsigned i = RC.width(); i > 0; --i) { 1344 const BitTracker::BitValue &V = RC[i-1]; 1345 T <<= 1; 1346 if (V.is(1)) 1347 T |= 1; 1348 else if (!V.is(0)) 1349 return false; 1350 } 1351 C = T; 1352 return true; 1353 } 1354 1355 1356 bool ConstGeneration::isTfrConst(const MachineInstr *MI) const { 1357 unsigned Opc = MI->getOpcode(); 1358 switch (Opc) { 1359 case Hexagon::A2_combineii: 1360 case Hexagon::A4_combineii: 1361 case Hexagon::A2_tfrsi: 1362 case Hexagon::A2_tfrpi: 1363 case Hexagon::TFR_PdTrue: 1364 case Hexagon::TFR_PdFalse: 1365 case Hexagon::CONST32_Int_Real: 1366 case Hexagon::CONST64_Int_Real: 1367 return true; 1368 } 1369 return false; 1370 } 1371 1372 1373 // Generate a transfer-immediate instruction that is appropriate for the 1374 // register class and the actual value being transferred. 1375 unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C, 1376 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) { 1377 unsigned Reg = MRI.createVirtualRegister(RC); 1378 if (RC == &Hexagon::IntRegsRegClass) { 1379 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg) 1380 .addImm(int32_t(C)); 1381 return Reg; 1382 } 1383 1384 if (RC == &Hexagon::DoubleRegsRegClass) { 1385 if (isInt<8>(C)) { 1386 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg) 1387 .addImm(C); 1388 return Reg; 1389 } 1390 1391 unsigned Lo = Lo_32(C), Hi = Hi_32(C); 1392 if (isInt<8>(Lo) || isInt<8>(Hi)) { 1393 unsigned Opc = isInt<8>(Lo) ? 
                         Hexagon::A2_combineii : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
        .addImm(int32_t(Hi))
        .addImm(int32_t(Lo));
      return Reg;
    }

    BuildMI(B, At, DL, HII.get(Hexagon::CONST64_Int_Real), Reg)
      .addImm(C);
    return Reg;
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::TFR_PdFalse;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::TFR_PdTrue;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  return 0;
}


bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    if (isTfrConst(I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DR))
      continue;
    int64_t C;
    if (isConst(DR, C)) {
      DebugLoc DL = I->getDebugLoc();
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, BT.lookup(DR));
        Changed = true;
      }
    }
  }
  return Changed;
}


//
// Copy generation
//
// Identify pairs of available registers which hold identical values.
// In such cases, only one of them needs to be calculated, the other one
// will be defined as a copy of the first.
//
// Copy propagation
//
// Eliminate register copies RD = RS, by replacing the uses of RD
// with uses of RS.
namespace {
  class CopyGeneration : public Transformation {
  public:
    CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
        MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
  private:
    bool findMatch(const BitTracker::RegisterRef &Inp,
        BitTracker::RegisterRef &Out, const RegisterSet &AVs);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

  class CopyPropagation : public Transformation {
  public:
    CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(false), MRI(mri) {}
    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
    static bool isCopyReg(unsigned Opc);
  private:
    bool propagateRegCopy(MachineInstr &MI);

    MachineRegisterInfo &MRI;
  };

}


/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
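/// For instance (illustrative): if a 32-bit register holds the same bits as
/// the low half of an already-available 64-bit register D, findMatch can
/// return Out = D:subreg_loreg, and the caller then emits a COPY from that
/// subregister instead of recomputing the value.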
1496 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp, 1497 BitTracker::RegisterRef &Out, const RegisterSet &AVs) { 1498 if (!BT.has(Inp.Reg)) 1499 return false; 1500 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg); 1501 unsigned B, W; 1502 if (!HBS::getSubregMask(Inp, B, W, MRI)) 1503 return false; 1504 1505 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) { 1506 if (!BT.has(R) || !HBS::isTransparentCopy(R, Inp, MRI)) 1507 continue; 1508 const BitTracker::RegisterCell &RC = BT.lookup(R); 1509 unsigned RW = RC.width(); 1510 if (W == RW) { 1511 if (MRI.getRegClass(Inp.Reg) != MRI.getRegClass(R)) 1512 continue; 1513 if (!HBS::isEqual(InpRC, B, RC, 0, W)) 1514 continue; 1515 Out.Reg = R; 1516 Out.Sub = 0; 1517 return true; 1518 } 1519 // Check if there is a super-register, whose part (with a subregister) 1520 // is equal to the input. 1521 // Only do double registers for now. 1522 if (W*2 != RW) 1523 continue; 1524 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass) 1525 continue; 1526 1527 if (HBS::isEqual(InpRC, B, RC, 0, W)) 1528 Out.Sub = Hexagon::subreg_loreg; 1529 else if (HBS::isEqual(InpRC, B, RC, W, W)) 1530 Out.Sub = Hexagon::subreg_hireg; 1531 else 1532 continue; 1533 Out.Reg = R; 1534 return true; 1535 } 1536 return false; 1537 } 1538 1539 1540 bool CopyGeneration::processBlock(MachineBasicBlock &B, 1541 const RegisterSet &AVs) { 1542 RegisterSet AVB(AVs); 1543 bool Changed = false; 1544 RegisterSet Defs; 1545 1546 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; 1547 ++I, AVB.insert(Defs)) { 1548 NextI = std::next(I); 1549 Defs.clear(); 1550 HBS::getInstrDefs(*I, Defs); 1551 1552 unsigned Opc = I->getOpcode(); 1553 if (CopyPropagation::isCopyReg(Opc)) 1554 continue; 1555 1556 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) { 1557 BitTracker::RegisterRef MR; 1558 if (!findMatch(R, MR, AVB)) 1559 continue; 1560 DebugLoc DL = I->getDebugLoc(); 1561 auto *FRC = HBS::getFinalVRegClass(MR, MRI); 1562 unsigned NewR = MRI.createVirtualRegister(FRC); 1563 auto At = I->isPHI() ? 
B.getFirstNonPHI() : I; 1564 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1565 .addReg(MR.Reg, 0, MR.Sub); 1566 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR)); 1567 } 1568 } 1569 1570 return Changed; 1571 } 1572 1573 1574 bool CopyPropagation::isCopyReg(unsigned Opc) { 1575 switch (Opc) { 1576 case TargetOpcode::COPY: 1577 case TargetOpcode::REG_SEQUENCE: 1578 case Hexagon::A2_tfr: 1579 case Hexagon::A2_tfrp: 1580 case Hexagon::A2_combinew: 1581 case Hexagon::A4_combineir: 1582 case Hexagon::A4_combineri: 1583 return true; 1584 default: 1585 break; 1586 } 1587 return false; 1588 } 1589 1590 1591 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) { 1592 bool Changed = false; 1593 unsigned Opc = MI.getOpcode(); 1594 BitTracker::RegisterRef RD = MI.getOperand(0); 1595 assert(MI.getOperand(0).getSubReg() == 0); 1596 1597 switch (Opc) { 1598 case TargetOpcode::COPY: 1599 case Hexagon::A2_tfr: 1600 case Hexagon::A2_tfrp: { 1601 BitTracker::RegisterRef RS = MI.getOperand(1); 1602 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1603 break; 1604 if (RS.Sub != 0) 1605 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI); 1606 else 1607 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI); 1608 break; 1609 } 1610 case TargetOpcode::REG_SEQUENCE: { 1611 BitTracker::RegisterRef SL, SH; 1612 if (HBS::parseRegSequence(MI, SL, SH)) { 1613 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg, 1614 SL.Reg, SL.Sub, MRI); 1615 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg, 1616 SH.Reg, SH.Sub, MRI); 1617 } 1618 break; 1619 } 1620 case Hexagon::A2_combinew: { 1621 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2); 1622 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg, 1623 RL.Reg, RL.Sub, MRI); 1624 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg, 1625 RH.Reg, RH.Sub, MRI); 1626 break; 1627 } 1628 case Hexagon::A4_combineir: 1629 case Hexagon::A4_combineri: { 1630 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1; 1631 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::subreg_loreg 1632 : Hexagon::subreg_hireg; 1633 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1634 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1635 break; 1636 } 1637 } 1638 return Changed; 1639 } 1640 1641 1642 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1643 std::vector<MachineInstr*> Instrs; 1644 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1645 Instrs.push_back(&*I); 1646 1647 bool Changed = false; 1648 for (auto I : Instrs) { 1649 unsigned Opc = I->getOpcode(); 1650 if (!CopyPropagation::isCopyReg(Opc)) 1651 continue; 1652 Changed |= propagateRegCopy(*I); 1653 } 1654 1655 return Changed; 1656 } 1657 1658 1659 // 1660 // Bit simplification 1661 // 1662 // Recognize patterns that can be simplified and replace them with the 1663 // simpler forms. 1664 // This is by no means complete 1665 namespace { 1666 class BitSimplification : public Transformation { 1667 public: 1668 BitSimplification(BitTracker &bt, const HexagonInstrInfo &hii, 1669 MachineRegisterInfo &mri) 1670 : Transformation(true), HII(hii), MRI(mri), BT(bt) {} 1671 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1672 private: 1673 struct RegHalf : public BitTracker::RegisterRef { 1674 bool Low; // Low/High halfword. 
1675 }; 1676 1677 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1678 unsigned B, RegHalf &RH); 1679 1680 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1681 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1682 unsigned getCombineOpcode(bool HLow, bool LLow); 1683 1684 bool genStoreUpperHalf(MachineInstr *MI); 1685 bool genStoreImmediate(MachineInstr *MI); 1686 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1687 const BitTracker::RegisterCell &RC); 1688 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1689 const BitTracker::RegisterCell &RC); 1690 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1691 const BitTracker::RegisterCell &RC); 1692 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1693 const BitTracker::RegisterCell &RC); 1694 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1695 const BitTracker::RegisterCell &RC); 1696 1697 const HexagonInstrInfo &HII; 1698 MachineRegisterInfo &MRI; 1699 BitTracker &BT; 1700 }; 1701 } 1702 1703 1704 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1705 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1706 // set the information about the found register in RH. 1707 bool BitSimplification::matchHalf(unsigned SelfR, 1708 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) { 1709 // XXX This could be searching in the set of available registers, in case 1710 // the match is not exact. 1711 1712 // Match 16-bit chunks, where the RC[B..B+15] references exactly one 1713 // register and all the bits B..B+15 match between RC and the register. 1714 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... }, 1715 // and RC = { [0]:0 [1-15]:v1[1-15]... }. 1716 bool Low = false; 1717 unsigned I = B; 1718 while (I < B+16 && RC[I].num()) 1719 I++; 1720 if (I == B+16) 1721 return false; 1722 1723 unsigned Reg = RC[I].RefI.Reg; 1724 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B. 1725 if (P < I-B) 1726 return false; 1727 unsigned Pos = P - (I-B); 1728 1729 if (Reg == 0 || Reg == SelfR) // Don't match "self". 1730 return false; 1731 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1732 return false; 1733 if (!BT.has(Reg)) 1734 return false; 1735 1736 const BitTracker::RegisterCell &SC = BT.lookup(Reg); 1737 if (Pos+16 > SC.width()) 1738 return false; 1739 1740 for (unsigned i = 0; i < 16; ++i) { 1741 const BitTracker::BitValue &RV = RC[i+B]; 1742 if (RV.Type == BitTracker::BitValue::Ref) { 1743 if (RV.RefI.Reg != Reg) 1744 return false; 1745 if (RV.RefI.Pos != i+Pos) 1746 return false; 1747 continue; 1748 } 1749 if (RC[i+B] != SC[i+Pos]) 1750 return false; 1751 } 1752 1753 unsigned Sub = 0; 1754 switch (Pos) { 1755 case 0: 1756 Sub = Hexagon::subreg_loreg; 1757 Low = true; 1758 break; 1759 case 16: 1760 Sub = Hexagon::subreg_loreg; 1761 Low = false; 1762 break; 1763 case 32: 1764 Sub = Hexagon::subreg_hireg; 1765 Low = true; 1766 break; 1767 case 48: 1768 Sub = Hexagon::subreg_hireg; 1769 Low = false; 1770 break; 1771 default: 1772 return false; 1773 } 1774 1775 RH.Reg = Reg; 1776 RH.Sub = Sub; 1777 RH.Low = Low; 1778 // If the subregister is not valid with the register, set it to 0. 1779 if (!HBS::getFinalVRegClass(RH, MRI)) 1780 RH.Sub = 0; 1781 1782 return true; 1783 } 1784 1785 1786 // Check if RC matches the pattern of a S2_packhl. If so, return true and 1787 // set the inputs Rs and Rt. 
bool BitSimplification::matchPackhl(unsigned SelfR,
      const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
      BitTracker::RegisterRef &Rt) {
  RegHalf L1, H1, L2, H2;

  if (!matchHalf(SelfR, RC, 0, L2)  || !matchHalf(SelfR, RC, 16, L1))
    return false;
  if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
    return false;

  // Rs = H1.L1, Rt = H2.L2
  if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
    return false;
  if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
    return false;

  Rs = H1;
  Rt = H2;
  return true;
}


unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
  return HLow ? LLow ? Hexagon::A2_combine_ll
                     : Hexagon::A2_combine_lh
              : LLow ? Hexagon::A2_combine_hl
                     : Hexagon::A2_combine_hh;
}


// If MI stores the upper halfword of a register (potentially obtained via
// shifts or extracts), replace it with a storerf instruction. This could
// cause the "extraction" code to become dead.
bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_storerh_io)
    return false;

  MachineOperand &ValOp = MI->getOperand(2);
  BitTracker::RegisterRef RS = ValOp;
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  RegHalf H;
  if (!matchHalf(0, RC, 0, H))
    return false;
  if (H.Low)
    return false;
  MI->setDesc(HII.get(Hexagon::S2_storerf_io));
  ValOp.setReg(H.Reg);
  ValOp.setSubReg(H.Sub);
  return true;
}


// If MI stores a value known at compile-time, and the value is within a range
// that avoids using constant-extenders, replace it with a store-immediate.
bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  unsigned Align = 0;
  switch (Opc) {
    case Hexagon::S2_storeri_io:
      Align++;
    case Hexagon::S2_storerh_io:
      Align++;
    case Hexagon::S2_storerb_io:
      break;
    default:
      return false;
  }

  // Avoid stores to frame-indices (due to an unknown offset).
  if (!MI->getOperand(0).isReg())
    return false;
  MachineOperand &OffOp = MI->getOperand(1);
  if (!OffOp.isImm())
    return false;

  int64_t Off = OffOp.getImm();
  // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
  if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
    return false;
  // Source register:
  BitTracker::RegisterRef RS = MI->getOperand(2);
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  uint64_t U;
  if (!HBS::getConst(RC, 0, RC.width(), U))
    return false;

  // Only consider 8-bit values to avoid constant-extenders.
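  // For instance (values are only illustrative): a halfword store of a known
  // value 0x0005 can become a store-immediate of #5, while a known value of
  // 0x0100 is rejected below because int16_t(0x0100) = 256 does not fit in
  // a signed 8-bit immediate.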
  int V;
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      V = int8_t(U);
      break;
    case Hexagon::S2_storerh_io:
      V = int16_t(U);
      break;
    case Hexagon::S2_storeri_io:
      V = int32_t(U);
      break;
  }
  if (!isInt<8>(V))
    return false;

  MI->RemoveOperand(2);
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
      break;
    case Hexagon::S2_storerh_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
      break;
    case Hexagon::S2_storeri_io:
      MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
      break;
  }
  MI->addOperand(MachineOperand::CreateImm(V));
  return true;
}


// If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
// last instruction in a sequence that results in something equivalent to
// the pack-halfwords. The intent is to cause the entire sequence to become
// dead.
bool BitSimplification::genPackhl(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc == Hexagon::S2_packhl)
    return false;
  BitTracker::RegisterRef Rs, Rt;
  if (!matchPackhl(RD.Reg, RC, Rs, Rt))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
  DebugLoc DL = MI->getDebugLoc();
  BuildMI(B, MI, DL, HII.get(Hexagon::S2_packhl), NewR)
    .addReg(Rs.Reg, 0, Rs.Sub)
    .addReg(Rt.Reg, 0, Rt.Sub);
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}


// If MI produces a halfword of the input in the low half of the output,
// replace it with zero-extend or extractu.
bool BitSimplification::genExtractHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L;
  // Check for halfword in low 16 bits, zeros elsewhere.
  if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
    return false;

  unsigned Opc = MI->getOpcode();
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Prefer zxth, since zxth can go in any slot, while extractu only in
  // slots 2 and 3.
  unsigned NewR = 0;
  if (L.Low && Opc != Hexagon::A2_zxth) {
    NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    BuildMI(B, MI, DL, HII.get(Hexagon::A2_zxth), NewR)
      .addReg(L.Reg, 0, L.Sub);
  } else if (!L.Low && Opc != Hexagon::S2_extractu) {
    NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    BuildMI(B, MI, DL, HII.get(Hexagon::S2_extractu), NewR)
      .addReg(L.Reg, 0, L.Sub)
      .addImm(16)
      .addImm(16);
  }
  if (NewR == 0)
    return false;
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}


// If MI is equivalent to a combine(.L/.H, .L/.H), replace it with the
// combine.
bool BitSimplification::genCombineHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L, H;
  // Check for combine h/l.
  if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
    return false;
  // Do nothing if this is just a reg copy.
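  // (That is, if RC is exactly { SrcReg.high : SrcReg.low }, the combine
  // would only reproduce SrcReg.)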
  if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
    return false;

  unsigned Opc = MI->getOpcode();
  unsigned COpc = getCombineOpcode(H.Low, L.Low);
  if (COpc == Opc)
    return false;

  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  BuildMI(B, MI, DL, HII.get(COpc), NewR)
    .addReg(H.Reg, 0, H.Sub)
    .addReg(L.Reg, 0, L.Sub);
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}


// If MI resets high bits of a register and keeps the lower ones, replace it
// with zero-extend byte/half, and-immediate, or extractu, as appropriate.
bool BitSimplification::genExtractLow(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
    case Hexagon::A2_zxtb:
    case Hexagon::A2_zxth:
    case Hexagon::S2_extractu:
      return false;
  }
  if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
    int32_t Imm = MI->getOperand(2).getImm();
    if (isInt<10>(Imm))
      return false;
  }

  if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
    return false;
  unsigned W = RC.width();
  while (W > 0 && RC[W-1].is(0))
    W--;
  if (W == 0 || W == RC.width())
    return false;
  unsigned NewOpc = (W == 8)  ? Hexagon::A2_zxtb
                  : (W == 16) ? Hexagon::A2_zxth
                  : (W < 10)  ? Hexagon::A2_andir
                  : Hexagon::S2_extractu;
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  for (auto &Op : MI->uses()) {
    if (!Op.isReg())
      continue;
    BitTracker::RegisterRef RS = Op;
    if (!BT.has(RS.Reg))
      continue;
    const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
    unsigned BN, BW;
    if (!HBS::getSubregMask(RS, BN, BW, MRI))
      continue;
    if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
      continue;

    unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    auto MIB = BuildMI(B, MI, DL, HII.get(NewOpc), NewR)
                 .addReg(RS.Reg, 0, RS.Sub);
    if (NewOpc == Hexagon::A2_andir)
      MIB.addImm((1 << W) - 1);
    else if (NewOpc == Hexagon::S2_extractu)
      MIB.addImm(W).addImm(0);
    HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
    BT.put(BitTracker::RegisterRef(NewR), RC);
    return true;
  }
  return false;
}


// Check for tstbit simplification opportunity, where the bit being checked
// can be tracked back to another register. For example:
//   vreg2 = S2_lsr_i_r vreg1, 5
//   vreg3 = S2_tstbit_i vreg2, 0
// =>
//   vreg3 = S2_tstbit_i vreg1, 5
bool BitSimplification::simplifyTstbit(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_tstbit_i)
    return false;

  unsigned BN = MI->getOperand(2).getImm();
  BitTracker::RegisterRef RS = MI->getOperand(1);
  unsigned F, W;
  DebugLoc DL = MI->getDebugLoc();
  if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
    return false;
  MachineBasicBlock &B = *MI->getParent();

  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
  const BitTracker::BitValue &V = SC[F+BN];
  if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
    const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
    // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
    // a double register, need to use a subregister and adjust bit
    // number.
    unsigned P = UINT_MAX;
    BitTracker::RegisterRef RR(V.RefI.Reg, 0);
    if (TC == &Hexagon::DoubleRegsRegClass) {
      P = V.RefI.Pos;
      RR.Sub = Hexagon::subreg_loreg;
      if (P >= 32) {
        P -= 32;
        RR.Sub = Hexagon::subreg_hireg;
      }
    } else if (TC == &Hexagon::IntRegsRegClass) {
      P = V.RefI.Pos;
    }
    if (P != UINT_MAX) {
      unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      BuildMI(B, MI, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
        .addReg(RR.Reg, 0, RR.Sub)
        .addImm(P);
      HBS::replaceReg(RD.Reg, NewR, MRI);
      BT.put(NewR, RC);
      return true;
    }
  } else if (V.is(0) || V.is(1)) {
    unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
    unsigned NewOpc = V.is(0) ? Hexagon::TFR_PdFalse : Hexagon::TFR_PdTrue;
    BuildMI(B, MI, DL, HII.get(NewOpc), NewR);
    HBS::replaceReg(RD.Reg, NewR, MRI);
    return true;
  }

  return false;
}


bool BitSimplification::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  bool Changed = false;
  RegisterSet AVB = AVs;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
    MachineInstr *MI = &*I;
    Defs.clear();
    HBS::getInstrDefs(*MI, Defs);

    unsigned Opc = MI->getOpcode();
    if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
      continue;

    if (MI->mayStore()) {
      bool T = genStoreUpperHalf(MI);
      T = T || genStoreImmediate(MI);
      Changed |= T;
      continue;
    }

    if (Defs.count() != 1)
      continue;
    const MachineOperand &Op0 = MI->getOperand(0);
    if (!Op0.isReg() || !Op0.isDef())
      continue;
    BitTracker::RegisterRef RD = Op0;
    if (!BT.has(RD.Reg))
      continue;
    const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
    const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);

    if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
      bool T = genPackhl(MI, RD, RC);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::IntRegsRegClassID) {
      bool T = genExtractHalf(MI, RD, RC);
      T = T || genCombineHalf(MI, RD, RC);
      T = T || genExtractLow(MI, RD, RC);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::PredRegsRegClassID) {
      bool T = simplifyTstbit(MI, RD, RC);
      Changed |= T;
      continue;
    }
  }
  return Changed;
}


bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HII = *HST.getInstrInfo();

  MDT = &getAnalysis<MachineDominatorTree>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed;

  Changed = DeadCodeElimination(MF, *MDT).run();

  const HexagonEvaluator HE(HRI, MRI, HII, MF);
  BitTracker BT(HE, MF);
  DEBUG(BT.trace(true));
  BT.run();

  MachineBasicBlock &Entry = MF.front();

  RegisterSet AIG;  // Available registers for IG.
  ConstGeneration ImmG(BT, HII, MRI);
  Changed |= visitBlock(Entry, ImmG, AIG);

  RegisterSet ARE;  // Available registers for RIE.
  RedundantInstrElimination RIE(BT, HII, MRI);
  Changed |= visitBlock(Entry, RIE, ARE);

  RegisterSet ACG;  // Available registers for CG.
  CopyGeneration CopyG(BT, HII, MRI);
  Changed |= visitBlock(Entry, CopyG, ACG);

  RegisterSet ACP;  // Available registers for CP.
  CopyPropagation CopyP(HRI, MRI);
  Changed |= visitBlock(Entry, CopyP, ACP);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  BT.run();
  RegisterSet ABS;  // Available registers for BS.
  BitSimplification BitS(BT, HII, MRI);
  Changed |= visitBlock(Entry, BitS, ABS);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  if (Changed) {
    for (auto &B : MF)
      for (auto &I : B)
        I.clearKillInfo();
    DeadCodeElimination(MF, *MDT).run();
  }
  return Changed;
}


// Recognize loops where the code at the end of the loop matches the code
// before the entry of the loop, and the matching code is such that it can
// be simplified.
// This pass relies on the bit simplification above and only prepares code
// in a way that can be handled by the bit simplification.
//
// This is the motivating testcase (and explanation):
//
// {
//   loop0(.LBB0_2, r1)          // %for.body.preheader
//   r5:4 = memd(r0++#8)
// }
// {
//   r3 = lsr(r4, #16)
//   r7:6 = combine(r5, r5)
// }
// {
//   r3 = insert(r5, #16, #16)
//   r7:6 = vlsrw(r7:6, #16)
// }
// .LBB0_2:
// {
//   memh(r2+#4) = r5
//   memh(r2+#6) = r6            # R6 is really R5.H
// }
// {
//   r2 = add(r2, #8)
//   memh(r2+#0) = r4
//   memh(r2+#2) = r3            # R3 is really R4.H
// }
// {
//   r5:4 = memd(r0++#8)
// }
// {                             # "Shuffling" code that sets up R3 and R6
//   r3 = lsr(r4, #16)           # so that their halves can be stored in the
//   r7:6 = combine(r5, r5)      # next iteration. This could be folded into
// }                             # the stores if the code was at the beginning
// {                             # of the loop iteration. Since the same code
//   r3 = insert(r5, #16, #16)   # precedes the loop, it can actually be moved
//   r7:6 = vlsrw(r7:6, #16)     # there.
// }:endloop0
//
//
// The outcome:
//
// {
//   loop0(.LBB0_2, r1)
//   r5:4 = memd(r0++#8)
// }
// .LBB0_2:
// {
//   memh(r2+#4) = r5
//   memh(r2+#6) = r5.h
// }
// {
//   r2 = add(r2, #8)
//   memh(r2+#0) = r4
//   memh(r2+#2) = r4.h
// }
// {
//   r5:4 = memd(r0++#8)
// }:endloop0

namespace llvm {
  FunctionPass *createHexagonLoopRescheduling();
  void initializeHexagonLoopReschedulingPass(PassRegistry&);
}

namespace {
  class HexagonLoopRescheduling : public MachineFunctionPass {
  public:
    static char ID;
    HexagonLoopRescheduling() : MachineFunctionPass(ID),
        HII(0), HRI(0), MRI(0), BTP(0) {
      initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    const HexagonInstrInfo *HII;
    const HexagonRegisterInfo *HRI;
    MachineRegisterInfo *MRI;
    BitTracker *BTP;

    struct LoopCand {
      LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
            MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
      MachineBasicBlock *LB, *PB, *EB;
    };
    typedef std::vector<MachineInstr*> InstrList;
    struct InstrGroup {
      BitTracker::RegisterRef Inp, Out;
      InstrList Ins;
    };
    struct PhiInfo {
      PhiInfo(MachineInstr &P, MachineBasicBlock &B);
      unsigned DefR;
      BitTracker::RegisterRef LR, PR;
      MachineBasicBlock *LB, *PB;
    };

    static unsigned getDefReg(const MachineInstr *MI);
    bool isConst(unsigned Reg) const;
    bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
    bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
    bool isShuffleOf(unsigned OutR, unsigned InpR) const;
    bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
        unsigned &InpR2) const;
    void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
        MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
    bool processLoop(LoopCand &C);
  };
}

char HexagonLoopRescheduling::ID = 0;

INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
  "Hexagon Loop Rescheduling", false, false)


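// A short sketch of what PhiInfo captures (block numbers and vreg names are
// only illustrative): for a two-operand phi in loop block BB#2 such as
//   %vreg10 = PHI %vreg7, <BB#1>, %vreg12, <BB#2>
// the constructor below sets DefR = %vreg10, PR = %vreg7 with PB = BB#1
// (the value from the preheader), and LR = %vreg12 with LB = BB#2 (the
// loop-carried value).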
HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
      MachineBasicBlock &B) {
  DefR = HexagonLoopRescheduling::getDefReg(&P);
  LB = &B;
  PB = nullptr;
  for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
    const MachineOperand &OpB = P.getOperand(i+1);
    if (OpB.getMBB() == &B) {
      LR = P.getOperand(i);
      continue;
    }
    PB = OpB.getMBB();
    PR = P.getOperand(i);
  }
}


unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
  RegisterSet Defs;
  HBS::getInstrDefs(*MI, Defs);
  if (Defs.count() != 1)
    return 0;
  return Defs.find_first();
}


bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
  if (!BTP->has(Reg))
    return false;
  const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
  for (unsigned i = 0, w = RC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = RC[i];
    if (!V.is(0) && !V.is(1))
      return false;
  }
  return true;
}


bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
      unsigned DefR) const {
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
    case TargetOpcode::COPY:
    case Hexagon::S2_lsr_i_r:
    case Hexagon::S2_asr_i_r:
    case Hexagon::S2_asl_i_r:
    case Hexagon::S2_lsr_i_p:
    case Hexagon::S2_asr_i_p:
    case Hexagon::S2_asl_i_p:
    case Hexagon::S2_insert:
    case Hexagon::A2_or:
    case Hexagon::A2_orp:
    case Hexagon::A2_and:
    case Hexagon::A2_andp:
    case Hexagon::A2_combinew:
    case Hexagon::A4_combineri:
    case Hexagon::A4_combineir:
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_combine_ll:
    case Hexagon::A2_combine_lh:
    case Hexagon::A2_combine_hl:
    case Hexagon::A2_combine_hh:
      return true;
  }
  return false;
}


bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
      unsigned InpR) const {
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &Op = MI->getOperand(i);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == InpR)
      return i == n-1;
  }
  return false;
}


bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
  if (!BTP->has(OutR) || !BTP->has(InpR))
    return false;
  const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
  for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = OutC[i];
    if (V.Type != BitTracker::BitValue::Ref)
      continue;
    if (V.RefI.Reg != InpR)
      return false;
  }
  return true;
}


bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
      unsigned OutR2, unsigned &InpR2) const {
  if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
    return false;
  const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
  const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
  unsigned W = OutC1.width();
  unsigned MatchR = 0;
  if (W != OutC2.width())
    return false;
  for (unsigned i = 0; i < W; ++i) {
    const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
    if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
      return false;
    if (V1.Type != BitTracker::BitValue::Ref)
      continue;
    if (V1.RefI.Pos != V2.RefI.Pos)
      return false;
    if (V1.RefI.Reg != InpR1)
      return false;
    if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
      return false;
    if (!MatchR)
      MatchR = V2.RefI.Reg;
    else if (V2.RefI.Reg != MatchR)
      return false;
  }
  InpR2 = MatchR;
  return true;
}


void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
      MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
      unsigned NewPredR) {
  DenseMap<unsigned,unsigned> RegMap;

  const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
  unsigned PhiR = MRI->createVirtualRegister(PhiRC);
  BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
    .addReg(NewPredR)
    .addMBB(&PB)
    .addReg(G.Inp.Reg)
    .addMBB(&LB);
  RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));

  for (unsigned i = G.Ins.size(); i > 0; --i) {
    const MachineInstr *SI = G.Ins[i-1];
    unsigned DR = getDefReg(SI);
    const TargetRegisterClass *RC = MRI->getRegClass(DR);
    unsigned NewDR = MRI->createVirtualRegister(RC);
    DebugLoc DL = SI->getDebugLoc();

    auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
    for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
      const MachineOperand &Op = SI->getOperand(j);
      if (!Op.isReg()) {
        MIB.addOperand(Op);
        continue;
      }
      if (!Op.isUse())
        continue;
      unsigned UseR = RegMap[Op.getReg()];
      MIB.addReg(UseR, 0, Op.getSubReg());
    }
    RegMap.insert(std::make_pair(DR, NewDR));
  }

  HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
}


bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
  DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
  std::vector<PhiInfo> Phis;
  for (auto &I : *C.LB) {
    if (!I.isPHI())
      break;
    unsigned PR = getDefReg(&I);
    if (isConst(PR))
      continue;
    bool BadUse = false, GoodUse = false;
    for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() != C.LB) {
        BadUse = true;
        break;
      }
      if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
        GoodUse = true;
    }
    if (BadUse || !GoodUse)
      continue;

    Phis.push_back(PhiInfo(I, *C.LB));
  }

  DEBUG({
    dbgs() << "Phis: {";
    for (auto &I : Phis) {
      dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi("
             << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
             << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
             << I.LB->getNumber() << ')';
    }
    dbgs() << " }\n";
  });

  if (Phis.empty())
    return false;

  bool Changed = false;
  InstrList ShufIns;

  // Go backwards in the block: for each bit shuffling instruction, check
  // if that instruction could potentially be moved to the front of the loop:
  // the output of the instruction cannot be used in a non-shuffling
  // instruction in this loop.
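  // For example (vreg names are only illustrative): a shuffle such as
  //   %vreg3 = S2_lsr_i_r %vreg4, #16
  // is collected only if every use of %vreg3 inside the loop is either the
  // back-edge operand of a phi or another already-collected shuffle; if
  // %vreg3 also fed, say, an add in the same iteration, it would be
  // rejected below.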
  for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
    if (I->isTerminator())
      continue;
    if (I->isPHI())
      break;

    RegisterSet Defs;
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DefR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DefR))
      continue;
    if (!isBitShuffle(&*I, DefR))
      continue;

    bool BadUse = false;
    for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() == C.LB) {
        if (UseI->isPHI()) {
          // If the use is in a phi node in this loop, then it should be
          // the value corresponding to the back edge.
          unsigned Idx = UI.getOperandNo();
          if (UseI->getOperand(Idx+1).getMBB() != C.LB)
            BadUse = true;
        } else {
          auto F = std::find(ShufIns.begin(), ShufIns.end(), UseI);
          if (F == ShufIns.end())
            BadUse = true;
        }
      } else {
        // There is a use outside of the loop, but there is no epilog block
        // suitable for a copy-out.
        if (C.EB == nullptr)
          BadUse = true;
      }
      if (BadUse)
        break;
    }

    if (BadUse)
      continue;
    ShufIns.push_back(&*I);
  }

  // Partition the list of shuffling instructions into instruction groups,
  // where each group has to be moved as a whole (i.e. a group is a chain of
  // dependent instructions). A group produces a single live output register,
  // which is meant to be the input of the loop phi node (although this is
  // not checked here yet). It also uses a single register as its input,
  // which is some value produced in the loop body. After moving the group
  // to the beginning of the loop, that input register would need to be
  // the loop-carried register (through a phi node) instead of the (currently
  // loop-carried) output register.
  typedef std::vector<InstrGroup> InstrGroupList;
  InstrGroupList Groups;

  for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
    MachineInstr *SI = ShufIns[i];
    if (SI == nullptr)
      continue;

    InstrGroup G;
    G.Ins.push_back(SI);
    G.Out.Reg = getDefReg(SI);
    RegisterSet Inputs;
    HBS::getInstrUses(*SI, Inputs);

    for (unsigned j = i+1; j < n; ++j) {
      MachineInstr *MI = ShufIns[j];
      if (MI == nullptr)
        continue;
      RegisterSet Defs;
      HBS::getInstrDefs(*MI, Defs);
      // If this instruction does not define any pending inputs, skip it.
      if (!Defs.intersects(Inputs))
        continue;
      // Otherwise, add it to the current group and remove the inputs that
      // are defined by MI.
      G.Ins.push_back(MI);
      Inputs.remove(Defs);
      // Then add all registers used by MI.
      HBS::getInstrUses(*MI, Inputs);
      ShufIns[j] = nullptr;
    }

    // Only add a group if it requires at most one register.
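    // (Illustratively, a group { %vreg3 = S2_lsr_i_r %vreg5, #16 } has the
    // single pending input %vreg5 and is kept, while a group that still
    // needs two different loop-defined registers is dropped here.)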
    if (Inputs.count() > 1)
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    if (std::find_if(Phis.begin(), Phis.end(), LoopInpEq) == Phis.end())
      continue;

    G.Inp.Reg = Inputs.find_first();
    Groups.push_back(G);
  }

  DEBUG({
    for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
      InstrGroup &G = Groups[i];
      dbgs() << "Group[" << i << "] inp: "
             << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub)
             << "  out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
      for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
        dbgs() << "  " << *G.Ins[j];
    }
  });

  for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
    InstrGroup &G = Groups[i];
    if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    auto F = std::find_if(Phis.begin(), Phis.end(), LoopInpEq);
    if (F == Phis.end())
      continue;
    unsigned PredR = 0;
    if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PredR)) {
      const MachineInstr *DefPredR = MRI->getVRegDef(F->PR.Reg);
      unsigned Opc = DefPredR->getOpcode();
      if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
        continue;
      if (!DefPredR->getOperand(1).isImm())
        continue;
      if (DefPredR->getOperand(1).getImm() != 0)
        continue;
      const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
      if (RC != MRI->getRegClass(F->PR.Reg)) {
        PredR = MRI->createVirtualRegister(RC);
        unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
                                                          : Hexagon::A2_tfrpi;
        auto T = C.PB->getFirstTerminator();
        DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
        BuildMI(*C.PB, T, DL, HII->get(TfrI), PredR)
          .addImm(0);
      } else {
        PredR = F->PR.Reg;
      }
    }
    assert(MRI->getRegClass(PredR) == MRI->getRegClass(G.Inp.Reg));
    moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PredR);
    Changed = true;
  }

  return Changed;
}


bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  HII = HST.getInstrInfo();
  HRI = HST.getRegisterInfo();
  MRI = &MF.getRegInfo();
  const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
  BitTracker BT(HE, MF);
  DEBUG(BT.trace(true));
  BT.run();
  BTP = &BT;

  std::vector<LoopCand> Cand;

  for (auto &B : MF) {
    if (B.pred_size() != 2 || B.succ_size() != 2)
      continue;
    MachineBasicBlock *PB = nullptr;
    bool IsLoop = false;
    for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
      if (*PI != &B)
        PB = *PI;
      else
        IsLoop = true;
    }
    if (!IsLoop)
      continue;

    MachineBasicBlock *EB = nullptr;
    for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
      if (*SI == &B)
        continue;
      // Set EB to the epilog block, if it has only one predecessor (i.e. the
      // edge from B to EB is non-critical).
      if ((*SI)->pred_size() == 1)
        EB = *SI;
      break;
    }

    Cand.push_back(LoopCand(&B, PB, EB));
  }

  bool Changed = false;
  for (auto &C : Cand)
    Changed |= processLoop(C);

  return Changed;
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createHexagonLoopRescheduling() {
  return new HexagonLoopRescheduling();
}

FunctionPass *llvm::createHexagonBitSimplify() {
  return new HexagonBitSimplify();
}