//===--- HexagonBitTracker.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"

using namespace llvm;

typedef BitTracker BT;

HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
      MachineRegisterInfo &mri, const HexagonInstrInfo &tii,
      MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map back the
  // virtual register to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
  unsigned AttrIdx = 0;
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    AttrIdx++;
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    AttributeSet Attrs = F.getAttributes();
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}


BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  using namespace Hexagon;
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned ID = RC->getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  switch (ID) {
    case DoubleRegsRegClassID:
    case VecDblRegsRegClassID:
    case VecDblRegs128BRegClassID:
      return (Sub == subreg_loreg) ? BT::BitMask(0, RW-1)
                                   : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << PrintReg(Reg, &TRI, Sub) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}

namespace {
  class RegisterRefs {
    std::vector<BT::RegisterRef> Vector;

  public:
    RegisterRefs(const MachineInstr *MI) : Vector(MI->getNumOperands()) {
      for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
        const MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg())
          Vector[i] = BT::RegisterRef(MO);
        // For indices that don't correspond to registers, the entry will
        // remain constructed via the default constructor.
      }
    }

    size_t size() const { return Vector.size(); }
    const BT::RegisterRef &operator[](unsigned n) const {
      // The main purpose of this operator is to assert on a bad argument.
      assert(n < Vector.size());
      return Vector[n];
    }
  };
}

bool HexagonEvaluator::evaluate(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  unsigned NumDefs = 0;

  // Sanity verification: there should not be any defs with subregisters.
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  if (MI->mayLoad())
    return evaluateLoad(MI, Inputs, Outputs);

  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
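  // For example, if an i8 argument is passed zero-extended, the COPY that
  // moves it from R0 into a virtual register allows the 24 upper bits of
  // that virtual register to be treated as known zeros.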
  if (MI->isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }

  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }

  RegisterRefs Reg(MI);
  unsigned Opc = MI->getOpcode();
  using namespace Hexagon;
#define op(i) MI->getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i],Inputs))
#define im(i) MI->getOperand(i).getImm()

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this,&Reg,&MI,&Inputs] (unsigned N, uint16_t W)
        -> BT::RegisterCell {
    const MachineOperand &Op = MI->getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
        uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };

  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many cases
  // below.
  uint16_t W0 = (Reg[0].Reg != 0) ?
        getRegBitWidth(Reg[0]) : 0;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32:
    case CONST32_Float_Real:
    case CONST32_Int_Real:
    case CONST64_Float_Real:
    case CONST64_Int_Real:
      return rr0(eIMM(im(1), W0), Outputs);
    case TFR_PdFalse:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case TFR_PdTrue:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case TFR_FI: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off);
      unsigned L = Log2_32(A);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }

    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      W0 = 8; // XXX Pred size
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
    }

    // Arithmetic:

    case A2_abs:
    case A2_absp:
      // TODO
      break;

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }
    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs"
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);

    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
    //case M2_mpysu_up:

    // Logical/bitwise:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_not:
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);

    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_vh:
    case S2_asl_i_vw:
      // TODO
      break;

    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
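      // In other words: Rd = trunc_32(((sext_64(Rs) >>s #u5) + 1) >> 1),
      // which is what the computation on XC and RC below implements.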
      assert(W0 == 32);
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_r_vh:
    case S2_asr_i_vw:
    case S2_asr_i_svw_trun:
      // TODO
      break;

    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
                            : RC[BX].is(1) ? BT::BitValue::Zero
                                           : BT::BitValue::self();
      return rr0(RC, Outputs);
    }

    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S4_extract:
    case S4_extractp:
    case S2_extractu:
    case S2_extractup: {
      uint16_t Wd = im(2), Of = im(3);
      assert(Wd <= W0);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with 0 bits.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short, need to extend it with 0s or sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
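      // For example, with a 32-bit destination, inserting a 6-bit field at
      // offset 30 only updates the 2 bits that still fit in the register.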
      if (Wd+Of > W0)
        Wd = W0-Of;
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }

    // Bit permutations:

    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
      assert(W0 % 2 == 0);
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // Low half in the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // High half in the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(W0 == 64);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }

    // Mux:

    case C2_muxii:
    case C2_muxir:
    case C2_muxri:
    case C2_mux: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
    case C2_vmux:
      // TODO
      break;

    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);

    // Bit count:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
      return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs);
    case S5_popcountp:
      // TODO
      break;

    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(1))
          All1 = false;
        if (!P1[i].is(0))
          continue;
        Has0 = true;
        break;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(0))
          All0 = false;
        if (!P1[i].is(1))
          continue;
        Has1 = true;
        break;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C2_bitsclr:
    case C2_bitsclri:
    case C2_bitsset:
    case C4_nbitsclr:
    case C4_nbitsclri:
    case C4_nbitsset:
      // TODO
      break;
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }

    default:
      return MachineEvaluator::evaluate(MI, Inputs, Outputs);
  }
#undef im
#undef rc
#undef op
  return false;
}


bool HexagonEvaluator::evaluate(const MachineInstr *BI,
      const CellMapType &Inputs, BranchTargetList &Targets,
      bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::AnalyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
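  // For a conditional branch such as "if (!p0) jump label", Op0 is the
  // predicate register and Op1 is the target block; the branch is resolved
  // below only when bit 0 of the predicate is a known constant.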
  unsigned Opc = BI->getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
      // Fall through.
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch: if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI->getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI->getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI->getOperand(1).getMBB());
  FallsThru = false;
  return true;
}


bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  if (TII.isPredicated(MI))
    return false;
  assert(MI->mayLoad() && "A load that mayn't?");
  unsigned Opc = MI->getOpcode();

  uint16_t BitNum;
  bool SignEx;
  using namespace Hexagon;

  switch (Opc) {
    default:
      return false;

#if 0
    // memb_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    // memh_fifo
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw2_pi:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    case L2_loadbsw4_pi:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw2_pi:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
    case L2_loadbzw4_pi:
#endif

    case L2_loadrbgp:
    case L2_loadrb_io:
    case L2_loadrb_pbr:
    case L2_loadrb_pci:
    case L2_loadrb_pcr:
    case L2_loadrb_pi:
    case L4_loadrb_abs:
    case L4_loadrb_ap:
    case L4_loadrb_rr:
    case L4_loadrb_ur:
      BitNum = 8;
      SignEx = true;
      break;

    case L2_loadrubgp:
    case L2_loadrub_io:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadrub_pi:
    case L4_loadrub_abs:
    case L4_loadrub_ap:
    case L4_loadrub_rr:
    case L4_loadrub_ur:
      BitNum = 8;
      SignEx = false;
      break;

    case L2_loadrhgp:
    case L2_loadrh_io:
    case L2_loadrh_pbr:
    case L2_loadrh_pci:
    case L2_loadrh_pcr:
    case L2_loadrh_pi:
    case L4_loadrh_abs:
    case L4_loadrh_ap:
    case L4_loadrh_rr:
    case L4_loadrh_ur:
      BitNum = 16;
      SignEx = true;
      break;

    case L2_loadruhgp:
    case L2_loadruh_io:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadruh_pi:
    case L4_loadruh_rr:
    case L4_loadruh_abs:
    case L4_loadruh_ap:
    case L4_loadruh_ur:
      BitNum = 16;
      SignEx = false;
      break;

    case L2_loadrigp:
    case L2_loadri_io:
    case L2_loadri_pbr:
    case L2_loadri_pci:
    case L2_loadri_pcr:
    case L2_loadri_pi:
    case L2_loadw_locked:
    case L4_loadri_abs:
    case L4_loadri_ap:
    case L4_loadri_rr:
    case L4_loadri_ur:
    case LDriw_pred:
      BitNum = 32;
      SignEx = true;
      break;

    case L2_loadrdgp:
    case L2_loadrd_io:
    case L2_loadrd_pbr:
    case L2_loadrd_pci:
    case L2_loadrd_pcr:
    case L2_loadrd_pi:
    case L4_loadd_locked:
    case L4_loadrd_abs:
    case L4_loadrd_ap:
    case L4_loadrd_rr:
    case L4_loadrd_ur:
      BitNum = 64;
      SignEx = true;
      break;
  }

  const MachineOperand &MD = MI->getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}


bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI->isCopy());

  RegisterRef RD = MI->getOperand(0);
  RegisterRef RS = MI->getOperand(1);
  assert(RD.Sub == 0);
  if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a
  // virtual register, and make zero-/sign-extends possible (otherwise we
  // would be extending "self" bit values, which will have no effect, since
  // "self" values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}


unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
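  // Note that each double register Dn overlaps the pair R(2n+1):R(2n), so
  // the 32-bit and 64-bit indices have to be kept in step with each other.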
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}


unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  typedef MachineRegisterInfo::livein_iterator iterator;
  for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    if (I->first == PReg)
      return I->second;
  }
  return 0;
}