//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "t2-reduce-size"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,  "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,   "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
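    // PredCC2 uses the same 0/1/2 encoding as PredCC1, but applies to the
    // two-address (NarrowOpc2) form of the instruction.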
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };

  static const ReduceEntry ReduceTable[] = {
    // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
    { ARM::t2ADCrr, 0,            ARM::tADC,     0,   0,   0,   1,  0,0, 0,0,0 },
    { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  0,0, 0,1,0 },
    { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,   1,   0,  0,1, 0,0,0 },
    { ARM::t2ADDSri,ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  2,2, 0,1,0 },
    { ARM::t2ADDSrr,ARM::tADDrr,  0,             0,   0,   1,   0,  2,0, 0,1,0 },
    { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,   0,   1,  0,0, 1,0,0 },
    { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
    { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
    { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,   0,   1,  0,0, 1,0,0 },
    // FIXME: Disable CMN, as CCodes are backwards from compare expectations.
    //{ ARM::t2CMNrr, ARM::tCMN,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
    { ARM::t2CMNzrr,ARM::tCMNz,   0,             0,   0,   1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,   1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,   0,   0,  2,0, 0,1,0 },
    { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,   0,   1,  0,0, 1,0,0 },
    // FIXME: adr.n immediate offset must be multiple of 4.
    //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,   0,   0,   1,   0,  1,0, 0,0,0 },
    { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
    { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,   0,   1,  0,0, 1,0,1 },
    { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
    { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
    { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,0,0 },
    { ARM::t2MOVi16,ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,1,0 },
    // FIXME: Do we need the 16-bit 'S' variant?
    { ARM::t2MOVr,  ARM::tMOVr,   0,             0,   0,   0,   0,  1,0, 0,0,0 },
    { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,   0,   1,  0,0, 1,0,0 },
    { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,   1,   0,  0,0, 0,0,0 },
    { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,   0,   1,  0,0, 1,0,0 },
    { ARM::t2REV,   ARM::tREV,    0,             0,   0,   1,   0,  1,0, 0,0,0 },
    { ARM::t2REV16, ARM::tREV16,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
    { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
    { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,   0,   1,  0,0, 1,0,0 },
    { ARM::t2RSBri, ARM::tRSB,    0,             0,   0,   1,   0,  0,0, 0,1,0 },
    { ARM::t2RSBSri,ARM::tRSB,    0,             0,   0,   1,   0,  2,0, 0,1,0 },
    { ARM::t2SBCrr, 0,            ARM::tSBC,     0,   0,   0,   1,  0,0, 0,0,0 },
    { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  0,0, 0,0,0 },
    { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,   1,   0,  0,0, 0,0,0 },
    { ARM::t2SUBSri,ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  2,2, 0,0,0 },
    { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
    { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
    { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
    { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
    { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
    { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },

    // FIXME: Clean this up after splitting each Thumb load / store opcode
    // into multiple ones.
    { ARM::t2LDRi12,   ARM::tLDRi,     ARM::tLDRspi,  5,  8,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRs,     ARM::tLDRr,     0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRBi12,  ARM::tLDRBi,    0,             5,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRBs,    ARM::tLDRBr,    0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRHi12,  ARM::tLDRHi,    0,             5,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRHs,    ARM::tLDRHr,    0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRSBs,   ARM::tLDRSB,    0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2LDRSHs,   ARM::tLDRSH,    0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRi12,   ARM::tSTRi,     ARM::tSTRspi,  5,  8,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRs,     ARM::tSTRr,     0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRBi12,  ARM::tSTRBi,    0,             5,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRBs,    ARM::tSTRBr,    0,             0,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRHi12,  ARM::tSTRHi,    0,             5,  0,  1,  0,  0,0, 0,1,0 },
    { ARM::t2STRHs,    ARM::tSTRHr,    0,             0,  0,  1,  0,  0,0, 0,1,0 },

    { ARM::t2LDMIA,    ARM::tLDMIA,    0,             0,  0,  1,  1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_RET,0,              ARM::tPOP_RET, 0,  0,  1,  1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,     0,  0,  1,  1,  1,1, 0,1,0 },
    // ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent.
    { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD,0,             0,  0,  1,  1,  1,1, 0,1,0 },
    { ARM::t2STMDB_UPD,0,              ARM::tPUSH,    0,  0,  1,  1,  1,1, 0,1,0 }
  };

  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce();

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;
  };
  char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      assert(false && "Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch (Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - On A9 (and other out-of-order) implementations,
/// the 's' variants of 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability to do
/// register renaming.
/// This function checks whether there is a read-after-write dependency between
/// the last instruction that defines CPSR and the current instruction. If
/// there is, then there is no harm done, since the instruction cannot be
/// retired before the CPSR-setting instruction anyway.
/// Note that we are not doing full dependency analysis here for the sake of
/// compile time. We are not looking for cases like:
///    r0 = muls ...
///    r1 = add.w r0, ...
///    ...
///       = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls, since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (unsigned i = 0, e = CPSRDef->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = CPSRDef->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = Use->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add a false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    // Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA ||
                 Opc == ARM::t2LDMDB || Opc == ARM::t2LDMIA_UPD ||
                 Opc == ARM::t2LDMDB_UPD);
  bool isLROk = (Opc == ARM::t2STMIA_UPD || Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
      HasOffReg = false;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    if (!isARMLowRegister(BaseReg) || Entry.WideOpc != ARM::t2LDMIA)
      return false;

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 4; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always", and the instruction must not set
    // the condition flags.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoad() || MI->mayStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr, ARM::tCMPr, 0, 0, 0, 1, 1, 2,0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {

  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1, CommOpIdx2;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = 0;

  CPSRDef = 0;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (MachineBasicBlock::pred_iterator
       I = MBB.pred_begin(), E = MBB.pred_end(); I != E; ++I) {
    const MBBInfo &PInfo = BlockInfo[(*I)->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = llvm::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = prior(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (!NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = 0;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
  STI = &TM.getSubtarget<ARMSubtarget>();

  // Optimizing / minimizing size?
  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
  OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::OptimizeForSize);
  MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::MinSize);

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so the CPSR state (CPSRDef /
  // HighLatencyCPSR) is known for all of a block's predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass() {
  return new Thumb2SizeReduce();
}
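
// Note: outside this file the pass is obtained only through
// createThumb2SizeReductionPass(); in this version of the ARM backend it is
// presumably scheduled from the target's pass configuration in
// ARMTargetMachine.cpp, late in the pipeline and only when generating Thumb2
// code, along the lines of:
//
//   if (Subtarget->isThumb2())
//     addPass(createThumb2SizeReductionPass());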