//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"
#define THUMB2_SIZE_REDUCE_NAME "Thumb2 instruction size reduce pass"

STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,  "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,   "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {

/// ReduceTable - A static table with information on mapping from wide
/// opcodes to narrow ones.
struct ReduceEntry {
  uint16_t WideOpc;       // Wide opcode
  uint16_t NarrowOpc1;    // Narrow opcode to transform to
  uint16_t NarrowOpc2;    // Narrow opcode when it's two-address
  uint8_t  Imm1Limit;     // Limit of immediate field (bits)
  uint8_t  Imm2Limit;     // Limit of immediate field when it's two-address
  unsigned LowRegs1 : 1;  // Only possible if low-registers are used
  unsigned LowRegs2 : 1;  // Only possible if low-registers are used (2addr)
  unsigned PredCC1  : 2;  // 0 - If predicated, cc is on and vice versa.
                          // 1 - No cc field.
                          // 2 - Always set CPSR.
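  // Note (added): PredCC2 below uses the same encoding as PredCC1, but is
  // consulted for the two-address (NarrowOpc2) form; see VerifyPredAndCC.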
  unsigned PredCC2  : 2;
  unsigned PartFlag : 1;  // 16-bit instruction does partial flag update
  unsigned Special  : 1;  // Needs to be dealt with specially
  unsigned AvoidMovs: 1;  // Avoid movs with shifter operand (for Swift)
};

static const ReduceEntry ReduceTable[] = {
  // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0,0 },
  { ARM::t2ADDSri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1,0 },
  { ARM::t2ADDSrr, ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
  // FIXME: Disable CMN, as CCodes are backwards from compare expectations
  //{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
  { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT, ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 },
  { ARM::t2MOVi16, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr, ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 },
  { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2RSBSri, ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2SUBSri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0,0,0 },
  { ARM::t2SUBSrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2TEQrr, ARM::tEOR, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },

  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
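  // Note (added): every load / store entry below has the Special bit set, so
  // these opcodes are dispatched through ReduceSpecial() and handled by
  // ReduceLoadStore() rather than the generic narrowing paths.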
  { ARM::t2LDRi12, ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBi12, ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHi12, ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSBs, ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSHs, ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDR_POST, ARM::tLDMIA_UPD, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRi12, ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBi12, ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHi12, ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STR_POST, ARM::tSTMIA_UPD, 0, 0, 0, 1, 0, 0,0, 0,1,0 },

  { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_RET, 0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD, ARM::tLDMIA_UPD, ARM::tPOP, 0, 0, 1, 1, 1,1, 0,1,0 },
  // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
  // tSTMIA_UPD has different semantics and can only be used if the base
  // register is killed. This difference is correctly handled elsewhere.
  { ARM::t2STMIA, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMIA_UPD, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 0,1,0 }
};

class Thumb2SizeReduce : public MachineFunctionPass {
public:
  static char ID;

  const Thumb2InstrInfo *TII;
  const ARMSubtarget *STI;

  Thumb2SizeReduce(std::function<bool(const Function &)> Ftor = nullptr);

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override {
    return THUMB2_SIZE_REDUCE_NAME;
  }

private:
  /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
  DenseMap<unsigned, unsigned> ReduceOpcodeMap;

  bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

  bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                       bool is2Addr, ARMCC::CondCodes Pred,
                       bool LiveCPSR, bool &HasCC, bool &CCDead);

  bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry);

  bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                     const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

  /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
  /// instruction.
  bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                     const ReduceEntry &Entry, bool LiveCPSR,
                     bool IsSelfLoop);

  /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
  /// non-two-address instruction.
  bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                      const ReduceEntry &Entry, bool LiveCPSR,
                      bool IsSelfLoop);

  /// ReduceMI - Attempt to reduce MI, return true on success.
  bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                bool LiveCPSR, bool IsSelfLoop);

  /// ReduceMBB - Reduce width of instructions in the specified basic block.
  bool ReduceMBB(MachineBasicBlock &MBB);

  bool OptimizeSize;
  bool MinimizeSize;

  // Last instruction to define CPSR in the current block.
  MachineInstr *CPSRDef;
  // Was CPSR last defined by a high latency instruction?
  // When CPSRDef is null, this refers to CPSR defs in predecessors.
  bool HighLatencyCPSR;

  struct MBBInfo {
    // The flags leaving this block have high latency.
    bool HighLatencyCPSR = false;
    // Has this block been visited yet?
    bool Visited = false;

    MBBInfo() = default;
  };

  SmallVector<MBBInfo, 8> BlockInfo;

  std::function<bool(const Function &)> PredicateFtor;
};

char Thumb2SizeReduce::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(Thumb2SizeReduce, DEBUG_TYPE, THUMB2_SIZE_REDUCE_NAME, false,
                false)

Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
    : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const MCPhysReg *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch (Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability to do
/// register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be
/// retired before the CPSR-setting instruction anyway.
/// Note that we are not doing full dependency analysis here for the sake of
/// compile time. We're not looking for cases like:
///    r0 = muls ...
///    r1 = add.w r0, ...
///    ...
///       = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
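    // Note (added): with no CPSR def in this block, HighLatencyCPSR reflects
    // the flags coming in from predecessor blocks (see MBBInfo).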
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    Register Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add a false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    // Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDR_POST:
  case ARM::t2STR_POST: {
    if (!MinimizeSize)
      return false;

    if (!MI->hasOneMemOperand() ||
        (*MI->memoperands_begin())->getAlign() < Align(4))
      return false;

    // We're creating a completely different type of load/store - LDM from LDR.
    // For this reason we can't reuse the logic at the end of this function; we
    // have to implement the MI building here.
    bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
    Register Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
    Register Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
    unsigned Offset = MI->getOperand(3).getImm();
    unsigned PredImm = MI->getOperand(4).getImm();
    Register PredReg = MI->getOperand(5).getReg();
    assert(isARMLowRegister(Rt));
    assert(isARMLowRegister(Rn));

    if (Offset != 4)
      return false;

    // Add the 16-bit load / store instruction.
    DebugLoc dl = MI->getDebugLoc();
    auto MIB = BuildMI(MBB, MI, dl, TII->get(Entry.NarrowOpc1))
                   .addReg(Rn, RegState::Define)
                   .addReg(Rn)
                   .addImm(PredImm)
                   .addReg(PredReg)
                   .addReg(Rt, IsStore ? 0 : RegState::Define);

    // Transfer memoperands.
    MIB.setMemRefs(MI->memoperands());

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    // Kill the old instruction.
    MI->eraseFromBundle();
    ++NumLdSts;
    return true;
  }
  case ARM::t2LDMIA: {
    Register BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2STMIA: {
    // t2STMIA is reduced to tSTMIA_UPD which has writeback. We can only do this
    // if the base register is killed, as then it doesn't matter what its value
    // is after the instruction.
    if (!MI->getOperand(0).isKill())
      return false;

    // If the base register is in the register list and isn't the lowest
    // numbered register (i.e. it's in operand 4 onwards) then with writeback
    // the stored value is unknown, so we can't convert to tSTMIA_UPD.
    Register BaseReg = MI->getOperand(0).getReg();
    for (unsigned i = 4; i < MI->getNumOperands(); ++i)
      if (MI->getOperand(i).getReg() == BaseReg)
        return false;

    break;
  }
  case ARM::t2LDMIA_RET: {
    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));

  // tSTMIA_UPD takes a defining register operand. We've already checked that
  // the register is killed, so mark it as dead here.
  if (Entry.WideOpc == ARM::t2STMIA)
    MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead);

  if (!isLdStMul) {
    MIB.add(MI->getOperand(0));
    MIB.add(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                                getInternalReadRegState(OffsetInternal));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.add(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB.setMemRefs(MI->memoperands());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, MI->getDebugLoc(),
                TII->get(ARM::tADDrSPi))
            .add(MI->getOperand(0))
            .add(MI->getOperand(1))
            .addImm(Imm / 4) // The tADDrSPi has an implied scale by four.
            .add(predOps(ARMCC::AL));

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                      << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    Register PredReg;
    if (getInstrPredicate(*MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri:
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        LLVM_FALLTHROUGH;
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  case ARM::t2TEQrr: {
    Register PredReg;
    // Can only convert to eors if we're not in an IT block.
    if (getInstrPredicate(*MI, PredReg) != ARMCC::AL)
      break;
    // TODO: if Operand 0 is not killed but Operand 1 is, then we could write
    // to Op1 instead.
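    // Note (added): the narrow tEOR writes its result back to the first
    // source register, so this is only safe when operand 0 is killed here.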
    if (MI->getOperand(0).isKill())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  Register Reg0 = MI->getOperand(0).getReg();
  Register Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    Register Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1 = 1;
    unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI =
        TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    Register Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  Register PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.add(MI->getOperand(0));
  if (NewMCID.hasOptionalDef())
    MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.add(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  Register PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);

  // TEQ is special in that it doesn't define a register but we're converting
  // it into an EOR which does. So add the first operand as a def and then
  // again as a use.
  if (MCID.getOpcode() == ARM::t2TEQrr) {
    MIB.add(MI->getOperand(0));
    MIB->getOperand(0).setIsKill(false);
    MIB->getOperand(0).setIsDef(true);
    MIB->getOperand(0).setIsDead(true);

    if (NewMCID.hasOptionalDef())
      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
    MIB.add(MI->getOperand(0));
  } else {
    MIB.add(MI->getOperand(0));
    if (NewMCID.hasOptionalDef())
      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
  }

  // Transfer the rest of operands.
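  // Note (added): the loop below skips the optional CPSR def, the zero
  // immediate that becomes implicit for RSB/SXT/UXT, predicate operands the
  // narrow opcode cannot take, and implicit CPSR operands handled above.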
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.add(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    MIB.add(predOps(ARMCC::AL));

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugInstr())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  if (PredicateFtor && !PredicateFtor(MF.getFunction()))
    return false;

  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;

  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());

  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
  OptimizeSize = MF.getFunction().hasOptSize();
  MinimizeSize = STI->hasMinSize();

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so the CPSR state (HighLatencyCPSR)
  // left by all predecessors is known.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(std::move(Ftor));
}