//===- HexagonBitSimplify.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "BitTracker.h"
#include "HexagonBitTracker.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexbit"

using namespace llvm;

static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden,
  cl::init(true), cl::desc("Preserve subregisters in tied operands"));
static cl::opt<bool> GenExtract("hexbit-extract", cl::Hidden,
  cl::init(true), cl::desc("Generate extract instructions"));
static cl::opt<bool> GenBitSplit("hexbit-bitsplit", cl::Hidden,
  cl::init(true), cl::desc("Generate bitsplit instructions"));

static cl::opt<unsigned> MaxExtract("hexbit-max-extract", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountExtract = 0;
static cl::opt<unsigned> MaxBitSplit("hexbit-max-bitsplit", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountBitSplit = 0;

namespace llvm {

  void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
  FunctionPass *createHexagonBitSimplify();

} // end namespace llvm

namespace {

  // Set of virtual registers, based on BitVector.
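  // The set is indexed by the virtual register index (Register::virtReg2Index),
  // so membership tests, insertions and removals are plain bit operations.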
  struct RegisterSet : private BitVector {
    RegisterSet() = default;
    explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
    RegisterSet(const RegisterSet &RS) = default;

    using BitVector::clear;
    using BitVector::count;

    unsigned find_first() const {
      int First = BitVector::find_first();
      if (First < 0)
        return 0;
      return x2v(First);
    }

    unsigned find_next(unsigned Prev) const {
      int Next = BitVector::find_next(v2x(Prev));
      if (Next < 0)
        return 0;
      return x2v(Next);
    }

    RegisterSet &insert(unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return static_cast<RegisterSet&>(BitVector::set(Idx));
    }
    RegisterSet &remove(unsigned R) {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return *this;
      return static_cast<RegisterSet&>(BitVector::reset(Idx));
    }

    RegisterSet &insert(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
    }
    RegisterSet &remove(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::reset(Rs));
    }

    reference operator[](unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return BitVector::operator[](Idx);
    }
    bool operator[](unsigned R) const {
      unsigned Idx = v2x(R);
      assert(Idx < size());
      return BitVector::operator[](Idx);
    }
    bool has(unsigned R) const {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return false;
      return BitVector::test(Idx);
    }

    bool empty() const {
      return !BitVector::any();
    }
    bool includes(const RegisterSet &Rs) const {
      // A.BitVector::test(B) <=> A-B != {}
      return !Rs.BitVector::test(*this);
    }
    bool intersects(const RegisterSet &Rs) const {
      return BitVector::anyCommon(Rs);
    }

  private:
    void ensure(unsigned Idx) {
      if (size() <= Idx)
        resize(std::max(Idx+1, 32U));
    }

    static inline unsigned v2x(unsigned v) {
      return Register::virtReg2Index(v);
    }

    static inline unsigned x2v(unsigned x) {
      return Register::index2VirtReg(x);
    }
  };

  struct PrintRegSet {
    PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
      : RS(S), TRI(RI) {}

    friend raw_ostream &operator<< (raw_ostream &OS,
          const PrintRegSet &P);

  private:
    const RegisterSet &RS;
    const TargetRegisterInfo *TRI;
  };

  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
    OS << '{';
    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
      OS << ' ' << printReg(R, P.TRI);
    OS << " }";
    return OS;
  }

  class Transformation;

  class HexagonBitSimplify : public MachineFunctionPass {
  public:
    static char ID;

    HexagonBitSimplify() : MachineFunctionPass(ID) {}

    StringRef getPassName() const override {
      return "Hexagon bit simplification";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
    static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
    static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
          const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
    static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
          uint16_t W);
    static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
          uint16_t W, uint64_t &U);
    static bool replaceReg(unsigned OldR, unsigned NewR,
          MachineRegisterInfo &MRI);
    static bool getSubregMask(const BitTracker::RegisterRef &RR,
          unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
    static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
          unsigned NewSR, MachineRegisterInfo &MRI);
    static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
          unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
    static bool parseRegSequence(const MachineInstr &I,
          BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
          const MachineRegisterInfo &MRI);

    static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
          uint16_t Begin);
    static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
          uint16_t Begin, const HexagonInstrInfo &HII);

    static const TargetRegisterClass *getFinalVRegClass(
          const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
    static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
          const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

  private:
    MachineDominatorTree *MDT = nullptr;

    bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
    static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
          unsigned NewSub = Hexagon::NoSubRegister);
  };

  using HBS = HexagonBitSimplify;

  // The purpose of this class is to provide a common facility to traverse
  // the function top-down or bottom-up via the dominator tree, and keep
  // track of the available registers.
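  // A transformation constructed with TD=true processes a block before the
  // blocks it dominates (with AVs holding the registers defined in all of the
  // block's dominators); with TD=false the block is processed after them.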
  class Transformation {
  public:
    bool TopDown;

    Transformation(bool TD) : TopDown(TD) {}
    virtual ~Transformation() = default;

    virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
  };

} // end anonymous namespace

char HexagonBitSimplify::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)

bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  bool Changed = false;

  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto *DTN : children<MachineDomTreeNode*>(MDT->getNode(&B)))
    Changed |= visitBlock(*(DTN->getBlock()), T, NewAVs);

  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if the W bits starting at position B1 in cell RC1 are equal to the
// W bits starting at position B2 in cell RC2.
bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
      uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
      uint16_t W) {
  for (uint16_t i = 0; i < W; ++i) {
    // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
    if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
      return false;
    // Same for RC2[i].
    if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
      return false;
    if (RC1[B1+i] != RC2[B2+i])
      return false;
  }
  return true;
}

bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W) {
  assert(B < RC.width() && B+W <= RC.width());
  for (uint16_t i = B; i < B+W; ++i)
    if (!RC[i].is(0))
      return false;
  return true;
}

bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W, uint64_t &U) {
  assert(B < RC.width() && B+W <= RC.width());
  int64_t T = 0;
  for (uint16_t i = B+W; i > B; --i) {
    const BitTracker::BitValue &BV = RC[i-1];
    T <<= 1;
    if (BV.is(1))
      T |= 1;
    else if (!BV.is(0))
      return false;
  }
  U = T;
  return true;
}

bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
      MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
      unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
      unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    if (I->getSubReg() != OldSR)
      continue;
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

// For a register ref (pair Reg:Sub), set Begin to the position of the LSB
// of Sub in Reg, and set Width to the size of Sub in bits. Return true
// if this succeeded, otherwise return false.
bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
      unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
  const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0) {
    Begin = 0;
    Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
    return true;
  }

  Begin = 0;

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
    case Hexagon::HvxWRRegClassID:
      Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
      if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
        Begin = Width;
      break;
    default:
      return false;
  }
  return true;
}

// For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister.
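// For example, for
//   %2 = REG_SEQUENCE %0, isub_lo, %1, isub_hi
// this sets SL = %0 and SH = %1; both operand orders (low/high and high/low)
// are accepted.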
bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
      BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
      const MachineRegisterInfo &MRI) {
  assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE);
  unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
  auto &DstRC = *MRI.getRegClass(I.getOperand(0).getReg());
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());
  unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo);
  unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi);
  assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo));
  if (Sub1 == SubLo && Sub2 == SubHi) {
    SL = I.getOperand(1);
    SH = I.getOperand(3);
    return true;
  }
  if (Sub1 == SubHi && Sub2 == SubLo) {
    SH = I.getOperand(1);
    SL = I.getOperand(3);
    return true;
  }
  return false;
}

// All stores (except 64-bit stores) take a 32-bit register as the source
// of the value to be stored. If the instruction stores into a location
// that is shorter than 32 bits, some bits of the source register are not
// used. For each store instruction, calculate the set of used bits in
// the source register, and set appropriate bits in Bits. Return true if
// the bits are calculated, false otherwise.
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  switch (Opc) {
    // Store byte
    case S2_storerb_io:  // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io:  // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io:  // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io:  // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io:  // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io:  // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io:  // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io:  // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io:  // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io:  // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi:  // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi:  // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi:  // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi:  // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi:  // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi:  // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi:  // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi:  // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi:  // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi:  // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap:  // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap:  // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr:  // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr:  // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur:  // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur:  // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr:  // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr:  // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci:  // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci:  // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr:  // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr:  // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr:  // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr:  // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr:  // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr:  // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr:  // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr:  // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr:  // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr:  // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr:  // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr:  // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp:  // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp:  // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs:  // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs:  // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs:  // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs:  // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs:  // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs:  // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs:  // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs:  // if (!Pv4.new) memb(#u6)=Nt8.new
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io:  // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io:  // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io:  // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io:  // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io:  // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io:  // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi:  // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi:  // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi:  // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi:  // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap:  // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap:  // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr:  // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr:  // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur:  // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur:  // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr:  // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr:  // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr:  // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr:  // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr:  // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr:  // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:  // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:  // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:  // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:  // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:  // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:  // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:  // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:  // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:  // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:  // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:  // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:  // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:  // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:  // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:  // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:  // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:  // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:  // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:  // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:  // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:  // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:  // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:  // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:  // if (!Pv4.new) memh(#u6)=Rt.H32
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}

// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
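// For example, for A2_sxtb (sign-extend byte) only bits [Begin, Begin+8)
// of the source register in operand 1 are marked as used.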
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  if (D.mayStore()) {
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}

// Calculate the register class that matches Reg:Sub. For example, if
// %1 is a double register, then %1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0)
    return RC;
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());

  auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
    (void)HRI;
    assert(Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_lo) ||
           Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_hi));
  };

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::HvxWRRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::HvxVRRegClass;
  }
  return nullptr;
}

// Check if RD could be replaced with RS at any possible use of RD.
// For example, a predicate register cannot be replaced with an integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RD.Reg) ||
      !Register::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;

  return DRC == getFinalVRegClass(RS, MRI);
}

bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub) {
  if (!PreserveTiedOps)
    return false;
  return llvm::any_of(MRI.use_operands(Reg),
        [NewSub] (const MachineOperand &Op) -> bool {
          return Op.getSubReg() != NewSub && Op.isTied();
        });
}

namespace {

  class DeadCodeElimination {
  public:
    DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
      : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
        MDT(mdt), MRI(mf.getRegInfo()) {}

    bool run() {
      return runOnNode(MDT.getRootNode());
    }

  private:
    bool isDead(unsigned R) const;
    bool runOnNode(MachineDomTreeNode *N);

    MachineFunction &MF;
    const HexagonInstrInfo &HII;
    MachineDominatorTree &MDT;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      Register DR = UseI->getOperand(0).getReg();
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}

bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;

  for (auto *DTN : children<MachineDomTreeNode*>(N))
    Changed |= runOnNode(DTN);

  MachineBasicBlock *B = N->getBlock();
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      Register R = Op.getReg();
      if (!Register::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}

namespace {

  // Eliminate redundant instructions
  //
  // This transformation will identify instructions where the output register
  // is the same as one of its input registers. This only works on instructions
  // that define a single register (unlike post-increment loads, for example).
  // The equality check is actually more detailed: the code calculates which
  // bits of the output are used, and only compares these bits with the input
  // registers.
  // If the output matches an input, the instruction is replaced with COPY.
  // The copies will be removed by another transformation.
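  // For example, in
  //   %1 = A2_zxth %0
  // if the only uses of %1 read its low 16 bits, those bits are identical to
  // the low 16 bits of %0, so %1 can be redefined as a COPY of %0.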
  class RedundantInstrElimination : public Transformation {
  public:
    RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
          const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
        : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    bool computeUsedBits(unsigned Reg, BitVector &Bits);
    bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
          uint16_t Begin);
    bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN, Width;
  switch (Opc) {
    case S2_asl_i_p:
      ImN = 2;
      RegN = 1;
      Width = 64;
      break;
    case S2_asl_i_p_acc:
    case S2_asl_i_p_and:
    case S2_asl_i_p_nac:
    case S2_asl_i_p_or:
    case S2_asl_i_p_xacc:
      ImN = 3;
      RegN = 2;
      Width = 64;
      break;
    case S2_asl_i_r:
      ImN = 2;
      RegN = 1;
      Width = 32;
      break;
    case S2_addasl_rrri:
    case S4_andi_asl_ri:
    case S4_ori_asl_ri:
    case S4_addi_asl_ri:
    case S4_subi_asl_ri:
    case S2_asl_i_r_acc:
    case S2_asl_i_r_and:
    case S2_asl_i_r_nac:
    case S2_asl_i_r_or:
    case S2_asl_i_r_sat:
    case S2_asl_i_r_xacc:
      ImN = 3;
      RegN = 2;
      Width = 32;
      break;
    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  if (S == 0)
    return false;
  LostB = Width-S;
  LostE = Width;
  return true;
}

// Check if the instruction is a lossy shift right, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
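// For example, S2_lsr_i_r (logical shift right by immediate) with a shift
// amount of 3 loses bits [0, 3) of the shifted source operand.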
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN;
  switch (Opc) {
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      ImN = 3;
      RegN = 2;
      break;
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  LostB = 0;
  LostE = S;
  return true;
}

// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size as Reg in bits. If the calculation fails
// (i.e. the used bits are unknown), it returns false. Otherwise, it returns
// true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        Register DefR = UseI.getOperand(0).getReg();
        if (!Register::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  Bits |= Used;
  return true;
}

// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If it succeeds, set
// used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   %1 = S2_lsr_i_r %2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register %2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the least-significant bit
// of %2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = HRI.getRegSizeInBits(*RC);

    if (!GotBits)
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  if (GotBits)
    Bits |= T;
  return GotBits;
}

// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
      BitTracker::RegisterRef RS) {
  const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);

  unsigned DB, DW;
  if (!HBS::getSubregMask(RD, DB, DW, MRI))
    return false;
  unsigned SB, SW;
  if (!HBS::getSubregMask(RS, SB, SW, MRI))
    return false;
  if (SW != DW)
    return false;

  BitVector Used(DC.width());
  if (!computeUsedBits(RD.Reg, Used))
    return false;

  for (unsigned i = 0; i != DW; ++i)
    if (Used[i+DB] && DC[DB+i] != SC[SB+i])
      return false;
  return true;
}

bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
      const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
    NextI = std::next(I);
    MachineInstr *MI = &*I;

    if (MI->getOpcode() == TargetOpcode::COPY)
      continue;
    if (MI->isPHI() || MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
      continue;
    unsigned NumD = MI->getDesc().getNumDefs();
    if (NumD != 1)
      continue;

    BitTracker::RegisterRef RD = MI->getOperand(0);
    if (!BT.has(RD.Reg))
      continue;
    const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
    auto At = MachineBasicBlock::iterator(MI);

    // Find a source operand that is equal to the result.
    for (auto &Op : MI->uses()) {
      if (!Op.isReg())
        continue;
      BitTracker::RegisterRef RS = Op;
      if (!BT.has(RS.Reg))
        continue;
      if (!HBS::isTransparentCopy(RD, RS, MRI))
        continue;

      unsigned BN, BW;
      if (!HBS::getSubregMask(RS, BN, BW, MRI))
        continue;

      const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
      if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
        continue;

      // If found, replace the instruction with a COPY.
      const DebugLoc &DL = MI->getDebugLoc();
      const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
      Register NewR = MRI.createVirtualRegister(FRC);
      MachineInstr *CopyI =
          BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
              .addReg(RS.Reg, 0, RS.Sub);
      HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
      // This pass can create copies between registers that don't have the
      // exact same values. Updating the tracker has to involve updating
      // all dependent cells. Example:
      //   %1  = inst %2     ; %1 != %2, but used bits are equal
      //
      //   %3  = copy %2     ; <- inserted
      //    ... = %3          ; <- replaced from %2
      // Indirectly, we can create a "copy" between %1 and %2 even
      // though their exact values do not match.
      BT.visit(*CopyI);
      Changed = true;
      break;
    }
  }

  return Changed;
}

namespace {

  // Recognize instructions that produce constant values known at compile-time.
  // Replace them with register definitions that load these constants directly.
  class ConstGeneration : public Transformation {
  public:
    ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
          MachineRegisterInfo &mri)
        : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
    static bool isTfrConst(const MachineInstr &MI);

  private:
    unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
          MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_tfrsi:
    case Hexagon::A2_tfrpi:
    case Hexagon::PS_true:
    case Hexagon::PS_false:
    case Hexagon::CONST32:
    case Hexagon::CONST64:
      return true;
  }
  return false;
}

// Generate a transfer-immediate instruction that is appropriate for the
// register class and the actual value being transferred.
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  Register Reg = MRI.createVirtualRegister(RC);
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
        .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
          .addImm(C);
      return Reg;
    }

    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
          .addImm(int32_t(Hi))
          .addImm(int32_t(Lo));
      return Reg;
    }
    MachineFunction *MF = B.getParent();
    auto &HST = MF->getSubtarget<HexagonSubtarget>();

    // Disable CONST64 for tiny core since it takes a LD resource.
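    // (The check below still emits CONST64 on tiny core when the function is
    // optimized for size.)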
    if (!HST.isTinyCore() ||
        MF->getFunction().hasOptSize()) {
      BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
          .addImm(C);
      return Reg;
    }
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::PS_false;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::PS_true;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  return 0;
}

bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    if (isTfrConst(*I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!Register::isVirtualRegister(DR))
      continue;
    uint64_t U;
    const BitTracker::RegisterCell &DRC = BT.lookup(DR);
    if (HBS::getConst(DRC, 0, DRC.width(), U)) {
      int64_t C = U;
      DebugLoc DL = I->getDebugLoc();
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, DRC);
        Changed = true;
      }
    }
  }
  return Changed;
}

namespace {

  // Identify pairs of available registers which hold identical values.
  // In such cases, only one of them needs to be calculated; the other one
  // will be defined as a copy of the first.
  class CopyGeneration : public Transformation {
  public:
    CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
          const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
        : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool findMatch(const BitTracker::RegisterRef &Inp,
          BitTracker::RegisterRef &Out, const RegisterSet &AVs);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
    RegisterSet Forbidden;
  };

  // Eliminate register copies RD = RS, by replacing the uses of RD
  // with uses of RS.
  class CopyPropagation : public Transformation {
  public:
    CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
        : Transformation(false), HRI(hri), MRI(mri) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

    static bool isCopyReg(unsigned Opc, bool NoConv);

  private:
    bool propagateRegCopy(MachineInstr &MI);

    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
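/// Registers present in the Forbidden set (those already replaced with a
/// generated copy) are never returned as a match.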
bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
      BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
  if (!BT.has(Inp.Reg))
    return false;
  const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
  auto *FRC = HBS::getFinalVRegClass(Inp, MRI);
  unsigned B, W;
  if (!HBS::getSubregMask(Inp, B, W, MRI))
    return false;

  for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
    if (!BT.has(R) || Forbidden[R])
      continue;
    const BitTracker::RegisterCell &RC = BT.lookup(R);
    unsigned RW = RC.width();
    if (W == RW) {
      if (FRC != MRI.getRegClass(R))
        continue;
      if (!HBS::isTransparentCopy(R, Inp, MRI))
        continue;
      if (!HBS::isEqual(InpRC, B, RC, 0, W))
        continue;
      Out.Reg = R;
      Out.Sub = 0;
      return true;
    }
    // Check if there is a super-register, whose part (with a subregister)
    // is equal to the input.
    // Only do double registers for now.
    if (W*2 != RW)
      continue;
    if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
      continue;

    if (HBS::isEqual(InpRC, B, RC, 0, W))
      Out.Sub = Hexagon::isub_lo;
    else if (HBS::isEqual(InpRC, B, RC, W, W))
      Out.Sub = Hexagon::isub_hi;
    else
      continue;
    Out.Reg = R;
    if (HBS::isTransparentCopy(Out, Inp, MRI))
      return true;
  }
  return false;
}

bool CopyGeneration::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  if (!BT.reached(&B))
    return false;
  RegisterSet AVB(AVs);
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
       ++I, AVB.insert(Defs)) {
    NextI = std::next(I);
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);

    unsigned Opc = I->getOpcode();
    if (CopyPropagation::isCopyReg(Opc, false) ||
        ConstGeneration::isTfrConst(*I))
      continue;

    DebugLoc DL = I->getDebugLoc();
    auto At = I->isPHI() ? B.getFirstNonPHI() : I;

    for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
      BitTracker::RegisterRef MR;
      auto *FRC = HBS::getFinalVRegClass(R, MRI);

      if (findMatch(R, MR, AVB)) {
        Register NewR = MRI.createVirtualRegister(FRC);
        BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
            .addReg(MR.Reg, 0, MR.Sub);
        BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
        HBS::replaceReg(R, NewR, MRI);
        Forbidden.insert(R);
        continue;
      }

      if (FRC == &Hexagon::DoubleRegsRegClass ||
          FRC == &Hexagon::HvxWRRegClass) {
        // Try to generate REG_SEQUENCE.
        unsigned SubLo = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_lo);
        unsigned SubHi = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_hi);
        BitTracker::RegisterRef TL = { R, SubLo };
        BitTracker::RegisterRef TH = { R, SubHi };
        BitTracker::RegisterRef ML, MH;
        if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) {
          auto *FRC = HBS::getFinalVRegClass(R, MRI);
          Register NewR = MRI.createVirtualRegister(FRC);
          BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR)
              .addReg(ML.Reg, 0, ML.Sub)
              .addImm(SubLo)
              .addReg(MH.Reg, 0, MH.Sub)
              .addImm(SubHi);
          BT.put(BitTracker::RegisterRef(NewR), BT.get(R));
          HBS::replaceReg(R, NewR, MRI);
          Forbidden.insert(R);
        }
      }
    }
  }

  return Changed;
}

bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
  switch (Opc) {
    case TargetOpcode::COPY:
    case TargetOpcode::REG_SEQUENCE:
    case Hexagon::A4_combineir:
    case Hexagon::A4_combineri:
      return true;
    case Hexagon::A2_tfr:
    case Hexagon::A2_tfrp:
    case Hexagon::A2_combinew:
    case Hexagon::V6_vcombine:
      return NoConv;
    default:
      break;
  }
  return false;
}

bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
  bool Changed = false;
  unsigned Opc = MI.getOpcode();
  BitTracker::RegisterRef RD = MI.getOperand(0);
  assert(MI.getOperand(0).getSubReg() == 0);

  switch (Opc) {
    case TargetOpcode::COPY:
    case Hexagon::A2_tfr:
    case Hexagon::A2_tfrp: {
      BitTracker::RegisterRef RS = MI.getOperand(1);
      if (!HBS::isTransparentCopy(RD, RS, MRI))
        break;
      if (RS.Sub != 0)
        Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
      else
        Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
      break;
    }
    case TargetOpcode::REG_SEQUENCE: {
      BitTracker::RegisterRef SL, SH;
      if (HBS::parseRegSequence(MI, SL, SH, MRI)) {
        const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
        unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
        unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
        Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI);
        Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI);
      }
      break;
    }
    case Hexagon::A2_combinew:
    case Hexagon::V6_vcombine: {
      const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
      unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
      unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
      BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
      Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI);
      Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI);
      break;
    }
    case Hexagon::A4_combineir:
    case Hexagon::A4_combineri: {
      unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
      unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::isub_lo
Hexagon::isub_lo 1714 : Hexagon::isub_hi; 1715 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1716 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1717 break; 1718 } 1719 } 1720 return Changed; 1721 } 1722 1723 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1724 std::vector<MachineInstr*> Instrs; 1725 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1726 Instrs.push_back(&*I); 1727 1728 bool Changed = false; 1729 for (auto I : Instrs) { 1730 unsigned Opc = I->getOpcode(); 1731 if (!CopyPropagation::isCopyReg(Opc, true)) 1732 continue; 1733 Changed |= propagateRegCopy(*I); 1734 } 1735 1736 return Changed; 1737 } 1738 1739 namespace { 1740 1741 // Recognize patterns that can be simplified and replace them with the 1742 // simpler forms. 1743 // This is by no means complete 1744 class BitSimplification : public Transformation { 1745 public: 1746 BitSimplification(BitTracker &bt, const MachineDominatorTree &mdt, 1747 const HexagonInstrInfo &hii, const HexagonRegisterInfo &hri, 1748 MachineRegisterInfo &mri, MachineFunction &mf) 1749 : Transformation(true), MDT(mdt), HII(hii), HRI(hri), MRI(mri), 1750 MF(mf), BT(bt) {} 1751 1752 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1753 1754 private: 1755 struct RegHalf : public BitTracker::RegisterRef { 1756 bool Low; // Low/High halfword. 1757 }; 1758 1759 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1760 unsigned B, RegHalf &RH); 1761 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum); 1762 1763 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1764 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1765 unsigned getCombineOpcode(bool HLow, bool LLow); 1766 1767 bool genStoreUpperHalf(MachineInstr *MI); 1768 bool genStoreImmediate(MachineInstr *MI); 1769 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1770 const BitTracker::RegisterCell &RC); 1771 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1772 const BitTracker::RegisterCell &RC); 1773 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1774 const BitTracker::RegisterCell &RC); 1775 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1776 const BitTracker::RegisterCell &RC); 1777 bool genBitSplit(MachineInstr *MI, BitTracker::RegisterRef RD, 1778 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1779 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1780 const BitTracker::RegisterCell &RC); 1781 bool simplifyExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1782 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1783 bool simplifyRCmp0(MachineInstr *MI, BitTracker::RegisterRef RD); 1784 1785 // Cache of created instructions to avoid creating duplicates. 1786 // XXX Currently only used by genBitSplit. 1787 std::vector<MachineInstr*> NewMIs; 1788 1789 const MachineDominatorTree &MDT; 1790 const HexagonInstrInfo &HII; 1791 const HexagonRegisterInfo &HRI; 1792 MachineRegisterInfo &MRI; 1793 MachineFunction &MF; 1794 BitTracker &BT; 1795 }; 1796 1797 } // end anonymous namespace 1798 1799 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1800 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1801 // set the information about the found register in RH. 
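// The halfword position found within the source register determines the
// subregister and the Low flag (illustrative summary of the switch below,
// for a 64-bit source):
//   Pos 0  -> isub_lo, Low = true       Pos 16 -> isub_lo, Low = false
//   Pos 32 -> isub_hi, Low = true       Pos 48 -> isub_hi, Low = false
// For a 32-bit source the subregister index is cleared afterwards, since it
// is not valid for that register class.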
1802 bool BitSimplification::matchHalf(unsigned SelfR, 1803 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) { 1804 // XXX This could be searching in the set of available registers, in case 1805 // the match is not exact. 1806 1807 // Match 16-bit chunks, where the RC[B..B+15] references exactly one 1808 // register and all the bits B..B+15 match between RC and the register. 1809 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... }, 1810 // and RC = { [0]:0 [1-15]:v1[1-15]... }. 1811 bool Low = false; 1812 unsigned I = B; 1813 while (I < B+16 && RC[I].num()) 1814 I++; 1815 if (I == B+16) 1816 return false; 1817 1818 unsigned Reg = RC[I].RefI.Reg; 1819 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B. 1820 if (P < I-B) 1821 return false; 1822 unsigned Pos = P - (I-B); 1823 1824 if (Reg == 0 || Reg == SelfR) // Don't match "self". 1825 return false; 1826 if (!Register::isVirtualRegister(Reg)) 1827 return false; 1828 if (!BT.has(Reg)) 1829 return false; 1830 1831 const BitTracker::RegisterCell &SC = BT.lookup(Reg); 1832 if (Pos+16 > SC.width()) 1833 return false; 1834 1835 for (unsigned i = 0; i < 16; ++i) { 1836 const BitTracker::BitValue &RV = RC[i+B]; 1837 if (RV.Type == BitTracker::BitValue::Ref) { 1838 if (RV.RefI.Reg != Reg) 1839 return false; 1840 if (RV.RefI.Pos != i+Pos) 1841 return false; 1842 continue; 1843 } 1844 if (RC[i+B] != SC[i+Pos]) 1845 return false; 1846 } 1847 1848 unsigned Sub = 0; 1849 switch (Pos) { 1850 case 0: 1851 Sub = Hexagon::isub_lo; 1852 Low = true; 1853 break; 1854 case 16: 1855 Sub = Hexagon::isub_lo; 1856 Low = false; 1857 break; 1858 case 32: 1859 Sub = Hexagon::isub_hi; 1860 Low = true; 1861 break; 1862 case 48: 1863 Sub = Hexagon::isub_hi; 1864 Low = false; 1865 break; 1866 default: 1867 return false; 1868 } 1869 1870 RH.Reg = Reg; 1871 RH.Sub = Sub; 1872 RH.Low = Low; 1873 // If the subregister is not valid with the register, set it to 0. 1874 if (!HBS::getFinalVRegClass(RH, MRI)) 1875 RH.Sub = 0; 1876 1877 return true; 1878 } 1879 1880 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc, 1881 unsigned OpNum) { 1882 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF); 1883 auto *RRC = HBS::getFinalVRegClass(R, MRI); 1884 return OpRC->hasSubClassEq(RRC); 1885 } 1886 1887 // Check if RC matches the pattern of a S2_packhl. If so, return true and 1888 // set the inputs Rs and Rt. 1889 bool BitSimplification::matchPackhl(unsigned SelfR, 1890 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs, 1891 BitTracker::RegisterRef &Rt) { 1892 RegHalf L1, H1, L2, H2; 1893 1894 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1)) 1895 return false; 1896 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1)) 1897 return false; 1898 1899 // Rs = H1.L1, Rt = H2.L2 1900 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low) 1901 return false; 1902 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low) 1903 return false; 1904 1905 Rs = H1; 1906 Rt = H2; 1907 return true; 1908 } 1909 1910 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) { 1911 return HLow ? LLow ? Hexagon::A2_combine_ll 1912 : Hexagon::A2_combine_lh 1913 : LLow ? Hexagon::A2_combine_hl 1914 : Hexagon::A2_combine_hh; 1915 } 1916 1917 // If MI stores the upper halfword of a register (potentially obtained via 1918 // shifts or extracts), replace it with a storerf instruction. This could 1919 // cause the "extraction" code to become dead. 
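// For illustration (register numbers are hypothetical):
//   %1 = S2_lsr_i_r %0, 16
//   S2_storerh_io %2, 0, %1
// becomes
//   S2_storerf_io %2, 0, %0
// after which the shift feeding the store may become dead.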
bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_storerh_io)
    return false;

  MachineOperand &ValOp = MI->getOperand(2);
  BitTracker::RegisterRef RS = ValOp;
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  RegHalf H;
  if (!matchHalf(0, RC, 0, H))
    return false;
  if (H.Low)
    return false;
  MI->setDesc(HII.get(Hexagon::S2_storerf_io));
  ValOp.setReg(H.Reg);
  ValOp.setSubReg(H.Sub);
  return true;
}

// If MI stores a value known at compile-time, and the value is within a range
// that avoids using constant-extenders, replace it with a store-immediate.
bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  unsigned Align = 0;
  switch (Opc) {
    case Hexagon::S2_storeri_io:
      Align++;
      LLVM_FALLTHROUGH;
    case Hexagon::S2_storerh_io:
      Align++;
      LLVM_FALLTHROUGH;
    case Hexagon::S2_storerb_io:
      break;
    default:
      return false;
  }

  // Avoid stores to frame-indices (due to an unknown offset).
  if (!MI->getOperand(0).isReg())
    return false;
  MachineOperand &OffOp = MI->getOperand(1);
  if (!OffOp.isImm())
    return false;

  int64_t Off = OffOp.getImm();
  // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
  if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
    return false;
  // Source register:
  BitTracker::RegisterRef RS = MI->getOperand(2);
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  uint64_t U;
  if (!HBS::getConst(RC, 0, RC.width(), U))
    return false;

  // Only consider 8-bit values to avoid constant-extenders.
  int V;
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      V = int8_t(U);
      break;
    case Hexagon::S2_storerh_io:
      V = int16_t(U);
      break;
    case Hexagon::S2_storeri_io:
      V = int32_t(U);
      break;
    default:
      // Opc is already checked above to be one of the three store instructions.
      // This silences a -Wuninitialized false positive on GCC 5.4.
      llvm_unreachable("Unexpected store opcode");
  }
  if (!isInt<8>(V))
    return false;

  MI->RemoveOperand(2);
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
      break;
    case Hexagon::S2_storerh_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
      break;
    case Hexagon::S2_storeri_io:
      MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
      break;
  }
  MI->addOperand(MachineOperand::CreateImm(V));
  return true;
}

// If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
// last instruction in a sequence that results in something equivalent to
// the pack-halfwords. The intent is to cause the entire sequence to become
// dead.
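// For illustration (register numbers are hypothetical), a sequence such as
//   %2 = A2_combine_ll %0, %1
//   %3 = A2_combine_hh %0, %1
//   %4 = A2_combinew %3, %2
// produces { %0.h, %1.h, %0.l, %1.l }, which is the same value as
//   %4 = S2_packhl %0, %1
// so the final combine is rewritten and the rest may become dead.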
bool BitSimplification::genPackhl(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc == Hexagon::S2_packhl)
    return false;
  BitTracker::RegisterRef Rs, Rt;
  if (!matchPackhl(RD.Reg, RC, Rs, Rt))
    return false;
  if (!validateReg(Rs, Hexagon::S2_packhl, 1) ||
      !validateReg(Rt, Hexagon::S2_packhl, 2))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  Register NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
  DebugLoc DL = MI->getDebugLoc();
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);
  BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
      .addReg(Rs.Reg, 0, Rs.Sub)
      .addReg(Rt.Reg, 0, Rt.Sub);
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}

// If MI produces a halfword of the input in the low half of the output,
// replace it with zero-extend or extractu.
bool BitSimplification::genExtractHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L;
  // Check for halfword in low 16 bits, zeros elsewhere.
  if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
    return false;

  unsigned Opc = MI->getOpcode();
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Prefer zxth, since zxth can go in any slot, while extractu only in
  // slots 2 and 3.
  unsigned NewR = 0;
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);
  if (L.Low && Opc != Hexagon::A2_zxth) {
    if (validateReg(L, Hexagon::A2_zxth, 1)) {
      NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
          .addReg(L.Reg, 0, L.Sub);
    }
  } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
    if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) {
      NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
          .addReg(L.Reg, 0, L.Sub)
          .addImm(16);
    }
  }
  if (NewR == 0)
    return false;
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}

// If MI is equivalent to a combine(.L/.H, .L/.H), replace it with the
// combine.
bool BitSimplification::genCombineHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L, H;
  // Check for combine h/l
  if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
    return false;
  // Do nothing if this is just a reg copy.
  if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
    return false;

  unsigned Opc = MI->getOpcode();
  unsigned COpc = getCombineOpcode(H.Low, L.Low);
  if (COpc == Opc)
    return false;
  if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  auto At = MI->isPHI() ?
B.getFirstNonPHI() 2106 : MachineBasicBlock::iterator(MI); 2107 BuildMI(B, At, DL, HII.get(COpc), NewR) 2108 .addReg(H.Reg, 0, H.Sub) 2109 .addReg(L.Reg, 0, L.Sub); 2110 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2111 BT.put(BitTracker::RegisterRef(NewR), RC); 2112 return true; 2113 } 2114 2115 // If MI resets high bits of a register and keeps the lower ones, replace it 2116 // with zero-extend byte/half, and-immediate, or extractu, as appropriate. 2117 bool BitSimplification::genExtractLow(MachineInstr *MI, 2118 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2119 unsigned Opc = MI->getOpcode(); 2120 switch (Opc) { 2121 case Hexagon::A2_zxtb: 2122 case Hexagon::A2_zxth: 2123 case Hexagon::S2_extractu: 2124 return false; 2125 } 2126 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) { 2127 int32_t Imm = MI->getOperand(2).getImm(); 2128 if (isInt<10>(Imm)) 2129 return false; 2130 } 2131 2132 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 2133 return false; 2134 unsigned W = RC.width(); 2135 while (W > 0 && RC[W-1].is(0)) 2136 W--; 2137 if (W == 0 || W == RC.width()) 2138 return false; 2139 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb 2140 : (W == 16) ? Hexagon::A2_zxth 2141 : (W < 10) ? Hexagon::A2_andir 2142 : Hexagon::S2_extractu; 2143 MachineBasicBlock &B = *MI->getParent(); 2144 DebugLoc DL = MI->getDebugLoc(); 2145 2146 for (auto &Op : MI->uses()) { 2147 if (!Op.isReg()) 2148 continue; 2149 BitTracker::RegisterRef RS = Op; 2150 if (!BT.has(RS.Reg)) 2151 continue; 2152 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2153 unsigned BN, BW; 2154 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 2155 continue; 2156 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W)) 2157 continue; 2158 if (!validateReg(RS, NewOpc, 1)) 2159 continue; 2160 2161 Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2162 auto At = MI->isPHI() ? B.getFirstNonPHI() 2163 : MachineBasicBlock::iterator(MI); 2164 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR) 2165 .addReg(RS.Reg, 0, RS.Sub); 2166 if (NewOpc == Hexagon::A2_andir) 2167 MIB.addImm((1 << W) - 1); 2168 else if (NewOpc == Hexagon::S2_extractu) 2169 MIB.addImm(W).addImm(0); 2170 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2171 BT.put(BitTracker::RegisterRef(NewR), RC); 2172 return true; 2173 } 2174 return false; 2175 } 2176 2177 bool BitSimplification::genBitSplit(MachineInstr *MI, 2178 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC, 2179 const RegisterSet &AVs) { 2180 if (!GenBitSplit) 2181 return false; 2182 if (MaxBitSplit.getNumOccurrences()) { 2183 if (CountBitSplit >= MaxBitSplit) 2184 return false; 2185 } 2186 2187 unsigned Opc = MI->getOpcode(); 2188 switch (Opc) { 2189 case Hexagon::A4_bitsplit: 2190 case Hexagon::A4_bitspliti: 2191 return false; 2192 } 2193 2194 unsigned W = RC.width(); 2195 if (W != 32) 2196 return false; 2197 2198 auto ctlz = [] (const BitTracker::RegisterCell &C) -> unsigned { 2199 unsigned Z = C.width(); 2200 while (Z > 0 && C[Z-1].is(0)) 2201 --Z; 2202 return C.width() - Z; 2203 }; 2204 2205 // Count the number of leading zeros in the target RC. 2206 unsigned Z = ctlz(RC); 2207 if (Z == 0 || Z == W) 2208 return false; 2209 2210 // A simplistic analysis: assume the source register (the one being split) 2211 // is fully unknown, and that all its bits are self-references. 
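  // For illustration (register numbers are hypothetical):
  //   %1 = S2_extractu %0, 27, 5      ; upper 27 bits of %0, 5 leading zeros
  //   %2 = A2_andir %0, 31            ; lower 5 bits of %0
  // Both cells are made of consecutive bits of %0, so the pair can be
  // produced by a single
  //   %3 = A4_bitspliti %0, 5
  // with %1 replaced by %3.isub_hi and %2 replaced by %3.isub_lo.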
2212 const BitTracker::BitValue &B0 = RC[0]; 2213 if (B0.Type != BitTracker::BitValue::Ref) 2214 return false; 2215 2216 unsigned SrcR = B0.RefI.Reg; 2217 unsigned SrcSR = 0; 2218 unsigned Pos = B0.RefI.Pos; 2219 2220 // All the non-zero bits should be consecutive bits from the same register. 2221 for (unsigned i = 1; i < W-Z; ++i) { 2222 const BitTracker::BitValue &V = RC[i]; 2223 if (V.Type != BitTracker::BitValue::Ref) 2224 return false; 2225 if (V.RefI.Reg != SrcR || V.RefI.Pos != Pos+i) 2226 return false; 2227 } 2228 2229 // Now, find the other bitfield among AVs. 2230 for (unsigned S = AVs.find_first(); S; S = AVs.find_next(S)) { 2231 // The number of leading zeros here should be the number of trailing 2232 // non-zeros in RC. 2233 unsigned SRC = MRI.getRegClass(S)->getID(); 2234 if (SRC != Hexagon::IntRegsRegClassID && 2235 SRC != Hexagon::DoubleRegsRegClassID) 2236 continue; 2237 if (!BT.has(S)) 2238 continue; 2239 const BitTracker::RegisterCell &SC = BT.lookup(S); 2240 if (SC.width() != W || ctlz(SC) != W-Z) 2241 continue; 2242 // The Z lower bits should now match SrcR. 2243 const BitTracker::BitValue &S0 = SC[0]; 2244 if (S0.Type != BitTracker::BitValue::Ref || S0.RefI.Reg != SrcR) 2245 continue; 2246 unsigned P = S0.RefI.Pos; 2247 2248 if (Pos <= P && (Pos + W-Z) != P) 2249 continue; 2250 if (P < Pos && (P + Z) != Pos) 2251 continue; 2252 // The starting bitfield position must be at a subregister boundary. 2253 if (std::min(P, Pos) != 0 && std::min(P, Pos) != 32) 2254 continue; 2255 2256 unsigned I; 2257 for (I = 1; I < Z; ++I) { 2258 const BitTracker::BitValue &V = SC[I]; 2259 if (V.Type != BitTracker::BitValue::Ref) 2260 break; 2261 if (V.RefI.Reg != SrcR || V.RefI.Pos != P+I) 2262 break; 2263 } 2264 if (I != Z) 2265 continue; 2266 2267 // Generate bitsplit where S is defined. 2268 if (MaxBitSplit.getNumOccurrences()) 2269 CountBitSplit++; 2270 MachineInstr *DefS = MRI.getVRegDef(S); 2271 assert(DefS != nullptr); 2272 DebugLoc DL = DefS->getDebugLoc(); 2273 MachineBasicBlock &B = *DefS->getParent(); 2274 auto At = DefS->isPHI() ? B.getFirstNonPHI() 2275 : MachineBasicBlock::iterator(DefS); 2276 if (MRI.getRegClass(SrcR)->getID() == Hexagon::DoubleRegsRegClassID) 2277 SrcSR = (std::min(Pos, P) == 32) ? Hexagon::isub_hi : Hexagon::isub_lo; 2278 if (!validateReg({SrcR,SrcSR}, Hexagon::A4_bitspliti, 1)) 2279 continue; 2280 unsigned ImmOp = Pos <= P ? W-Z : Z; 2281 2282 // Find an existing bitsplit instruction if one already exists. 2283 unsigned NewR = 0; 2284 for (MachineInstr *In : NewMIs) { 2285 if (In->getOpcode() != Hexagon::A4_bitspliti) 2286 continue; 2287 MachineOperand &Op1 = In->getOperand(1); 2288 if (Op1.getReg() != SrcR || Op1.getSubReg() != SrcSR) 2289 continue; 2290 if (In->getOperand(2).getImm() != ImmOp) 2291 continue; 2292 // Check if the target register is available here. 2293 MachineOperand &Op0 = In->getOperand(0); 2294 MachineInstr *DefI = MRI.getVRegDef(Op0.getReg()); 2295 assert(DefI != nullptr); 2296 if (!MDT.dominates(DefI, &*At)) 2297 continue; 2298 2299 // Found one that can be reused. 
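      // (Its def dominates the current insertion point and it splits the same
      // source register at the same position, so its result can be used
      // directly instead of creating another bitsplit.)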
2300 assert(Op0.getSubReg() == 0); 2301 NewR = Op0.getReg(); 2302 break; 2303 } 2304 if (!NewR) { 2305 NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); 2306 auto NewBS = BuildMI(B, At, DL, HII.get(Hexagon::A4_bitspliti), NewR) 2307 .addReg(SrcR, 0, SrcSR) 2308 .addImm(ImmOp); 2309 NewMIs.push_back(NewBS); 2310 } 2311 if (Pos <= P) { 2312 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_lo, MRI); 2313 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_hi, MRI); 2314 } else { 2315 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_lo, MRI); 2316 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_hi, MRI); 2317 } 2318 return true; 2319 } 2320 2321 return false; 2322 } 2323 2324 // Check for tstbit simplification opportunity, where the bit being checked 2325 // can be tracked back to another register. For example: 2326 // %2 = S2_lsr_i_r %1, 5 2327 // %3 = S2_tstbit_i %2, 0 2328 // => 2329 // %3 = S2_tstbit_i %1, 5 2330 bool BitSimplification::simplifyTstbit(MachineInstr *MI, 2331 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2332 unsigned Opc = MI->getOpcode(); 2333 if (Opc != Hexagon::S2_tstbit_i) 2334 return false; 2335 2336 unsigned BN = MI->getOperand(2).getImm(); 2337 BitTracker::RegisterRef RS = MI->getOperand(1); 2338 unsigned F, W; 2339 DebugLoc DL = MI->getDebugLoc(); 2340 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI)) 2341 return false; 2342 MachineBasicBlock &B = *MI->getParent(); 2343 auto At = MI->isPHI() ? B.getFirstNonPHI() 2344 : MachineBasicBlock::iterator(MI); 2345 2346 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2347 const BitTracker::BitValue &V = SC[F+BN]; 2348 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) { 2349 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg); 2350 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is 2351 // a double register, need to use a subregister and adjust bit 2352 // number. 2353 unsigned P = std::numeric_limits<unsigned>::max(); 2354 BitTracker::RegisterRef RR(V.RefI.Reg, 0); 2355 if (TC == &Hexagon::DoubleRegsRegClass) { 2356 P = V.RefI.Pos; 2357 RR.Sub = Hexagon::isub_lo; 2358 if (P >= 32) { 2359 P -= 32; 2360 RR.Sub = Hexagon::isub_hi; 2361 } 2362 } else if (TC == &Hexagon::IntRegsRegClass) { 2363 P = V.RefI.Pos; 2364 } 2365 if (P != std::numeric_limits<unsigned>::max()) { 2366 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2367 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR) 2368 .addReg(RR.Reg, 0, RR.Sub) 2369 .addImm(P); 2370 HBS::replaceReg(RD.Reg, NewR, MRI); 2371 BT.put(NewR, RC); 2372 return true; 2373 } 2374 } else if (V.is(0) || V.is(1)) { 2375 Register NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2376 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true; 2377 BuildMI(B, At, DL, HII.get(NewOpc), NewR); 2378 HBS::replaceReg(RD.Reg, NewR, MRI); 2379 return true; 2380 } 2381 2382 return false; 2383 } 2384 2385 // Detect whether RD is a bitfield extract (sign- or zero-extended) of 2386 // some register from the AVs set. Create a new corresponding instruction 2387 // at the location of MI. The intent is to recognize situations where 2388 // a sequence of instructions performs an operation that is equivalent to 2389 // an extract operation, such as a shift left followed by a shift right. 
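// For example (register numbers are hypothetical):
//   %1 = S2_asl_i_r %0, 20
//   %2 = S2_asr_i_r %1, 20
// leaves in %2 the low 12 bits of %0, sign-extended, i.e. the same value as
//   %2 = S4_extract %0, 12, 0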
2390 bool BitSimplification::simplifyExtractLow(MachineInstr *MI, 2391 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC, 2392 const RegisterSet &AVs) { 2393 if (!GenExtract) 2394 return false; 2395 if (MaxExtract.getNumOccurrences()) { 2396 if (CountExtract >= MaxExtract) 2397 return false; 2398 CountExtract++; 2399 } 2400 2401 unsigned W = RC.width(); 2402 unsigned RW = W; 2403 unsigned Len; 2404 bool Signed; 2405 2406 // The code is mostly class-independent, except for the part that generates 2407 // the extract instruction, and establishes the source register (in case it 2408 // needs to use a subregister). 2409 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2410 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass) 2411 return false; 2412 assert(RD.Sub == 0); 2413 2414 // Observation: 2415 // If the cell has a form of 00..0xx..x with k zeros and n remaining 2416 // bits, this could be an extractu of the n bits, but it could also be 2417 // an extractu of a longer field which happens to have 0s in the top 2418 // bit positions. 2419 // The same logic applies to sign-extended fields. 2420 // 2421 // Do not check for the extended extracts, since it would expand the 2422 // search space quite a bit. The search may be expensive as it is. 2423 2424 const BitTracker::BitValue &TopV = RC[W-1]; 2425 2426 // Eliminate candidates that have self-referential bits, since they 2427 // cannot be extracts from other registers. Also, skip registers that 2428 // have compile-time constant values. 2429 bool IsConst = true; 2430 for (unsigned I = 0; I != W; ++I) { 2431 const BitTracker::BitValue &V = RC[I]; 2432 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == RD.Reg) 2433 return false; 2434 IsConst = IsConst && (V.is(0) || V.is(1)); 2435 } 2436 if (IsConst) 2437 return false; 2438 2439 if (TopV.is(0) || TopV.is(1)) { 2440 bool S = TopV.is(1); 2441 for (--W; W > 0 && RC[W-1].is(S); --W) 2442 ; 2443 Len = W; 2444 Signed = S; 2445 // The sign bit must be a part of the field being extended. 2446 if (Signed) 2447 ++Len; 2448 } else { 2449 // This could still be a sign-extended extract. 2450 assert(TopV.Type == BitTracker::BitValue::Ref); 2451 if (TopV.RefI.Reg == RD.Reg || TopV.RefI.Pos == W-1) 2452 return false; 2453 for (--W; W > 0 && RC[W-1] == TopV; --W) 2454 ; 2455 // The top bits of RC are copies of TopV. One occurrence of TopV will 2456 // be a part of the field. 2457 Len = W + 1; 2458 Signed = true; 2459 } 2460 2461 // This would be just a copy. It should be handled elsewhere. 2462 if (Len == RW) 2463 return false; 2464 2465 LLVM_DEBUG({ 2466 dbgs() << __func__ << " on reg: " << printReg(RD.Reg, &HRI, RD.Sub) 2467 << ", MI: " << *MI; 2468 dbgs() << "Cell: " << RC << '\n'; 2469 dbgs() << "Expected bitfield size: " << Len << " bits, " 2470 << (Signed ? "sign" : "zero") << "-extended\n"; 2471 }); 2472 2473 bool Changed = false; 2474 2475 for (unsigned R = AVs.find_first(); R != 0; R = AVs.find_next(R)) { 2476 if (!BT.has(R)) 2477 continue; 2478 const BitTracker::RegisterCell &SC = BT.lookup(R); 2479 unsigned SW = SC.width(); 2480 2481 // The source can be longer than the destination, as long as its size is 2482 // a multiple of the size of the destination. Also, we would need to be 2483 // able to refer to the subregister in the source that would be of the 2484 // same size as the destination, but only check the sizes here. 
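    // E.g. (illustrative): with a 32-bit destination a 64-bit source is
    // acceptable, but the matched field must then lie entirely within either
    // isub_lo or isub_hi of the source, which is checked when scanning the
    // offsets below.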
2485 if (SW < RW || (SW % RW) != 0) 2486 continue; 2487 2488 // The field can start at any offset in SC as long as it contains Len 2489 // bits and does not cross subregister boundary (if the source register 2490 // is longer than the destination). 2491 unsigned Off = 0; 2492 while (Off <= SW-Len) { 2493 unsigned OE = (Off+Len)/RW; 2494 if (OE != Off/RW) { 2495 // The assumption here is that if the source (R) is longer than the 2496 // destination, then the destination is a sequence of words of 2497 // size RW, and each such word in R can be accessed via a subregister. 2498 // 2499 // If the beginning and the end of the field cross the subregister 2500 // boundary, advance to the next subregister. 2501 Off = OE*RW; 2502 continue; 2503 } 2504 if (HBS::isEqual(RC, 0, SC, Off, Len)) 2505 break; 2506 ++Off; 2507 } 2508 2509 if (Off > SW-Len) 2510 continue; 2511 2512 // Found match. 2513 unsigned ExtOpc = 0; 2514 if (Off == 0) { 2515 if (Len == 8) 2516 ExtOpc = Signed ? Hexagon::A2_sxtb : Hexagon::A2_zxtb; 2517 else if (Len == 16) 2518 ExtOpc = Signed ? Hexagon::A2_sxth : Hexagon::A2_zxth; 2519 else if (Len < 10 && !Signed) 2520 ExtOpc = Hexagon::A2_andir; 2521 } 2522 if (ExtOpc == 0) { 2523 ExtOpc = 2524 Signed ? (RW == 32 ? Hexagon::S4_extract : Hexagon::S4_extractp) 2525 : (RW == 32 ? Hexagon::S2_extractu : Hexagon::S2_extractup); 2526 } 2527 unsigned SR = 0; 2528 // This only recognizes isub_lo and isub_hi. 2529 if (RW != SW && RW*2 != SW) 2530 continue; 2531 if (RW != SW) 2532 SR = (Off/RW == 0) ? Hexagon::isub_lo : Hexagon::isub_hi; 2533 Off = Off % RW; 2534 2535 if (!validateReg({R,SR}, ExtOpc, 1)) 2536 continue; 2537 2538 // Don't generate the same instruction as the one being optimized. 2539 if (MI->getOpcode() == ExtOpc) { 2540 // All possible ExtOpc's have the source in operand(1). 2541 const MachineOperand &SrcOp = MI->getOperand(1); 2542 if (SrcOp.getReg() == R) 2543 continue; 2544 } 2545 2546 DebugLoc DL = MI->getDebugLoc(); 2547 MachineBasicBlock &B = *MI->getParent(); 2548 Register NewR = MRI.createVirtualRegister(FRC); 2549 auto At = MI->isPHI() ? B.getFirstNonPHI() 2550 : MachineBasicBlock::iterator(MI); 2551 auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR) 2552 .addReg(R, 0, SR); 2553 switch (ExtOpc) { 2554 case Hexagon::A2_sxtb: 2555 case Hexagon::A2_zxtb: 2556 case Hexagon::A2_sxth: 2557 case Hexagon::A2_zxth: 2558 break; 2559 case Hexagon::A2_andir: 2560 MIB.addImm((1u << Len) - 1); 2561 break; 2562 case Hexagon::S4_extract: 2563 case Hexagon::S2_extractu: 2564 case Hexagon::S4_extractp: 2565 case Hexagon::S2_extractup: 2566 MIB.addImm(Len) 2567 .addImm(Off); 2568 break; 2569 default: 2570 llvm_unreachable("Unexpected opcode"); 2571 } 2572 2573 HBS::replaceReg(RD.Reg, NewR, MRI); 2574 BT.put(BitTracker::RegisterRef(NewR), RC); 2575 Changed = true; 2576 break; 2577 } 2578 2579 return Changed; 2580 } 2581 2582 bool BitSimplification::simplifyRCmp0(MachineInstr *MI, 2583 BitTracker::RegisterRef RD) { 2584 unsigned Opc = MI->getOpcode(); 2585 if (Opc != Hexagon::A4_rcmpeqi && Opc != Hexagon::A4_rcmpneqi) 2586 return false; 2587 MachineOperand &CmpOp = MI->getOperand(2); 2588 if (!CmpOp.isImm() || CmpOp.getImm() != 0) 2589 return false; 2590 2591 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2592 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass) 2593 return false; 2594 assert(RD.Sub == 0); 2595 2596 MachineBasicBlock &B = *MI->getParent(); 2597 const DebugLoc &DL = MI->getDebugLoc(); 2598 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2599 : MachineBasicBlock::iterator(MI); 2600 bool KnownZ = true; 2601 bool KnownNZ = false; 2602 2603 BitTracker::RegisterRef SR = MI->getOperand(1); 2604 if (!BT.has(SR.Reg)) 2605 return false; 2606 const BitTracker::RegisterCell &SC = BT.lookup(SR.Reg); 2607 unsigned F, W; 2608 if (!HBS::getSubregMask(SR, F, W, MRI)) 2609 return false; 2610 2611 for (uint16_t I = F; I != F+W; ++I) { 2612 const BitTracker::BitValue &V = SC[I]; 2613 if (!V.is(0)) 2614 KnownZ = false; 2615 if (V.is(1)) 2616 KnownNZ = true; 2617 } 2618 2619 auto ReplaceWithConst = [&](int C) { 2620 Register NewR = MRI.createVirtualRegister(FRC); 2621 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), NewR) 2622 .addImm(C); 2623 HBS::replaceReg(RD.Reg, NewR, MRI); 2624 BitTracker::RegisterCell NewRC(W); 2625 for (uint16_t I = 0; I != W; ++I) { 2626 NewRC[I] = BitTracker::BitValue(C & 1); 2627 C = unsigned(C) >> 1; 2628 } 2629 BT.put(BitTracker::RegisterRef(NewR), NewRC); 2630 return true; 2631 }; 2632 2633 auto IsNonZero = [] (const MachineOperand &Op) { 2634 if (Op.isGlobal() || Op.isBlockAddress()) 2635 return true; 2636 if (Op.isImm()) 2637 return Op.getImm() != 0; 2638 if (Op.isCImm()) 2639 return !Op.getCImm()->isZero(); 2640 if (Op.isFPImm()) 2641 return !Op.getFPImm()->isZero(); 2642 return false; 2643 }; 2644 2645 auto IsZero = [] (const MachineOperand &Op) { 2646 if (Op.isGlobal() || Op.isBlockAddress()) 2647 return false; 2648 if (Op.isImm()) 2649 return Op.getImm() == 0; 2650 if (Op.isCImm()) 2651 return Op.getCImm()->isZero(); 2652 if (Op.isFPImm()) 2653 return Op.getFPImm()->isZero(); 2654 return false; 2655 }; 2656 2657 // If the source register is known to be 0 or non-0, the comparison can 2658 // be folded to a load of a constant. 2659 if (KnownZ || KnownNZ) { 2660 assert(KnownZ != KnownNZ && "Register cannot be both 0 and non-0"); 2661 return ReplaceWithConst(KnownZ == (Opc == Hexagon::A4_rcmpeqi)); 2662 } 2663 2664 // Special case: if the compare comes from a C2_muxii, then we know the 2665 // two possible constants that can be the source value. 2666 MachineInstr *InpDef = MRI.getVRegDef(SR.Reg); 2667 if (!InpDef) 2668 return false; 2669 if (SR.Sub == 0 && InpDef->getOpcode() == Hexagon::C2_muxii) { 2670 MachineOperand &Src1 = InpDef->getOperand(2); 2671 MachineOperand &Src2 = InpDef->getOperand(3); 2672 // Check if both are non-zero. 2673 bool KnownNZ1 = IsNonZero(Src1), KnownNZ2 = IsNonZero(Src2); 2674 if (KnownNZ1 && KnownNZ2) 2675 return ReplaceWithConst(Opc == Hexagon::A4_rcmpneqi); 2676 // Check if both are zero. 2677 bool KnownZ1 = IsZero(Src1), KnownZ2 = IsZero(Src2); 2678 if (KnownZ1 && KnownZ2) 2679 return ReplaceWithConst(Opc == Hexagon::A4_rcmpeqi); 2680 2681 // If for both operands we know that they are either 0 or non-0, 2682 // replace the comparison with a C2_muxii, using the same predicate 2683 // register, but with operands substituted with 0/1 accordingly. 2684 if ((KnownZ1 || KnownNZ1) && (KnownZ2 || KnownNZ2)) { 2685 Register NewR = MRI.createVirtualRegister(FRC); 2686 BuildMI(B, At, DL, HII.get(Hexagon::C2_muxii), NewR) 2687 .addReg(InpDef->getOperand(1).getReg()) 2688 .addImm(KnownZ1 == (Opc == Hexagon::A4_rcmpeqi)) 2689 .addImm(KnownZ2 == (Opc == Hexagon::A4_rcmpeqi)); 2690 HBS::replaceReg(RD.Reg, NewR, MRI); 2691 // Create a new cell with only the least significant bit unknown. 
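      // The new mux produces only 0 or 1, so every bit above bit 0 is known
      // to be zero. For illustration (register numbers are hypothetical):
      //   %1 = C2_muxii %0, 3, 0
      //   %2 = A4_rcmpeqi %1, 0
      // becomes
      //   %2 = C2_muxii %0, 0, 1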
2692 BitTracker::RegisterCell NewRC(W); 2693 NewRC[0] = BitTracker::BitValue::self(); 2694 NewRC.fill(1, W, BitTracker::BitValue::Zero); 2695 BT.put(BitTracker::RegisterRef(NewR), NewRC); 2696 return true; 2697 } 2698 } 2699 2700 return false; 2701 } 2702 2703 bool BitSimplification::processBlock(MachineBasicBlock &B, 2704 const RegisterSet &AVs) { 2705 if (!BT.reached(&B)) 2706 return false; 2707 bool Changed = false; 2708 RegisterSet AVB = AVs; 2709 RegisterSet Defs; 2710 2711 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) { 2712 MachineInstr *MI = &*I; 2713 Defs.clear(); 2714 HBS::getInstrDefs(*MI, Defs); 2715 2716 unsigned Opc = MI->getOpcode(); 2717 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE) 2718 continue; 2719 2720 if (MI->mayStore()) { 2721 bool T = genStoreUpperHalf(MI); 2722 T = T || genStoreImmediate(MI); 2723 Changed |= T; 2724 continue; 2725 } 2726 2727 if (Defs.count() != 1) 2728 continue; 2729 const MachineOperand &Op0 = MI->getOperand(0); 2730 if (!Op0.isReg() || !Op0.isDef()) 2731 continue; 2732 BitTracker::RegisterRef RD = Op0; 2733 if (!BT.has(RD.Reg)) 2734 continue; 2735 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2736 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg); 2737 2738 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) { 2739 bool T = genPackhl(MI, RD, RC); 2740 T = T || simplifyExtractLow(MI, RD, RC, AVB); 2741 Changed |= T; 2742 continue; 2743 } 2744 2745 if (FRC->getID() == Hexagon::IntRegsRegClassID) { 2746 bool T = genBitSplit(MI, RD, RC, AVB); 2747 T = T || simplifyExtractLow(MI, RD, RC, AVB); 2748 T = T || genExtractHalf(MI, RD, RC); 2749 T = T || genCombineHalf(MI, RD, RC); 2750 T = T || genExtractLow(MI, RD, RC); 2751 T = T || simplifyRCmp0(MI, RD); 2752 Changed |= T; 2753 continue; 2754 } 2755 2756 if (FRC->getID() == Hexagon::PredRegsRegClassID) { 2757 bool T = simplifyTstbit(MI, RD, RC); 2758 Changed |= T; 2759 continue; 2760 } 2761 } 2762 return Changed; 2763 } 2764 2765 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) { 2766 if (skipFunction(MF.getFunction())) 2767 return false; 2768 2769 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2770 auto &HRI = *HST.getRegisterInfo(); 2771 auto &HII = *HST.getInstrInfo(); 2772 2773 MDT = &getAnalysis<MachineDominatorTree>(); 2774 MachineRegisterInfo &MRI = MF.getRegInfo(); 2775 bool Changed; 2776 2777 Changed = DeadCodeElimination(MF, *MDT).run(); 2778 2779 const HexagonEvaluator HE(HRI, MRI, HII, MF); 2780 BitTracker BT(HE, MF); 2781 LLVM_DEBUG(BT.trace(true)); 2782 BT.run(); 2783 2784 MachineBasicBlock &Entry = MF.front(); 2785 2786 RegisterSet AIG; // Available registers for IG. 2787 ConstGeneration ImmG(BT, HII, MRI); 2788 Changed |= visitBlock(Entry, ImmG, AIG); 2789 2790 RegisterSet ARE; // Available registers for RIE. 2791 RedundantInstrElimination RIE(BT, HII, HRI, MRI); 2792 bool Ried = visitBlock(Entry, RIE, ARE); 2793 if (Ried) { 2794 Changed = true; 2795 BT.run(); 2796 } 2797 2798 RegisterSet ACG; // Available registers for CG. 2799 CopyGeneration CopyG(BT, HII, HRI, MRI); 2800 Changed |= visitBlock(Entry, CopyG, ACG); 2801 2802 RegisterSet ACP; // Available registers for CP. 2803 CopyPropagation CopyP(HRI, MRI); 2804 Changed |= visitBlock(Entry, CopyP, ACP); 2805 2806 Changed = DeadCodeElimination(MF, *MDT).run() || Changed; 2807 2808 BT.run(); 2809 RegisterSet ABS; // Available registers for BS. 
2810 BitSimplification BitS(BT, *MDT, HII, HRI, MRI, MF); 2811 Changed |= visitBlock(Entry, BitS, ABS); 2812 2813 Changed = DeadCodeElimination(MF, *MDT).run() || Changed; 2814 2815 if (Changed) { 2816 for (auto &B : MF) 2817 for (auto &I : B) 2818 I.clearKillInfo(); 2819 DeadCodeElimination(MF, *MDT).run(); 2820 } 2821 return Changed; 2822 } 2823 2824 // Recognize loops where the code at the end of the loop matches the code 2825 // before the entry of the loop, and the matching code is such that is can 2826 // be simplified. This pass relies on the bit simplification above and only 2827 // prepares code in a way that can be handled by the bit simplifcation. 2828 // 2829 // This is the motivating testcase (and explanation): 2830 // 2831 // { 2832 // loop0(.LBB0_2, r1) // %for.body.preheader 2833 // r5:4 = memd(r0++#8) 2834 // } 2835 // { 2836 // r3 = lsr(r4, #16) 2837 // r7:6 = combine(r5, r5) 2838 // } 2839 // { 2840 // r3 = insert(r5, #16, #16) 2841 // r7:6 = vlsrw(r7:6, #16) 2842 // } 2843 // .LBB0_2: 2844 // { 2845 // memh(r2+#4) = r5 2846 // memh(r2+#6) = r6 # R6 is really R5.H 2847 // } 2848 // { 2849 // r2 = add(r2, #8) 2850 // memh(r2+#0) = r4 2851 // memh(r2+#2) = r3 # R3 is really R4.H 2852 // } 2853 // { 2854 // r5:4 = memd(r0++#8) 2855 // } 2856 // { # "Shuffling" code that sets up R3 and R6 2857 // r3 = lsr(r4, #16) # so that their halves can be stored in the 2858 // r7:6 = combine(r5, r5) # next iteration. This could be folded into 2859 // } # the stores if the code was at the beginning 2860 // { # of the loop iteration. Since the same code 2861 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved 2862 // r7:6 = vlsrw(r7:6, #16) # there. 2863 // }:endloop0 2864 // 2865 // 2866 // The outcome: 2867 // 2868 // { 2869 // loop0(.LBB0_2, r1) 2870 // r5:4 = memd(r0++#8) 2871 // } 2872 // .LBB0_2: 2873 // { 2874 // memh(r2+#4) = r5 2875 // memh(r2+#6) = r5.h 2876 // } 2877 // { 2878 // r2 = add(r2, #8) 2879 // memh(r2+#0) = r4 2880 // memh(r2+#2) = r4.h 2881 // } 2882 // { 2883 // r5:4 = memd(r0++#8) 2884 // }:endloop0 2885 2886 namespace llvm { 2887 2888 FunctionPass *createHexagonLoopRescheduling(); 2889 void initializeHexagonLoopReschedulingPass(PassRegistry&); 2890 2891 } // end namespace llvm 2892 2893 namespace { 2894 2895 class HexagonLoopRescheduling : public MachineFunctionPass { 2896 public: 2897 static char ID; 2898 2899 HexagonLoopRescheduling() : MachineFunctionPass(ID) { 2900 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry()); 2901 } 2902 2903 bool runOnMachineFunction(MachineFunction &MF) override; 2904 2905 private: 2906 const HexagonInstrInfo *HII = nullptr; 2907 const HexagonRegisterInfo *HRI = nullptr; 2908 MachineRegisterInfo *MRI = nullptr; 2909 BitTracker *BTP = nullptr; 2910 2911 struct LoopCand { 2912 LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb, 2913 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {} 2914 2915 MachineBasicBlock *LB, *PB, *EB; 2916 }; 2917 using InstrList = std::vector<MachineInstr *>; 2918 struct InstrGroup { 2919 BitTracker::RegisterRef Inp, Out; 2920 InstrList Ins; 2921 }; 2922 struct PhiInfo { 2923 PhiInfo(MachineInstr &P, MachineBasicBlock &B); 2924 2925 unsigned DefR; 2926 BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register 2927 MachineBasicBlock *LB, *PB; // Loop Block, Preheader Block 2928 }; 2929 2930 static unsigned getDefReg(const MachineInstr *MI); 2931 bool isConst(unsigned Reg) const; 2932 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const; 2933 
bool isStoreInput(const MachineInstr *MI, unsigned DefR) const; 2934 bool isShuffleOf(unsigned OutR, unsigned InpR) const; 2935 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2, 2936 unsigned &InpR2) const; 2937 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB, 2938 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR); 2939 bool processLoop(LoopCand &C); 2940 }; 2941 2942 } // end anonymous namespace 2943 2944 char HexagonLoopRescheduling::ID = 0; 2945 2946 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched", 2947 "Hexagon Loop Rescheduling", false, false) 2948 2949 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P, 2950 MachineBasicBlock &B) { 2951 DefR = HexagonLoopRescheduling::getDefReg(&P); 2952 LB = &B; 2953 PB = nullptr; 2954 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) { 2955 const MachineOperand &OpB = P.getOperand(i+1); 2956 if (OpB.getMBB() == &B) { 2957 LR = P.getOperand(i); 2958 continue; 2959 } 2960 PB = OpB.getMBB(); 2961 PR = P.getOperand(i); 2962 } 2963 } 2964 2965 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) { 2966 RegisterSet Defs; 2967 HBS::getInstrDefs(*MI, Defs); 2968 if (Defs.count() != 1) 2969 return 0; 2970 return Defs.find_first(); 2971 } 2972 2973 bool HexagonLoopRescheduling::isConst(unsigned Reg) const { 2974 if (!BTP->has(Reg)) 2975 return false; 2976 const BitTracker::RegisterCell &RC = BTP->lookup(Reg); 2977 for (unsigned i = 0, w = RC.width(); i < w; ++i) { 2978 const BitTracker::BitValue &V = RC[i]; 2979 if (!V.is(0) && !V.is(1)) 2980 return false; 2981 } 2982 return true; 2983 } 2984 2985 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI, 2986 unsigned DefR) const { 2987 unsigned Opc = MI->getOpcode(); 2988 switch (Opc) { 2989 case TargetOpcode::COPY: 2990 case Hexagon::S2_lsr_i_r: 2991 case Hexagon::S2_asr_i_r: 2992 case Hexagon::S2_asl_i_r: 2993 case Hexagon::S2_lsr_i_p: 2994 case Hexagon::S2_asr_i_p: 2995 case Hexagon::S2_asl_i_p: 2996 case Hexagon::S2_insert: 2997 case Hexagon::A2_or: 2998 case Hexagon::A2_orp: 2999 case Hexagon::A2_and: 3000 case Hexagon::A2_andp: 3001 case Hexagon::A2_combinew: 3002 case Hexagon::A4_combineri: 3003 case Hexagon::A4_combineir: 3004 case Hexagon::A2_combineii: 3005 case Hexagon::A4_combineii: 3006 case Hexagon::A2_combine_ll: 3007 case Hexagon::A2_combine_lh: 3008 case Hexagon::A2_combine_hl: 3009 case Hexagon::A2_combine_hh: 3010 return true; 3011 } 3012 return false; 3013 } 3014 3015 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI, 3016 unsigned InpR) const { 3017 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { 3018 const MachineOperand &Op = MI->getOperand(i); 3019 if (!Op.isReg()) 3020 continue; 3021 if (Op.getReg() == InpR) 3022 return i == n-1; 3023 } 3024 return false; 3025 } 3026 3027 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const { 3028 if (!BTP->has(OutR) || !BTP->has(InpR)) 3029 return false; 3030 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR); 3031 for (unsigned i = 0, w = OutC.width(); i < w; ++i) { 3032 const BitTracker::BitValue &V = OutC[i]; 3033 if (V.Type != BitTracker::BitValue::Ref) 3034 continue; 3035 if (V.RefI.Reg != InpR) 3036 return false; 3037 } 3038 return true; 3039 } 3040 3041 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1, 3042 unsigned OutR2, unsigned &InpR2) const { 3043 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2)) 3044 return false; 3045 
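  // Compare the two output cells bit by bit: wherever OutR1 refers to a bit
  // of InpR1, OutR2 must refer to the same bit position of a single (yet to
  // be determined) register, which is then reported back as InpR2.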
const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1); 3046 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2); 3047 unsigned W = OutC1.width(); 3048 unsigned MatchR = 0; 3049 if (W != OutC2.width()) 3050 return false; 3051 for (unsigned i = 0; i < W; ++i) { 3052 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i]; 3053 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One) 3054 return false; 3055 if (V1.Type != BitTracker::BitValue::Ref) 3056 continue; 3057 if (V1.RefI.Pos != V2.RefI.Pos) 3058 return false; 3059 if (V1.RefI.Reg != InpR1) 3060 return false; 3061 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2) 3062 return false; 3063 if (!MatchR) 3064 MatchR = V2.RefI.Reg; 3065 else if (V2.RefI.Reg != MatchR) 3066 return false; 3067 } 3068 InpR2 = MatchR; 3069 return true; 3070 } 3071 3072 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, 3073 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR, 3074 unsigned NewPredR) { 3075 DenseMap<unsigned,unsigned> RegMap; 3076 3077 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR); 3078 Register PhiR = MRI->createVirtualRegister(PhiRC); 3079 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR) 3080 .addReg(NewPredR) 3081 .addMBB(&PB) 3082 .addReg(G.Inp.Reg) 3083 .addMBB(&LB); 3084 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR)); 3085 3086 for (unsigned i = G.Ins.size(); i > 0; --i) { 3087 const MachineInstr *SI = G.Ins[i-1]; 3088 unsigned DR = getDefReg(SI); 3089 const TargetRegisterClass *RC = MRI->getRegClass(DR); 3090 Register NewDR = MRI->createVirtualRegister(RC); 3091 DebugLoc DL = SI->getDebugLoc(); 3092 3093 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR); 3094 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) { 3095 const MachineOperand &Op = SI->getOperand(j); 3096 if (!Op.isReg()) { 3097 MIB.add(Op); 3098 continue; 3099 } 3100 if (!Op.isUse()) 3101 continue; 3102 unsigned UseR = RegMap[Op.getReg()]; 3103 MIB.addReg(UseR, 0, Op.getSubReg()); 3104 } 3105 RegMap.insert(std::make_pair(DR, NewDR)); 3106 } 3107 3108 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI); 3109 } 3110 3111 bool HexagonLoopRescheduling::processLoop(LoopCand &C) { 3112 LLVM_DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) 3113 << "\n"); 3114 std::vector<PhiInfo> Phis; 3115 for (auto &I : *C.LB) { 3116 if (!I.isPHI()) 3117 break; 3118 unsigned PR = getDefReg(&I); 3119 if (isConst(PR)) 3120 continue; 3121 bool BadUse = false, GoodUse = false; 3122 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) { 3123 MachineInstr *UseI = UI->getParent(); 3124 if (UseI->getParent() != C.LB) { 3125 BadUse = true; 3126 break; 3127 } 3128 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR)) 3129 GoodUse = true; 3130 } 3131 if (BadUse || !GoodUse) 3132 continue; 3133 3134 Phis.push_back(PhiInfo(I, *C.LB)); 3135 } 3136 3137 LLVM_DEBUG({ 3138 dbgs() << "Phis: {"; 3139 for (auto &I : Phis) { 3140 dbgs() << ' ' << printReg(I.DefR, HRI) << "=phi(" 3141 << printReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber() 3142 << ',' << printReg(I.LR.Reg, HRI, I.LR.Sub) << ":b" 3143 << I.LB->getNumber() << ')'; 3144 } 3145 dbgs() << " }\n"; 3146 }); 3147 3148 if (Phis.empty()) 3149 return false; 3150 3151 bool Changed = false; 3152 InstrList ShufIns; 3153 3154 // Go backwards in the block: for each bit shuffling instruction, check 3155 // if that instruction could potentially be moved to the front of the loop: 3156 // the output of the 
loop cannot be used in a non-shuffling instruction 3157 // in this loop. 3158 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) { 3159 if (I->isTerminator()) 3160 continue; 3161 if (I->isPHI()) 3162 break; 3163 3164 RegisterSet Defs; 3165 HBS::getInstrDefs(*I, Defs); 3166 if (Defs.count() != 1) 3167 continue; 3168 unsigned DefR = Defs.find_first(); 3169 if (!Register::isVirtualRegister(DefR)) 3170 continue; 3171 if (!isBitShuffle(&*I, DefR)) 3172 continue; 3173 3174 bool BadUse = false; 3175 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) { 3176 MachineInstr *UseI = UI->getParent(); 3177 if (UseI->getParent() == C.LB) { 3178 if (UseI->isPHI()) { 3179 // If the use is in a phi node in this loop, then it should be 3180 // the value corresponding to the back edge. 3181 unsigned Idx = UI.getOperandNo(); 3182 if (UseI->getOperand(Idx+1).getMBB() != C.LB) 3183 BadUse = true; 3184 } else { 3185 auto F = find(ShufIns, UseI); 3186 if (F == ShufIns.end()) 3187 BadUse = true; 3188 } 3189 } else { 3190 // There is a use outside of the loop, but there is no epilog block 3191 // suitable for a copy-out. 3192 if (C.EB == nullptr) 3193 BadUse = true; 3194 } 3195 if (BadUse) 3196 break; 3197 } 3198 3199 if (BadUse) 3200 continue; 3201 ShufIns.push_back(&*I); 3202 } 3203 3204 // Partition the list of shuffling instructions into instruction groups, 3205 // where each group has to be moved as a whole (i.e. a group is a chain of 3206 // dependent instructions). A group produces a single live output register, 3207 // which is meant to be the input of the loop phi node (although this is 3208 // not checked here yet). It also uses a single register as its input, 3209 // which is some value produced in the loop body. After moving the group 3210 // to the beginning of the loop, that input register would need to be 3211 // the loop-carried register (through a phi node) instead of the (currently 3212 // loop-carried) output register. 3213 using InstrGroupList = std::vector<InstrGroup>; 3214 InstrGroupList Groups; 3215 3216 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) { 3217 MachineInstr *SI = ShufIns[i]; 3218 if (SI == nullptr) 3219 continue; 3220 3221 InstrGroup G; 3222 G.Ins.push_back(SI); 3223 G.Out.Reg = getDefReg(SI); 3224 RegisterSet Inputs; 3225 HBS::getInstrUses(*SI, Inputs); 3226 3227 for (unsigned j = i+1; j < n; ++j) { 3228 MachineInstr *MI = ShufIns[j]; 3229 if (MI == nullptr) 3230 continue; 3231 RegisterSet Defs; 3232 HBS::getInstrDefs(*MI, Defs); 3233 // If this instruction does not define any pending inputs, skip it. 3234 if (!Defs.intersects(Inputs)) 3235 continue; 3236 // Otherwise, add it to the current group and remove the inputs that 3237 // are defined by MI. 3238 G.Ins.push_back(MI); 3239 Inputs.remove(Defs); 3240 // Then add all registers used by MI. 3241 HBS::getInstrUses(*MI, Inputs); 3242 ShufIns[j] = nullptr; 3243 } 3244 3245 // Only add a group if it requires at most one register. 
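    // (The single remaining input is the value that will be carried into the
    // group through the phi created later in moveGroup.)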
3246 if (Inputs.count() > 1) 3247 continue; 3248 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3249 return G.Out.Reg == P.LR.Reg; 3250 }; 3251 if (llvm::find_if(Phis, LoopInpEq) == Phis.end()) 3252 continue; 3253 3254 G.Inp.Reg = Inputs.find_first(); 3255 Groups.push_back(G); 3256 } 3257 3258 LLVM_DEBUG({ 3259 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3260 InstrGroup &G = Groups[i]; 3261 dbgs() << "Group[" << i << "] inp: " 3262 << printReg(G.Inp.Reg, HRI, G.Inp.Sub) 3263 << " out: " << printReg(G.Out.Reg, HRI, G.Out.Sub) << "\n"; 3264 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j) 3265 dbgs() << " " << *G.Ins[j]; 3266 } 3267 }); 3268 3269 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3270 InstrGroup &G = Groups[i]; 3271 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg)) 3272 continue; 3273 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3274 return G.Out.Reg == P.LR.Reg; 3275 }; 3276 auto F = llvm::find_if(Phis, LoopInpEq); 3277 if (F == Phis.end()) 3278 continue; 3279 unsigned PrehR = 0; 3280 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) { 3281 const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg); 3282 unsigned Opc = DefPrehR->getOpcode(); 3283 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi) 3284 continue; 3285 if (!DefPrehR->getOperand(1).isImm()) 3286 continue; 3287 if (DefPrehR->getOperand(1).getImm() != 0) 3288 continue; 3289 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg); 3290 if (RC != MRI->getRegClass(F->PR.Reg)) { 3291 PrehR = MRI->createVirtualRegister(RC); 3292 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi 3293 : Hexagon::A2_tfrpi; 3294 auto T = C.PB->getFirstTerminator(); 3295 DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc(); 3296 BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR) 3297 .addImm(0); 3298 } else { 3299 PrehR = F->PR.Reg; 3300 } 3301 } 3302 // isSameShuffle could match with PrehR being of a wider class than 3303 // G.Inp.Reg, for example if G shuffles the low 32 bits of its input, 3304 // it would match for the input being a 32-bit register, and PrehR 3305 // being a 64-bit register (where the low 32 bits match). This could 3306 // be handled, but for now skip these cases. 3307 if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg)) 3308 continue; 3309 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR); 3310 Changed = true; 3311 } 3312 3313 return Changed; 3314 } 3315 3316 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) { 3317 if (skipFunction(MF.getFunction())) 3318 return false; 3319 3320 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 3321 HII = HST.getInstrInfo(); 3322 HRI = HST.getRegisterInfo(); 3323 MRI = &MF.getRegInfo(); 3324 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); 3325 BitTracker BT(HE, MF); 3326 LLVM_DEBUG(BT.trace(true)); 3327 BT.run(); 3328 BTP = &BT; 3329 3330 std::vector<LoopCand> Cand; 3331 3332 for (auto &B : MF) { 3333 if (B.pred_size() != 2 || B.succ_size() != 2) 3334 continue; 3335 MachineBasicBlock *PB = nullptr; 3336 bool IsLoop = false; 3337 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) { 3338 if (*PI != &B) 3339 PB = *PI; 3340 else 3341 IsLoop = true; 3342 } 3343 if (!IsLoop) 3344 continue; 3345 3346 MachineBasicBlock *EB = nullptr; 3347 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) { 3348 if (*SI == &B) 3349 continue; 3350 // Set EP to the epilog block, if it has only 1 predecessor (i.e. the 3351 // edge from B to EP is non-critical. 
3352 if ((*SI)->pred_size() == 1) 3353 EB = *SI; 3354 break; 3355 } 3356 3357 Cand.push_back(LoopCand(&B, PB, EB)); 3358 } 3359 3360 bool Changed = false; 3361 for (auto &C : Cand) 3362 Changed |= processLoop(C); 3363 3364 return Changed; 3365 } 3366 3367 //===----------------------------------------------------------------------===// 3368 // Public Constructor Functions 3369 //===----------------------------------------------------------------------===// 3370 3371 FunctionPass *llvm::createHexagonLoopRescheduling() { 3372 return new HexagonLoopRescheduling(); 3373 } 3374 3375 FunctionPass *llvm::createHexagonBitSimplify() { 3376 return new HexagonBitSimplify(); 3377 } 3378