//===- HexagonBlockRanges.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonBlockRanges.h"
#include "HexagonInstrInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hbr"

bool HexagonBlockRanges::IndexRange::overlaps(const IndexRange &A) const {
  // If A contains start(), or "this" contains A.start(), then overlap.
  IndexType S = start(), E = end(), AS = A.start(), AE = A.end();
  if (AS == S)
    return true;
  bool SbAE = (S < AE) || (S == AE && A.TiedEnd);  // S-before-AE.
  bool ASbE = (AS < E) || (AS == E && TiedEnd);    // AS-before-E.
  if ((AS < S && SbAE) || (S < AS && ASbE))
    return true;
  // Otherwise no overlap.
  return false;
}

bool HexagonBlockRanges::IndexRange::contains(const IndexRange &A) const {
  if (start() <= A.start()) {
    // Treat "None" in the range end as equal to the range start.
    IndexType E = (end() != IndexType::None) ? end() : start();
    IndexType AE = (A.end() != IndexType::None) ? A.end() : A.start();
    if (AE <= E)
      return true;
  }
  return false;
}

void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
  // Allow merging adjacent ranges.
  assert(end() == A.start() || overlaps(A));
  IndexType AS = A.start(), AE = A.end();
  if (AS < start() || start() == IndexType::None)
    setStart(AS);
  if (end() < AE || end() == IndexType::None) {
    setEnd(AE);
    TiedEnd = A.TiedEnd;
  } else {
    if (end() == AE)
      TiedEnd |= A.TiedEnd;
  }
  if (A.Fixed)
    Fixed = true;
}

void HexagonBlockRanges::RangeList::include(const RangeList &RL) {
  for (const auto &R : RL)
    if (!is_contained(*this, R))
      push_back(R);
}

// Merge all overlapping ranges in the list, so that all that remains
// is a list of disjoint ranges.
void HexagonBlockRanges::RangeList::unionize(bool MergeAdjacent) {
  if (empty())
    return;

  llvm::sort(*this);
  iterator Iter = begin();

  while (Iter != end()-1) {
    iterator Next = std::next(Iter);
    // If MergeAdjacent is true, merge ranges A and B, where A.end == B.start.
    // This allows merging dead ranges, but is not valid for live ranges.
    bool Merge = MergeAdjacent && (Iter->end() == Next->start());
    if (Merge || Iter->overlaps(*Next)) {
      Iter->merge(*Next);
      erase(Next);
      continue;
    }
    ++Iter;
  }
}

// Compute a range A-B and add it to the list.
void HexagonBlockRanges::RangeList::addsub(const IndexRange &A,
      const IndexRange &B) {
  // Exclusion of non-overlapping ranges makes some checks simpler
  // later in this function.
  if (!A.overlaps(B)) {
    // A - B = A.
    add(A);
    return;
  }

  IndexType AS = A.start(), AE = A.end();
  IndexType BS = B.start(), BE = B.end();

  // If AE is None, then A is included in B, since A and B overlap.
  // The result of the subtraction is empty, so just return.
  if (AE == IndexType::None)
    return;

  if (AS < BS) {
    // A starts before B.
    // AE cannot be None since A and B overlap.
    assert(AE != IndexType::None);
    // Add the part of A that extends on the "less" side of B.
    add(AS, BS, A.Fixed, false);
  }

  if (BE < AE) {
    // BE cannot be Exit here.
    if (BE == IndexType::None)
      add(BS, AE, A.Fixed, false);
    else
      add(BE, AE, A.Fixed, false);
  }
}

// Subtract a given range from each element in the list.
void HexagonBlockRanges::RangeList::subtract(const IndexRange &Range) {
  // Cannot assume that the list is unionized (i.e. contains only non-
  // overlapping ranges).
  RangeList T;
  for (iterator Next, I = begin(); I != end(); I = Next) {
    IndexRange &Rg = *I;
    if (Rg.overlaps(Range)) {
      T.addsub(Rg, Range);
      Next = this->erase(I);
    } else {
      Next = std::next(I);
    }
  }
  include(T);
}

HexagonBlockRanges::InstrIndexMap::InstrIndexMap(MachineBasicBlock &B)
    : Block(B) {
  IndexType Idx = IndexType::First;
  First = Idx;
  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    assert(getIndex(&In) == IndexType::None && "Instruction already in map");
    Map.insert(std::make_pair(Idx, &In));
    ++Idx;
  }
  Last = B.empty() ? IndexType::None : unsigned(Idx)-1;
}

MachineInstr *HexagonBlockRanges::InstrIndexMap::getInstr(IndexType Idx) const {
  auto F = Map.find(Idx);
  return (F != Map.end()) ? F->second : nullptr;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getIndex(
      MachineInstr *MI) const {
  for (const auto &I : Map)
    if (I.second == MI)
      return I.first;
  return IndexType::None;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getPrevIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::None;
  if (Idx == IndexType::Exit)
    return Last;
  if (Idx == First)
    return IndexType::Entry;
  return unsigned(Idx)-1;
}

HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getNextIndex(
      IndexType Idx) const {
  assert(Idx != IndexType::None);
  if (Idx == IndexType::Entry)
    return IndexType::First;
  if (Idx == IndexType::Exit || Idx == Last)
    return IndexType::None;
  return unsigned(Idx)+1;
}

void HexagonBlockRanges::InstrIndexMap::replaceInstr(MachineInstr *OldMI,
      MachineInstr *NewMI) {
  for (auto &I : Map) {
    if (I.second != OldMI)
      continue;
    if (NewMI != nullptr)
      I.second = NewMI;
    else
      Map.erase(I.first);
    break;
  }
}

HexagonBlockRanges::HexagonBlockRanges(MachineFunction &mf)
  : MF(mf), HST(mf.getSubtarget<HexagonSubtarget>()),
    TII(*HST.getInstrInfo()), TRI(*HST.getRegisterInfo()),
    Reserved(TRI.getReservedRegs(mf)) {
  // Consider all non-allocatable registers as reserved.
  for (const TargetRegisterClass *RC : TRI.regclasses()) {
    if (RC->isAllocatable())
      continue;
    for (unsigned R : *RC)
      Reserved[R] = true;
  }
}

HexagonBlockRanges::RegisterSet HexagonBlockRanges::getLiveIns(
      const MachineBasicBlock &B, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet LiveIns;
  RegisterSet Tmp;

  for (auto I : B.liveins()) {
    MCSubRegIndexIterator S(I.PhysReg, &TRI);
    if (I.LaneMask.all() || (I.LaneMask.any() && !S.isValid())) {
      Tmp.insert({I.PhysReg, 0});
      continue;
    }
    for (; S.isValid(); ++S) {
      unsigned SI = S.getSubRegIndex();
      if ((I.LaneMask & TRI.getSubRegIndexLaneMask(SI)).any())
        Tmp.insert({S.getSubReg(), 0});
    }
  }

  for (auto R : Tmp) {
    if (!Reserved[R.Reg])
      LiveIns.insert(R);
    for (auto S : expandToSubRegs(R, MRI, TRI))
      if (!Reserved[S.Reg])
        LiveIns.insert(S);
  }
  return LiveIns;
}

HexagonBlockRanges::RegisterSet HexagonBlockRanges::expandToSubRegs(
      RegisterRef R, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet SRs;

  if (R.Sub != 0) {
    SRs.insert(R);
    return SRs;
  }

  if (R.Reg.isPhysical()) {
    if (TRI.subregs(R.Reg).empty())
      SRs.insert({R.Reg, 0});
    for (MCPhysReg I : TRI.subregs(R.Reg))
      SRs.insert({I, 0});
  } else {
    assert(R.Reg.isVirtual());
    auto &RC = *MRI.getRegClass(R.Reg);
    unsigned PReg = *RC.begin();
    MCSubRegIndexIterator I(PReg, &TRI);
    if (!I.isValid())
      SRs.insert({R.Reg, 0});
    for (; I.isValid(); ++I)
      SRs.insert({R.Reg, I.getSubRegIndex()});
  }
  return SRs;
}

void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
      RegToRangeMap &LiveMap) {
  std::map<RegisterRef,IndexType> LastDef, LastUse;
  RegisterSet LiveOnEntry;
  MachineBasicBlock &B = IndexMap.getBlock();
  MachineRegisterInfo &MRI = B.getParent()->getRegInfo();

  for (auto R : getLiveIns(B, MRI, TRI))
    LiveOnEntry.insert(R);

  for (auto R : LiveOnEntry)
    LastDef[R] = IndexType::Entry;

  auto closeRange = [&LastUse,&LastDef,&LiveMap] (RegisterRef R) -> void {
    auto LD = LastDef[R], LU = LastUse[R];
    if (LD == IndexType::None)
      LD = IndexType::Entry;
    if (LU == IndexType::None)
      LU = IndexType::Exit;
    LiveMap[R].add(LD, LU, false, false);
    LastUse[R] = LastDef[R] = IndexType::None;
  };

  RegisterSet Defs, Clobbers;

  for (auto &In : B) {
    if (In.isDebugInstr())
      continue;
    IndexType Index = IndexMap.getIndex(&In);
    // Process uses first.
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      if (R.Reg.isPhysical() && Reserved[R.Reg])
        continue;
      bool IsKill = Op.isKill();
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        LastUse[S] = Index;
        if (IsKill)
          closeRange(S);
      }
    }
    // Process defs and clobbers.
    Defs.clear();
    Clobbers.clear();
    for (auto &Op : In.operands()) {
      if (!Op.isReg() || !Op.isDef() || Op.isUndef())
        continue;
      RegisterRef R = { Op.getReg(), Op.getSubReg() };
      for (auto S : expandToSubRegs(R, MRI, TRI)) {
        if (S.Reg.isPhysical() && Reserved[S.Reg])
          continue;
        if (Op.isDead())
          Clobbers.insert(S);
        else
          Defs.insert(S);
      }
    }

    for (auto &Op : In.operands()) {
      if (!Op.isRegMask())
        continue;
      const uint32_t *BM = Op.getRegMask();
      for (unsigned PR = 1, N = TRI.getNumRegs(); PR != N; ++PR) {
        // Skip registers that have subregisters. A register is preserved
        // iff its bit is set in the regmask, so if R1:0 was preserved, both
        // R1 and R0 would also be present.
        if (!TRI.subregs(PR).empty())
          continue;
        if (Reserved[PR])
          continue;
        if (BM[PR/32] & (1u << (PR%32)))
          continue;
        RegisterRef R = { PR, 0 };
        if (!Defs.count(R))
          Clobbers.insert(R);
      }
    }
    // Defs and clobbers can overlap, e.g.
    // dead %d0 = COPY %5, implicit-def %r0, implicit-def %r1
    for (RegisterRef R : Defs)
      Clobbers.erase(R);

    // Update maps for defs.
    for (RegisterRef S : Defs) {
      // Defs should already be expanded into subregs.
      assert(!S.Reg.isPhysical() || TRI.subregs(S.Reg).empty());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      LastDef[S] = Index;
    }
    // Update maps for clobbers.
    for (RegisterRef S : Clobbers) {
      // Clobbers should already be expanded into subregs.
      assert(!S.Reg.isPhysical() || TRI.subregs(S.Reg).empty());
      if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
        closeRange(S);
      // Create a single-instruction range.
      LastDef[S] = LastUse[S] = Index;
      closeRange(S);
    }
  }

  // Collect live-on-exit.
  RegisterSet LiveOnExit;
  for (auto *SB : B.successors())
    for (auto R : getLiveIns(*SB, MRI, TRI))
      LiveOnExit.insert(R);

  for (auto R : LiveOnExit)
    LastUse[R] = IndexType::Exit;

  // Process remaining registers.
  RegisterSet Left;
  for (auto &I : LastUse)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto &I : LastDef)
    if (I.second != IndexType::None)
      Left.insert(I.first);
  for (auto R : Left)
    closeRange(R);

  // Finalize the live ranges.
  for (auto &P : LiveMap)
    P.second.unionize();
}

HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeLiveMap(
      InstrIndexMap &IndexMap) {
  RegToRangeMap LiveMap;
  LLVM_DEBUG(dbgs() << __func__ << ": index map\n" << IndexMap << '\n');
  computeInitialLiveRanges(IndexMap, LiveMap);
  LLVM_DEBUG(dbgs() << __func__ << ": live map\n"
                    << PrintRangeMap(LiveMap, TRI) << '\n');
  return LiveMap;
}

HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
      InstrIndexMap &IndexMap, RegToRangeMap &LiveMap) {
  RegToRangeMap DeadMap;

  auto addDeadRanges = [&IndexMap,&LiveMap,&DeadMap] (RegisterRef R) -> void {
    auto F = LiveMap.find(R);
    if (F == LiveMap.end() || F->second.empty()) {
      DeadMap[R].add(IndexType::Entry, IndexType::Exit, false, false);
      return;
    }

    RangeList &RL = F->second;
    RangeList::iterator A = RL.begin(), Z = RL.end()-1;

    // Try to create the initial range.
    if (A->start() != IndexType::Entry) {
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DE != IndexType::Entry)
        DeadMap[R].add(IndexType::Entry, DE, false, false);
    }

    while (A != Z) {
      // Creating a dead range that follows A. Pay attention to empty
      // ranges (i.e. those ending with "None").
      IndexType AE = (A->end() == IndexType::None) ? A->start() : A->end();
      IndexType DS = IndexMap.getNextIndex(AE);
      ++A;
      IndexType DE = IndexMap.getPrevIndex(A->start());
      if (DS < DE)
        DeadMap[R].add(DS, DE, false, false);
    }

    // Try to create the final range.
    if (Z->end() != IndexType::Exit) {
      IndexType ZE = (Z->end() == IndexType::None) ? Z->start() : Z->end();
      IndexType DS = IndexMap.getNextIndex(ZE);
      if (DS < IndexType::Exit)
        DeadMap[R].add(DS, IndexType::Exit, false, false);
    }
  };

  MachineFunction &MF = *IndexMap.getBlock().getParent();
  auto &MRI = MF.getRegInfo();
  unsigned NumRegs = TRI.getNumRegs();
  BitVector Visited(NumRegs);
  for (unsigned R = 1; R < NumRegs; ++R) {
    for (auto S : expandToSubRegs({R,0}, MRI, TRI)) {
      if (Reserved[S.Reg] || Visited[S.Reg])
        continue;
      addDeadRanges(S);
      Visited[S.Reg] = true;
    }
  }
  for (auto &P : LiveMap)
    if (P.first.Reg.isVirtual())
      addDeadRanges(P.first);

  LLVM_DEBUG(dbgs() << __func__ << ": dead map\n"
                    << PrintRangeMap(DeadMap, TRI) << '\n');
  return DeadMap;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      HexagonBlockRanges::IndexType Idx) {
  if (Idx == HexagonBlockRanges::IndexType::None)
    return OS << '-';
  if (Idx == HexagonBlockRanges::IndexType::Entry)
    return OS << 'n';
  if (Idx == HexagonBlockRanges::IndexType::Exit)
    return OS << 'x';
  return OS << unsigned(Idx)-HexagonBlockRanges::IndexType::First+1;
}

// A mapping to translate between instructions and their indices.
raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::IndexRange &IR) {
  OS << '[' << IR.start() << ':' << IR.end() << (IR.TiedEnd ? '}' : ']');
  if (IR.Fixed)
    OS << '!';
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::RangeList &RL) {
  for (const auto &R : RL)
    OS << R << " ";
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::InstrIndexMap &M) {
  for (auto &In : M.Block) {
    HexagonBlockRanges::IndexType Idx = M.getIndex(&In);
    OS << Idx << (Idx == M.Last ? ". " : "  ") << In;
  }
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
      const HexagonBlockRanges::PrintRangeMap &P) {
  for (const auto &I : P.Map) {
    const HexagonBlockRanges::RangeList &RL = I.second;
    OS << printReg(I.first.Reg, &P.TRI, I.first.Sub) << " -> " << RL << "\n";
  }
  return OS;
}