//===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file InstrRefBasedImpl.cpp
///
/// This is a separate implementation of LiveDebugValues, see
/// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information.
///
/// This pass propagates variable locations between basic blocks, resolving
/// control flow conflicts between them. The problem is SSA construction, where
/// each debug instruction assigns the *value* that a variable has, and every
/// instruction where the variable is in scope uses that variable. The resulting
/// map of instruction-to-value is then translated into a register (or spill)
/// location for each variable over each instruction.
///
/// The primary difference from normal SSA construction is that we cannot
/// _create_ PHI values that contain variable values. CodeGen has already
/// completed, and we can't alter it just to make debug-info complete. Thus:
/// we can identify function positions where we would like a PHI value for a
/// variable, but must search the MachineFunction to see whether such a PHI is
/// available. If no such PHI exists, the variable location must be dropped.
///
/// To achieve this, we perform two kinds of analysis. First, we identify
/// every value defined by every instruction (ignoring those that only move
/// another value), then re-compute an SSA-form representation of the
/// MachineFunction, using value propagation to eliminate any unnecessary
/// PHI values. This gives us a map of every value computed in the function,
/// and its location within the register file / stack.
///
/// Secondly, for each variable we perform the same analysis, where each debug
/// instruction is considered a def, and every instruction where the variable
/// is in lexical scope is a use. Value propagation is used again to eliminate
/// any unnecessary PHIs. This gives us a map of each variable to the value
/// it should have in a block.
///
/// Once both are complete, we have two maps for each block:
///  * Variables to the values they should have,
///  * Values to the register / spill slot they are located in.
/// After which we can marry-up variable values with a location, and emit
/// DBG_VALUE instructions specifying those locations. Variable locations may
/// be dropped in this process due to the desired variable value not being
/// resident in any machine location, or because there is no PHI value in any
/// location that accurately represents the desired value. The building of
/// location lists for each block is left to DbgEntityHistoryCalculator.
///
/// This pass is kept efficient because the size of the first SSA problem
/// is proportional to the working-set size of the function, which the compiler
/// tries to keep small. (It's also proportional to the number of blocks).
/// Additionally, we repeatedly perform the second SSA problem analysis with
/// only the variables and blocks in a single lexical scope, exploiting their
/// locality.
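///
/// ### Illustrative example
///
/// As a rough, hand-written sketch (not output from this pass, and with MIR
/// syntax abbreviated), consider a diamond CFG where both branches assign
/// the source variable "x" into $rax:
///
///   bb.1:                           bb.2:
///     $rax = MOV32ri 0                $rax = MOV32ri 1
///     DBG_VALUE $rax, ..., "x", ...   DBG_VALUE $rax, ..., "x", ...
///
///   bb.3:
///     ; First SSA problem:  $rax contains PHI(value from bb.1, value from bb.2)
///     ; Second SSA problem: "x" should have PHI(value from bb.1, value from bb.2)
///
/// The PHI value that "x" wants on entry to bb.3 is resident in $rax, so a
/// DBG_VALUE of $rax can be emitted there. Had $rax been clobbered on one
/// path (and the value not copied anywhere else), no machine location would
/// contain that PHI, and the location of "x" would be dropped instead.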
///
/// ### Terminology
///
/// A machine location is a register or spill slot, a value is something that's
/// defined by an instruction or PHI node, while a variable value is the value
/// assigned to a variable. A variable location is a machine location that must
/// contain the appropriate variable value. A value that is a PHI node is
/// occasionally called an mphi.
///
/// The first SSA problem is the "machine value location" problem,
/// because we're determining which machine locations contain which values.
/// The "locations" are constant: what's unknown is what value they contain.
///
/// The second SSA problem (the one for variables) is the "variable value
/// problem", because it's determining what values a variable has, rather than
/// what location those values are placed in.
///
/// TODO:
///   Overlapping fragments
///   Entry values
///   Add back DEBUG statements for debugging this
///   Collect statistics
///
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits.h>
#include <limits>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>

#include "InstrRefBasedImpl.h"
#include "LiveDebugValues.h"

using namespace llvm;
using namespace LiveDebugValues;

// SSAUpdaterImpl sets DEBUG_TYPE, change it.
#undef DEBUG_TYPE
#define DEBUG_TYPE "livedebugvalues"

// Act more like the VarLoc implementation, by propagating some locations too
// far and ignoring some transfers.
static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
                                   cl::desc("Act like old LiveDebugValues did"),
                                   cl::init(false));

// Limit for the maximum number of stack slots we should track, past which we
// will ignore any spills. InstrRefBasedLDV gathers detailed information on all
// stack slots which leads to high memory consumption, and in some scenarios
// (such as asan with very many locals) the working set of the function can be
// very large, causing many spills. In these scenarios, it is very unlikely that
// the developer has hundreds of variables live at the same time that they're
// carefully thinking about -- instead, they probably autogenerated the code.
// When this happens, gracefully stop tracking excess spill slots, rather than
// consuming all the developer's memory.
static cl::opt<unsigned>
    StackWorkingSetLimit("livedebugvalues-max-stack-slots", cl::Hidden,
                         cl::desc("livedebugvalues-stack-ws-limit"),
                         cl::init(250));

/// Tracker for converting machine value locations and variable values into
/// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs
/// specifying block live-in locations and transfers within blocks.
///
/// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker
/// and must be initialized with the set of variable values that are live-in to
/// the block. The caller then repeatedly calls process(). TransferTracker picks
/// out variable locations for the live-in variable values (if there _is_ a
/// location) and creates the corresponding DBG_VALUEs. Then, as the block is
/// stepped through, transfers of values between machine locations are
/// identified and if profitable, a DBG_VALUE created.
///
/// This is where debug use-before-defs would be resolved: a variable with an
/// unavailable value could materialize in the middle of a block, when the
/// value becomes available. Or, we could detect clobbers and re-specify the
/// variable in a backup location. (XXX these are unimplemented).
class TransferTracker {
public:
  const TargetInstrInfo *TII;
  const TargetLowering *TLI;
  /// This machine location tracker is assumed to always contain the up-to-date
  /// value mapping for all machine locations. TransferTracker only reads
  /// information from it. (XXX make it const?)
  MLocTracker *MTracker;
  MachineFunction &MF;
  bool ShouldEmitDebugEntryValues;

  /// Record of all changes in variable locations at a block position. Awkwardly
  /// we allow inserting either before or after the point: MBB != nullptr
  /// indicates it's before, otherwise after.
  struct Transfer {
    MachineBasicBlock::instr_iterator Pos; /// Position to insert DBG_VALUEs
    MachineBasicBlock *MBB;                /// non-null if we should insert after.
    SmallVector<MachineInstr *, 4> Insts;  /// Vector of DBG_VALUEs to insert.
  };

  struct LocAndProperties {
    LocIdx Loc;
    DbgValueProperties Properties;
  };

  /// Collection of transfers (DBG_VALUEs) to be inserted.
  SmallVector<Transfer, 32> Transfers;

  /// Local cache of what-value-is-in-what-LocIdx. Used to identify differences
  /// between TransferTracker's view of variable locations and MLocTracker's.
  /// For example, MLocTracker observes all clobbers, but TransferTracker lazily
  /// does not.
  SmallVector<ValueIDNum, 32> VarLocs;

  /// Map from LocIdxes to which DebugVariables are based in that location.
  /// Maintained while stepping through the block. Not accurate if
  /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
  DenseMap<LocIdx, SmallSet<DebugVariable, 4>> ActiveMLocs;

  /// Map from DebugVariable to its current location and qualifying meta
  /// information. To be used in conjunction with ActiveMLocs to construct
  /// enough information for the DBG_VALUEs for a particular LocIdx.
  DenseMap<DebugVariable, LocAndProperties> ActiveVLocs;

  /// Temporary cache of DBG_VALUEs to be entered into the Transfers collection.
  SmallVector<MachineInstr *, 4> PendingDbgValues;

  /// Record of a use-before-def: created when a value that's live-in to the
  /// current block isn't available in any machine location, but it will be
  /// defined in this block.
  struct UseBeforeDef {
    /// Value of this variable, def'd in block.
    ValueIDNum ID;
    /// Identity of this variable.
    DebugVariable Var;
    /// Additional variable properties.
    DbgValueProperties Properties;
  };

  /// Map from instruction index (within the block) to the set of UseBeforeDefs
  /// that become defined at that instruction.
  DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs;

  /// The set of variables that are in UseBeforeDefs and can become a location
  /// once the relevant value is defined. An element being erased from this
  /// collection prevents the use-before-def materializing.
  DenseSet<DebugVariable> UseBeforeDefVariables;

  const TargetRegisterInfo &TRI;
  const BitVector &CalleeSavedRegs;

  TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker,
                  MachineFunction &MF, const TargetRegisterInfo &TRI,
                  const BitVector &CalleeSavedRegs, const TargetPassConfig &TPC)
      : TII(TII), MTracker(MTracker), MF(MF), TRI(TRI),
        CalleeSavedRegs(CalleeSavedRegs) {
    TLI = MF.getSubtarget().getTargetLowering();
    auto &TM = TPC.getTM<TargetMachine>();
    ShouldEmitDebugEntryValues = TM.Options.ShouldEmitDebugEntryValues();
  }

  /// Load object with live-in variable values. \p mlocs contains the live-in
  /// values in each machine location, while \p vlocs the live-in variable
  /// values. This method picks variable locations for the live-in variables,
  /// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
  /// object fields to track variable locations as we step through the block.
  /// FIXME: could just examine mloctracker instead of passing in \p mlocs?
  void
  loadInlocs(MachineBasicBlock &MBB, ValueIDNum *MLocs,
             const SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
             unsigned NumLocs) {
    ActiveMLocs.clear();
    ActiveVLocs.clear();
    VarLocs.clear();
    VarLocs.reserve(NumLocs);
    UseBeforeDefs.clear();
    UseBeforeDefVariables.clear();

    auto isCalleeSaved = [&](LocIdx L) {
      unsigned Reg = MTracker->LocIdxToLocID[L];
      if (Reg >= MTracker->NumRegs)
        return false;
      for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
        if (CalleeSavedRegs.test(*RAI))
          return true;
      return false;
    };

    // Map of the preferred location for each value.
    DenseMap<ValueIDNum, LocIdx> ValueToLoc;

    // Initialize the preferred-location map with illegal locations, to be
    // filled in later.
    for (auto &VLoc : VLocs)
      if (VLoc.second.Kind == DbgValue::Def)
        ValueToLoc.insert({VLoc.second.ID, LocIdx::MakeIllegalLoc()});

    ActiveMLocs.reserve(VLocs.size());
    ActiveVLocs.reserve(VLocs.size());

    // Produce a map of value numbers to the current machine locs they live
    // in. When emulating VarLocBasedImpl, there should only be one
    // location; when not, we get to pick.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &VNum = MLocs[Idx.asU64()];
      VarLocs.push_back(VNum);

      // Is there a variable that wants a location for this value? If not, skip.
      auto VIt = ValueToLoc.find(VNum);
      if (VIt == ValueToLoc.end())
        continue;

      LocIdx CurLoc = VIt->second;
      // In order of preference, pick:
      //  * Callee saved registers,
      //  * Other registers,
      //  * Spill slots.
      if (CurLoc.isIllegal() || MTracker->isSpill(CurLoc) ||
          (!isCalleeSaved(CurLoc) && isCalleeSaved(Idx.asU64()))) {
        // Insert, or overwrite if insertion failed.
        VIt->second = Idx;
      }
    }

    // Now map variables to their picked LocIdxes.
    for (const auto &Var : VLocs) {
      if (Var.second.Kind == DbgValue::Const) {
        PendingDbgValues.push_back(
            emitMOLoc(*Var.second.MO, Var.first, Var.second.Properties));
        continue;
      }

      // If the value has no location, we can't make a variable location.
      const ValueIDNum &Num = Var.second.ID;
      auto ValuesPreferredLoc = ValueToLoc.find(Num);
      if (ValuesPreferredLoc->second.isIllegal()) {
        // If it's a def that occurs in this block, register it as a
        // use-before-def to be resolved as we step through the block.
        if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI())
          addUseBeforeDef(Var.first, Var.second.Properties, Num);
        else
          recoverAsEntryValue(Var.first, Var.second.Properties, Num);
        continue;
      }

      LocIdx M = ValuesPreferredLoc->second;
      auto NewValue = LocAndProperties{M, Var.second.Properties};
      auto Result = ActiveVLocs.insert(std::make_pair(Var.first, NewValue));
      if (!Result.second)
        Result.first->second = NewValue;
      ActiveMLocs[M].insert(Var.first);
      PendingDbgValues.push_back(
          MTracker->emitLoc(M, Var.first, Var.second.Properties));
    }
    flushDbgValues(MBB.begin(), &MBB);
  }

  /// Record that \p Var has value \p ID, a value that becomes available
  /// later in the function.
  void addUseBeforeDef(const DebugVariable &Var,
                       const DbgValueProperties &Properties, ValueIDNum ID) {
    UseBeforeDef UBD = {ID, Var, Properties};
    UseBeforeDefs[ID.getInst()].push_back(UBD);
    UseBeforeDefVariables.insert(Var);
  }

  /// After the instruction at index \p Inst and position \p pos has been
  /// processed, check whether it defines a variable value in a use-before-def.
  /// If so, and the variable value hasn't changed since the start of the
  /// block, create a DBG_VALUE.
  void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) {
    auto MIt = UseBeforeDefs.find(Inst);
    if (MIt == UseBeforeDefs.end())
      return;

    for (auto &Use : MIt->second) {
      LocIdx L = Use.ID.getLoc();

      // If something goes very wrong, we might end up labelling a COPY
      // instruction or similar with an instruction number, where it doesn't
      // actually define a new value, instead it moves a value. In case this
      // happens, discard.
      if (MTracker->readMLoc(L) != Use.ID)
        continue;

      // If a different debug instruction defined the variable value / location
      // since the start of the block, don't materialize this use-before-def.
      if (!UseBeforeDefVariables.count(Use.Var))
        continue;

      PendingDbgValues.push_back(MTracker->emitLoc(L, Use.Var, Use.Properties));
    }
    flushDbgValues(pos, nullptr);
  }

  /// Helper to move created DBG_VALUEs into Transfers collection.
  void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) {
    if (PendingDbgValues.size() == 0)
      return;

    // Pick out the instruction start position.
    MachineBasicBlock::instr_iterator BundleStart;
    if (MBB && Pos == MBB->begin())
      BundleStart = MBB->instr_begin();
    else
      BundleStart = getBundleStart(Pos->getIterator());

    Transfers.push_back({BundleStart, MBB, PendingDbgValues});
    PendingDbgValues.clear();
  }

  bool isEntryValueVariable(const DebugVariable &Var,
                            const DIExpression *Expr) const {
    if (!Var.getVariable()->isParameter())
      return false;

    if (Var.getInlinedAt())
      return false;

    if (Expr->getNumElements() > 0)
      return false;

    return true;
  }

  bool isEntryValueValue(const ValueIDNum &Val) const {
    // Must be in entry block (block number zero), and be a PHI / live-in value.
    if (Val.getBlock() || !Val.isPHI())
      return false;

    // Entry values must enter in a register.
    if (MTracker->isSpill(Val.getLoc()))
      return false;

    Register SP = TLI->getStackPointerRegisterToSaveRestore();
    Register FP = TRI.getFrameRegister(MF);
    Register Reg = MTracker->LocIdxToLocID[Val.getLoc()];
    return Reg != SP && Reg != FP;
  }

  bool recoverAsEntryValue(const DebugVariable &Var,
                           const DbgValueProperties &Prop,
                           const ValueIDNum &Num) {
    // Is this variable location a candidate to be an entry value. First,
    // should we be trying this at all?
    if (!ShouldEmitDebugEntryValues)
      return false;

    // Is the variable appropriate for entry values (i.e., is a parameter).
    if (!isEntryValueVariable(Var, Prop.DIExpr))
      return false;

    // Is the value assigned to this variable still the entry value?
    if (!isEntryValueValue(Num))
      return false;

    // Emit a variable location using an entry value expression.
    DIExpression *NewExpr =
        DIExpression::prepend(Prop.DIExpr, DIExpression::EntryValue);
    Register Reg = MTracker->LocIdxToLocID[Num.getLoc()];
    MachineOperand MO = MachineOperand::CreateReg(Reg, false);

    PendingDbgValues.push_back(emitMOLoc(MO, Var, {NewExpr, Prop.Indirect}));
    return true;
  }

  /// Change a variable value after encountering a DBG_VALUE inside a block.
  void redefVar(const MachineInstr &MI) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    DbgValueProperties Properties(MI);

    const MachineOperand &MO = MI.getOperand(0);

    // Ignore non-register locations, we don't transfer those.
    if (!MO.isReg() || MO.getReg() == 0) {
      auto It = ActiveVLocs.find(Var);
      if (It != ActiveVLocs.end()) {
        ActiveMLocs[It->second.Loc].erase(Var);
        ActiveVLocs.erase(It);
      }
      // Any use-before-defs no longer apply.
      UseBeforeDefVariables.erase(Var);
      return;
    }

    Register Reg = MO.getReg();
    LocIdx NewLoc = MTracker->getRegMLoc(Reg);
    redefVar(MI, Properties, NewLoc);
  }

  /// Handle a change in variable location within a block. Terminate the
  /// variable's current location, and record the value it now refers to, so
  /// that we can detect location transfers later on.
  void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties,
                Optional<LocIdx> OptNewLoc) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    // Any use-before-defs no longer apply.
    UseBeforeDefVariables.erase(Var);

    // Erase any previous location.
    auto It = ActiveVLocs.find(Var);
    if (It != ActiveVLocs.end())
      ActiveMLocs[It->second.Loc].erase(Var);

    // If there _is_ no new location, all we had to do was erase.
    if (!OptNewLoc)
      return;
    LocIdx NewLoc = *OptNewLoc;

    // Check whether our local copy of values-by-location in #VarLocs is out of
    // date. Wipe old tracking data for the location if it's been clobbered in
    // the meantime.
    if (MTracker->readMLoc(NewLoc) != VarLocs[NewLoc.asU64()]) {
      for (auto &P : ActiveMLocs[NewLoc]) {
        ActiveVLocs.erase(P);
      }
      ActiveMLocs[NewLoc.asU64()].clear();
      VarLocs[NewLoc.asU64()] = MTracker->readMLoc(NewLoc);
    }

    ActiveMLocs[NewLoc].insert(Var);
    if (It == ActiveVLocs.end()) {
      ActiveVLocs.insert(
          std::make_pair(Var, LocAndProperties{NewLoc, Properties}));
    } else {
      It->second.Loc = NewLoc;
      It->second.Properties = Properties;
    }
  }

  /// Account for a location \p mloc being clobbered. Examine the variable
  /// locations that will be terminated, and try to recover them by using
  /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to
  /// explicitly terminate a location if it can't be recovered.
  void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos,
                   bool MakeUndef = true) {
    auto ActiveMLocIt = ActiveMLocs.find(MLoc);
    if (ActiveMLocIt == ActiveMLocs.end())
      return;

    // What was the old variable value?
    ValueIDNum OldValue = VarLocs[MLoc.asU64()];
    VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue;

    // Examine the remaining variable locations: if we can find the same value
    // again, we can recover the location.
    Optional<LocIdx> NewLoc = None;
    for (auto Loc : MTracker->locations())
      if (Loc.Value == OldValue)
        NewLoc = Loc.Idx;

    // If there is no location, and we weren't asked to make the variable
    // explicitly undef, then stop here.
    if (!NewLoc && !MakeUndef) {
      // Try and recover a few more locations with entry values.
      for (auto &Var : ActiveMLocIt->second) {
        auto &Prop = ActiveVLocs.find(Var)->second.Properties;
        recoverAsEntryValue(Var, Prop, OldValue);
      }
      flushDbgValues(Pos, nullptr);
      return;
    }

    // Examine all the variables based on this location.
    DenseSet<DebugVariable> NewMLocs;
    for (auto &Var : ActiveMLocIt->second) {
      auto ActiveVLocIt = ActiveVLocs.find(Var);
      // Re-state the variable location: if there's no replacement then NewLoc
      // is None and a $noreg DBG_VALUE will be created. Otherwise, a DBG_VALUE
      // identifying the alternative location will be emitted.
      const DbgValueProperties &Properties = ActiveVLocIt->second.Properties;
      PendingDbgValues.push_back(MTracker->emitLoc(NewLoc, Var, Properties));

      // Update machine locations <=> variable locations maps. Defer updating
      // ActiveMLocs to avoid invalidating the ActiveMLocIt iterator.
      if (!NewLoc) {
        ActiveVLocs.erase(ActiveVLocIt);
      } else {
        ActiveVLocIt->second.Loc = *NewLoc;
        NewMLocs.insert(Var);
      }
    }

    // Commit any deferred ActiveMLoc changes.
    if (!NewMLocs.empty())
      for (auto &Var : NewMLocs)
        ActiveMLocs[*NewLoc].insert(Var);

    // We lazily track what locations have which values; if we've found a new
    // location for the clobbered value, remember it.
    if (NewLoc)
      VarLocs[NewLoc->asU64()] = OldValue;

    flushDbgValues(Pos, nullptr);

    // Re-find ActiveMLocIt, iterator could have been invalidated.
    ActiveMLocIt = ActiveMLocs.find(MLoc);
    ActiveMLocIt->second.clear();
  }

  /// Transfer variables based on \p Src to be based on \p Dst. This handles
  /// both register copies as well as spills and restores. Creates DBG_VALUEs
  /// describing the movement.
  void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) {
    // Does Src still contain the value num we expect? If not, it's been
    // clobbered in the meantime, and our variable locations are stale.
    if (VarLocs[Src.asU64()] != MTracker->readMLoc(Src))
      return;

    // assert(ActiveMLocs[Dst].size() == 0);
    //^^^ Legitimate scenario on account of un-clobbered slot being assigned to?

    // Move set of active variables from one location to another.
    auto MovingVars = ActiveMLocs[Src];
    ActiveMLocs[Dst] = MovingVars;
    VarLocs[Dst.asU64()] = VarLocs[Src.asU64()];

    // For each variable based on Src, create a location at Dst.
    for (auto &Var : MovingVars) {
      auto ActiveVLocIt = ActiveVLocs.find(Var);
      assert(ActiveVLocIt != ActiveVLocs.end());
      ActiveVLocIt->second.Loc = Dst;

      MachineInstr *MI =
          MTracker->emitLoc(Dst, Var, ActiveVLocIt->second.Properties);
      PendingDbgValues.push_back(MI);
    }
    ActiveMLocs[Src].clear();
    flushDbgValues(Pos, nullptr);

    // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data
    // about the old location.
    if (EmulateOldLDV)
      VarLocs[Src.asU64()] = ValueIDNum::EmptyValue;
  }

  MachineInstrBuilder emitMOLoc(const MachineOperand &MO,
                                const DebugVariable &Var,
                                const DbgValueProperties &Properties) {
    DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
                                  Var.getVariable()->getScope(),
                                  const_cast<DILocation *>(Var.getInlinedAt()));
    auto MIB = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE));
    MIB.add(MO);
    if (Properties.Indirect)
      MIB.addImm(0);
    else
      MIB.addReg(0);
    MIB.addMetadata(Var.getVariable());
    MIB.addMetadata(Properties.DIExpr);
    return MIB;
  }
};

//===----------------------------------------------------------------------===//
// Implementation
//===----------------------------------------------------------------------===//

ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX, UINT_MAX, UINT_MAX};
ValueIDNum ValueIDNum::TombstoneValue = {UINT_MAX, UINT_MAX, UINT_MAX - 1};

#ifndef NDEBUG
void DbgValue::dump(const MLocTracker *MTrack) const {
  if (Kind == Const) {
    MO->dump();
  } else if (Kind == NoVal) {
    dbgs() << "NoVal(" << BlockNo << ")";
  } else if (Kind == VPHI) {
    dbgs() << "VPHI(" << BlockNo << "," << MTrack->IDAsString(ID) << ")";
  } else {
    assert(Kind == Def);
    dbgs() << MTrack->IDAsString(ID);
  }
  if (Properties.Indirect)
    dbgs() << " indir";
  if (Properties.DIExpr)
    dbgs() << " " << *Properties.DIExpr;
}
#endif

MLocTracker::MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII,
                         const TargetRegisterInfo &TRI,
                         const TargetLowering &TLI)
    : MF(MF), TII(TII), TRI(TRI), TLI(TLI),
      LocIdxToIDNum(ValueIDNum::EmptyValue), LocIdxToLocID(0) {
  NumRegs = TRI.getNumRegs();
  reset();
  LocIDToLocIdx.resize(NumRegs, LocIdx::MakeIllegalLoc());
  assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure

  // Always track SP. This avoids the implicit clobbering caused by regmasks
  // from affecting its values. (LiveDebugValues disbelieves calls and
  // regmasks that claim to clobber SP).
  Register SP = TLI.getStackPointerRegisterToSaveRestore();
  if (SP) {
    unsigned ID = getLocID(SP);
    (void)lookupOrTrackRegister(ID);

    for (MCRegAliasIterator RAI(SP, &TRI, true); RAI.isValid(); ++RAI)
      SPAliases.insert(*RAI);
  }

  // Build some common stack positions -- full registers being spilt to the
  // stack.
  StackSlotIdxes.insert({{8, 0}, 0});
  StackSlotIdxes.insert({{16, 0}, 1});
  StackSlotIdxes.insert({{32, 0}, 2});
  StackSlotIdxes.insert({{64, 0}, 3});
  StackSlotIdxes.insert({{128, 0}, 4});
  StackSlotIdxes.insert({{256, 0}, 5});
  StackSlotIdxes.insert({{512, 0}, 6});

  // Traverse all the subregister idxes, and ensure there's an index for them.
  // Duplicates are no problem: we're interested in their position in the
  // stack slot, we don't want to type the slot.
  for (unsigned int I = 1; I < TRI.getNumSubRegIndices(); ++I) {
    unsigned Size = TRI.getSubRegIdxSize(I);
    unsigned Offs = TRI.getSubRegIdxOffset(I);
    unsigned Idx = StackSlotIdxes.size();

    // Some subregs have -1, -2 and so forth fed into their fields, to mean
    // special backend things. Ignore those.
    if (Size > 60000 || Offs > 60000)
      continue;

    StackSlotIdxes.insert({{Size, Offs}, Idx});
  }

  for (auto &Idx : StackSlotIdxes)
    StackIdxesToPos[Idx.second] = Idx.first;

  NumSlotIdxes = StackSlotIdxes.size();
}

LocIdx MLocTracker::trackRegister(unsigned ID) {
  assert(ID != 0);
  LocIdx NewIdx = LocIdx(LocIdxToIDNum.size());
  LocIdxToIDNum.grow(NewIdx);
  LocIdxToLocID.grow(NewIdx);

  // Default: it's an mphi.
  ValueIDNum ValNum = {CurBB, 0, NewIdx};
  // Was this reg ever touched by a regmask?
  for (const auto &MaskPair : reverse(Masks)) {
    if (MaskPair.first->clobbersPhysReg(ID)) {
      // There was an earlier def we skipped.
      ValNum = {CurBB, MaskPair.second, NewIdx};
      break;
    }
  }

  LocIdxToIDNum[NewIdx] = ValNum;
  LocIdxToLocID[NewIdx] = ID;
  return NewIdx;
}

void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB,
                               unsigned InstID) {
  // Def any register we track that isn't preserved. The regmask
  // terminates the liveness of a register, meaning its value can't be
  // relied upon -- we represent this by giving it a new value.
  for (auto Location : locations()) {
    unsigned ID = LocIdxToLocID[Location.Idx];
    // Don't clobber SP, even if the mask says it's clobbered.
    if (ID < NumRegs && !SPAliases.count(ID) && MO->clobbersPhysReg(ID))
      defReg(ID, CurBB, InstID);
  }
  Masks.push_back(std::make_pair(MO, InstID));
}

Optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
  SpillLocationNo SpillID(SpillLocs.idFor(L));

  if (SpillID.id() == 0) {
    // If there is no location, and we have reached the limit of how many stack
    // slots to track, then don't track this one.
    if (SpillLocs.size() >= StackWorkingSetLimit)
      return None;

    // Spill location is untracked: create record for this one, and all
    // subregister slots too.
    SpillID = SpillLocationNo(SpillLocs.insert(L));
    for (unsigned StackIdx = 0; StackIdx < NumSlotIdxes; ++StackIdx) {
      unsigned L = getSpillIDWithIdx(SpillID, StackIdx);
      LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx
      LocIdxToIDNum.grow(Idx);
      LocIdxToLocID.grow(Idx);
      LocIDToLocIdx.push_back(Idx);
      LocIdxToLocID[Idx] = L;
      // Initialize to PHI value; corresponds to the location's live-in value
      // during transfer function construction.
      LocIdxToIDNum[Idx] = ValueIDNum(CurBB, 0, Idx);
    }
  }
  return SpillID;
}

std::string MLocTracker::LocIdxToName(LocIdx Idx) const {
  unsigned ID = LocIdxToLocID[Idx];
  if (ID >= NumRegs) {
    StackSlotPos Pos = locIDToSpillIdx(ID);
    ID -= NumRegs;
    unsigned Slot = ID / NumSlotIdxes;
    return Twine("slot ")
        .concat(Twine(Slot).concat(Twine(" sz ").concat(Twine(Pos.first)
        .concat(Twine(" offs ").concat(Twine(Pos.second))))))
        .str();
  } else {
    return TRI.getRegAsmName(ID).str();
  }
}

std::string MLocTracker::IDAsString(const ValueIDNum &Num) const {
  std::string DefName = LocIdxToName(Num.getLoc());
  return Num.asString(DefName);
}

#ifndef NDEBUG
LLVM_DUMP_METHOD void MLocTracker::dump() {
  for (auto Location : locations()) {
    std::string MLocName = LocIdxToName(Location.Value.getLoc());
    std::string DefName = Location.Value.asString(MLocName);
    dbgs() << LocIdxToName(Location.Idx) << " --> " << DefName << "\n";
  }
}

LLVM_DUMP_METHOD void MLocTracker::dump_mloc_map() {
  for (auto Location : locations()) {
    std::string foo = LocIdxToName(Location.Idx);
    dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n";
  }
}
#endif

MachineInstrBuilder MLocTracker::emitLoc(Optional<LocIdx> MLoc,
                                         const DebugVariable &Var,
                                         const DbgValueProperties &Properties) {
  DebugLoc DL = DILocation::get(Var.getVariable()->getContext(), 0, 0,
                                Var.getVariable()->getScope(),
                                const_cast<DILocation *>(Var.getInlinedAt()));
  auto MIB = BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE));

  const DIExpression *Expr = Properties.DIExpr;
  if (!MLoc) {
    // No location -> DBG_VALUE $noreg
    MIB.addReg(0);
    MIB.addReg(0);
  } else if (LocIdxToLocID[*MLoc] >= NumRegs) {
    unsigned LocID = LocIdxToLocID[*MLoc];
    SpillLocationNo SpillID = locIDToSpill(LocID);
    StackSlotPos StackIdx = locIDToSpillIdx(LocID);
    unsigned short Offset = StackIdx.second;

    // TODO: support variables that are located in spill slots, with non-zero
    // offsets from the start of the spill slot. It would require some more
    // complex DIExpression calculations. This doesn't seem to be produced by
    // LLVM right now, so don't try and support it.
    // Accept no-subregister slots and subregisters where the offset is zero.
    // The consumer should already have type information to work out how large
    // the variable is.
    if (Offset == 0) {
      const SpillLoc &Spill = SpillLocs[SpillID.id()];
      Expr = TRI.prependOffsetExpression(Expr, DIExpression::ApplyOffset,
                                         Spill.SpillOffset);
      unsigned Base = Spill.SpillBase;
      MIB.addReg(Base);
      MIB.addImm(0);

      // Being on the stack makes this location indirect; if it was _already_
      // indirect though, we need to add extra indirection. See this test for
      // a scenario where this happens:
      // llvm/test/DebugInfo/X86/spill-nontrivial-param.ll
      if (Properties.Indirect) {
        std::vector<uint64_t> Elts = {dwarf::DW_OP_deref};
        Expr = DIExpression::append(Expr, Elts);
      }
    } else {
      // This is a stack location with a weird subregister offset: emit an
      // undef DBG_VALUE instead.
      MIB.addReg(0);
      MIB.addReg(0);
    }
  } else {
    // Non-empty, non-stack slot, must be a plain register.
    unsigned LocID = LocIdxToLocID[*MLoc];
    MIB.addReg(LocID);
    if (Properties.Indirect)
      MIB.addImm(0);
    else
      MIB.addReg(0);
  }

  MIB.addMetadata(Var.getVariable());
  MIB.addMetadata(Expr);
  return MIB;
}

/// Default construct and initialize the pass.
InstrRefBasedLDV::InstrRefBasedLDV() {}

bool InstrRefBasedLDV::isCalleeSaved(LocIdx L) const {
  unsigned Reg = MTracker->LocIdxToLocID[L];
  for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
    if (CalleeSavedRegs.test(*RAI))
      return true;
  return false;
}

//===----------------------------------------------------------------------===//
// Debug Range Extension Implementation
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
// Something to restore in the future.
// void InstrRefBasedLDV::printVarLocInMBB(..)
#endif

Optional<SpillLocationNo>
InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
  assert(MI.hasOneMemOperand() &&
         "Spill instruction does not have exactly one memory operand?");
  auto MMOI = MI.memoperands_begin();
  const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
  assert(PVal->kind() == PseudoSourceValue::FixedStack &&
         "Inconsistent memory operand in spill instruction");
  int FI = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex();
  const MachineBasicBlock *MBB = MI.getParent();
  Register Reg;
  StackOffset Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg);
  return MTracker->getOrTrackSpillLoc({Reg, Offset});
}

Optional<LocIdx>
InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
  Optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
  if (!SpillLoc)
    return None;

  // Where in the stack slot is this value defined -- i.e., what size of value
  // is this? An important question, because it could be loaded into a register
  // from the stack at some point. Happily the memory operand will tell us
  // the size written to the stack.
  auto *MemOperand = *MI.memoperands_begin();
  unsigned SizeInBits = MemOperand->getSizeInBits();

  // Find that position in the stack indexes we're tracking.
  auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits, 0});
  if (IdxIt == MTracker->StackSlotIdxes.end())
    // That index is not tracked. This is surprising, and unlikely to ever
    // occur, but the safe action is to indicate the variable is optimised out.
    return None;

  unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillLoc, IdxIt->second);
  return MTracker->getSpillMLoc(SpillID);
}

/// End all previous ranges related to @MI and start a new range from @MI
/// if it is a DBG_VALUE instr.
bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) {
  if (!MI.isDebugValue())
    return false;

  const DILocalVariable *Var = MI.getDebugVariable();
  const DIExpression *Expr = MI.getDebugExpression();
  const DILocation *DebugLoc = MI.getDebugLoc();
  const DILocation *InlinedAt = DebugLoc->getInlinedAt();
  assert(Var->isValidLocationForIntrinsic(DebugLoc) &&
         "Expected inlined-at fields to agree");

  DebugVariable V(Var, Expr, InlinedAt);
  DbgValueProperties Properties(MI);

  // If there are no instructions in this lexical scope, do no location tracking
  // at all, this variable shouldn't get a legitimate location range.
  auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get());
  if (Scope == nullptr)
    return true; // handled it; by doing nothing

  // For now, ignore DBG_VALUE_LISTs when extending ranges. Allow it to
  // contribute to locations in this block, but don't propagate further.
  // Interpret it like a DBG_VALUE $noreg.
  if (MI.isDebugValueList()) {
    if (VTracker)
      VTracker->defVar(MI, Properties, None);
    if (TTracker)
      TTracker->redefVar(MI, Properties, None);
    return true;
  }

  const MachineOperand &MO = MI.getOperand(0);

  // MLocTracker needs to know that this register is read, even if it's only
  // read by a debug inst.
  if (MO.isReg() && MO.getReg() != 0)
    (void)MTracker->readReg(MO.getReg());

  // If we're preparing for the second analysis (variables), the machine value
  // locations are already solved, and we report this DBG_VALUE and the value
  // it refers to to VLocTracker.
  if (VTracker) {
    if (MO.isReg()) {
      // Feed defVar the new variable location, or if this is a
      // DBG_VALUE $noreg, feed defVar None.
      if (MO.getReg())
        VTracker->defVar(MI, Properties, MTracker->readReg(MO.getReg()));
      else
        VTracker->defVar(MI, Properties, None);
    } else if (MI.getOperand(0).isImm() || MI.getOperand(0).isFPImm() ||
               MI.getOperand(0).isCImm()) {
      VTracker->defVar(MI, MI.getOperand(0));
    }
  }

  // If performing final tracking of transfers, report this variable definition
  // to the TransferTracker too.
  if (TTracker)
    TTracker->redefVar(MI);
  return true;
}

bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
                                             ValueIDNum **MLiveOuts,
                                             ValueIDNum **MLiveIns) {
  if (!MI.isDebugRef())
    return false;

  // Only handle this instruction when we are building the variable value
  // transfer function.
  if (!VTracker && !TTracker)
    return false;

  unsigned InstNo = MI.getOperand(0).getImm();
  unsigned OpNo = MI.getOperand(1).getImm();

  const DILocalVariable *Var = MI.getDebugVariable();
  const DIExpression *Expr = MI.getDebugExpression();
  const DILocation *DebugLoc = MI.getDebugLoc();
  const DILocation *InlinedAt = DebugLoc->getInlinedAt();
  assert(Var->isValidLocationForIntrinsic(DebugLoc) &&
         "Expected inlined-at fields to agree");

  DebugVariable V(Var, Expr, InlinedAt);

  auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get());
  if (Scope == nullptr)
    return true; // Handled by doing nothing. This variable is never in scope.

  const MachineFunction &MF = *MI.getParent()->getParent();

  // Various optimizations may have happened to the value during codegen,
  // recorded in the value substitution table. Apply any substitutions to
  // the instruction / operand number in this DBG_INSTR_REF, and collect
  // any subregister extractions performed during optimization.

  // Create dummy substitution with Src set, for lookup.
  auto SoughtSub =
      MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0);

  SmallVector<unsigned, 4> SeenSubregs;
  auto LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub);
  while (LowerBoundIt != MF.DebugValueSubstitutions.end() &&
         LowerBoundIt->Src == SoughtSub.Src) {
    std::tie(InstNo, OpNo) = LowerBoundIt->Dest;
    SoughtSub.Src = LowerBoundIt->Dest;
    if (unsigned Subreg = LowerBoundIt->Subreg)
      SeenSubregs.push_back(Subreg);
    LowerBoundIt = llvm::lower_bound(MF.DebugValueSubstitutions, SoughtSub);
  }

  // Default machine value number is <None> -- if no instruction defines
  // the corresponding value, it must have been optimized out.
  Optional<ValueIDNum> NewID = None;

  // Try to lookup the instruction number, and find the machine value number
  // that it defines. It could be an instruction, or a PHI.
  auto InstrIt = DebugInstrNumToInstr.find(InstNo);
  auto PHIIt = std::lower_bound(DebugPHINumToValue.begin(),
                                DebugPHINumToValue.end(), InstNo);
  if (InstrIt != DebugInstrNumToInstr.end()) {
    const MachineInstr &TargetInstr = *InstrIt->second.first;
    uint64_t BlockNo = TargetInstr.getParent()->getNumber();

    // Pick out the designated operand. It might be a memory reference, if
    // a register def was folded into a stack store.
    if (OpNo == MachineFunction::DebugOperandMemNumber &&
        TargetInstr.hasOneMemOperand()) {
      Optional<LocIdx> L = findLocationForMemOperand(TargetInstr);
      if (L)
        NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L);
    } else if (OpNo != MachineFunction::DebugOperandMemNumber) {
      assert(OpNo < TargetInstr.getNumOperands());
      const MachineOperand &MO = TargetInstr.getOperand(OpNo);

      // Today, this can only be a register.
      assert(MO.isReg() && MO.isDef());

      unsigned LocID = MTracker->getLocID(MO.getReg());
      LocIdx L = MTracker->LocIDToLocIdx[LocID];
      NewID = ValueIDNum(BlockNo, InstrIt->second.second, L);
    }
    // else: NewID is left as None.
  } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) {
    // It's actually a PHI value. Which value it is might not be obvious, use
    // the resolver helper to find out.
    NewID = resolveDbgPHIs(*MI.getParent()->getParent(), MLiveOuts, MLiveIns,
                           MI, InstNo);
  }

  // Apply any subregister extractions, in reverse. We might have seen code
  // like this:
  //    CALL64 @foo, implicit-def $rax
  //    %0:gr64 = COPY $rax
  //    %1:gr32 = COPY %0.sub_32bit
  //    %2:gr16 = COPY %1.sub_16bit
  //    %3:gr8  = COPY %2.sub_8bit
  // In which case each copy would have been recorded as a substitution with
  // a subregister qualifier. Apply those qualifiers now.
  if (NewID && !SeenSubregs.empty()) {
    unsigned Offset = 0;
    unsigned Size = 0;

    // Look at each subregister that we passed through, and progressively
    // narrow in, accumulating any offsets that occur. Substitutions should
    // only ever be the same or narrower width than what they read from;
    // iterate in reverse order so that we go from wide to small.
    for (unsigned Subreg : reverse(SeenSubregs)) {
      unsigned ThisSize = TRI->getSubRegIdxSize(Subreg);
      unsigned ThisOffset = TRI->getSubRegIdxOffset(Subreg);
      Offset += ThisOffset;
      Size = (Size == 0) ? ThisSize : std::min(Size, ThisSize);
    }

    // If that worked, look for an appropriate subregister with the register
    // where the define happens. Don't look at values that were defined during
    // a stack write: we can't currently express register locations within
    // spills.
    LocIdx L = NewID->getLoc();
    if (NewID && !MTracker->isSpill(L)) {
      // Find the register class for the register where this def happened.
      // FIXME: no index for this?
      Register Reg = MTracker->LocIdxToLocID[L];
      const TargetRegisterClass *TRC = nullptr;
      for (auto *TRCI : TRI->regclasses())
        if (TRCI->contains(Reg))
          TRC = TRCI;
      assert(TRC && "Couldn't find target register class?");

      // If the register we have isn't the right size or in the right place,
      // try to find a subregister inside it.
      unsigned MainRegSize = TRI->getRegSizeInBits(*TRC);
      if (Size != MainRegSize || Offset) {
        // Enumerate all subregisters, searching.
        Register NewReg = 0;
        for (MCSubRegIterator SRI(Reg, TRI, false); SRI.isValid(); ++SRI) {
          unsigned Subreg = TRI->getSubRegIndex(Reg, *SRI);
          unsigned SubregSize = TRI->getSubRegIdxSize(Subreg);
          unsigned SubregOffset = TRI->getSubRegIdxOffset(Subreg);
          if (SubregSize == Size && SubregOffset == Offset) {
            NewReg = *SRI;
            break;
          }
        }

        // If we didn't find anything: there's no way to express our value.
        if (!NewReg) {
          NewID = None;
        } else {
          // Re-state the value as being defined within the subregister
          // that we found.
          LocIdx NewLoc = MTracker->lookupOrTrackRegister(NewReg);
          NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc);
        }
      }
    } else {
      // If we can't handle subregisters, unset the new value.
      NewID = None;
    }
  }

  // Now we have a value number or None. Tell the variable value tracker about
  // it. The rest of this LiveDebugValues implementation acts exactly the same
  // for DBG_INSTR_REFs as DBG_VALUEs (just, the former can refer to values that
  // aren't immediately available).
  DbgValueProperties Properties(Expr, false);
  if (VTracker)
    VTracker->defVar(MI, Properties, NewID);

  // If we're on the final pass through the function, decompose this INSTR_REF
  // into a plain DBG_VALUE.
  if (!TTracker)
    return true;

  // Pick a location for the machine value number, if such a location exists.
  // (This information could be stored in TransferTracker to make it faster).
  Optional<LocIdx> FoundLoc = None;
  for (auto Location : MTracker->locations()) {
    LocIdx CurL = Location.Idx;
    ValueIDNum ID = MTracker->readMLoc(CurL);
    if (NewID && ID == NewID) {
      // If this is the first location with that value, pick it. Otherwise,
      // consider whether it's a "longer term" location.
      if (!FoundLoc) {
        FoundLoc = CurL;
        continue;
      }

      if (MTracker->isSpill(CurL))
        FoundLoc = CurL; // Spills are a longer term location.
      else if (!MTracker->isSpill(*FoundLoc) && !MTracker->isSpill(CurL) &&
               !isCalleeSaved(*FoundLoc) && isCalleeSaved(CurL))
        FoundLoc = CurL; // Callee saved regs are longer term than normal.
    }
  }

  // Tell transfer tracker that the variable value has changed.
  TTracker->redefVar(MI, Properties, FoundLoc);

  // If there was a value with no location, but the value is defined in a
  // later instruction in this block, this is a block-local use-before-def.
  if (!FoundLoc && NewID && NewID->getBlock() == CurBB &&
      NewID->getInst() > CurInst)
    TTracker->addUseBeforeDef(V, {MI.getDebugExpression(), false}, *NewID);

  // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant.
  // This DBG_VALUE is potentially a $noreg / undefined location, if
  // FoundLoc is None.
  // (XXX -- could morph the DBG_INSTR_REF in the future).
  MachineInstr *DbgMI = MTracker->emitLoc(FoundLoc, V, Properties);
  TTracker->PendingDbgValues.push_back(DbgMI);
  TTracker->flushDbgValues(MI.getIterator(), nullptr);
  return true;
}

bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) {
  if (!MI.isDebugPHI())
    return false;

  // Analyse these only when solving the machine value location problem.
  if (VTracker || TTracker)
    return true;

  // First operand is the value location, either a stack slot or register.
  // Second is the debug instruction number of the original PHI.
  const MachineOperand &MO = MI.getOperand(0);
  unsigned InstrNum = MI.getOperand(1).getImm();

  if (MO.isReg()) {
    // The value is whatever's currently in the register. Read and record it,
    // to be analysed later.
    Register Reg = MO.getReg();
    ValueIDNum Num = MTracker->readReg(Reg);
    auto PHIRec = DebugPHIRecord(
        {InstrNum, MI.getParent(), Num, MTracker->lookupOrTrackRegister(Reg)});
    DebugPHINumToValue.push_back(PHIRec);

    // Ensure this register is tracked.
    for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
      MTracker->lookupOrTrackRegister(*RAI);
  } else {
    // The value is whatever's in this stack slot.
    assert(MO.isFI());
    unsigned FI = MO.getIndex();

    // If the stack slot is dead, then this was optimized away.
    // FIXME: stack slot colouring should account for slots that get merged.
    if (MFI->isDeadObjectIndex(FI))
      return true;

    // Identify this spill slot, ensure it's tracked.
    Register Base;
    StackOffset Offs = TFI->getFrameIndexReference(*MI.getMF(), FI, Base);
    SpillLoc SL = {Base, Offs};
    Optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(SL);

    // We might be able to find a value, but have chosen not to, to avoid
    // tracking too much stack information.
    if (!SpillNo)
      return true;

    // Problem: what value should we extract from the stack? LLVM does not
    // record what size the last store to the slot was, and it would become
    // sketchy after stack slot colouring anyway. Take a look at what values
    // are stored on the stack, and pick the largest one that wasn't def'd
    // by a spill (i.e., the value most likely to have been def'd in a register
    // and then spilt).
    std::array<unsigned, 4> CandidateSizes = {64, 32, 16, 8};
    Optional<ValueIDNum> Result = None;
    Optional<LocIdx> SpillLoc = None;
    for (unsigned CS : CandidateSizes) {
      unsigned SpillID = MTracker->getLocID(*SpillNo, {CS, 0});
      SpillLoc = MTracker->getSpillMLoc(SpillID);
      ValueIDNum Val = MTracker->readMLoc(*SpillLoc);
      // If this value was defined in its own position, then it was probably
      // an aliasing index of a small value that was spilt.
      if (Val.getLoc() != SpillLoc->asU64()) {
        Result = Val;
        break;
      }
    }

    // If we didn't find anything, we're probably looking at a PHI, or a memory
    // store folded into an instruction. FIXME: Take a guess that it's 64
    // bits. This isn't ideal, but tracking the size that the spill is
    // "supposed" to be is more complex, and benefits a small number of
    // locations.
    if (!Result) {
      unsigned SpillID = MTracker->getLocID(*SpillNo, {64, 0});
      SpillLoc = MTracker->getSpillMLoc(SpillID);
      Result = MTracker->readMLoc(*SpillLoc);
    }

    // Record this DBG_PHI for later analysis.
    auto DbgPHI =
        DebugPHIRecord({InstrNum, MI.getParent(), *Result, *SpillLoc});
    DebugPHINumToValue.push_back(DbgPHI);
  }

  return true;
}

void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
  // Meta Instructions do not affect the debug liveness of any register they
  // define.
  if (MI.isImplicitDef()) {
    // Except when there's an implicit def, and the location it's defining has
    // no value number. The whole point of an implicit def is to announce that
    // the register is live, without being specific about its value. So define
    // a value if there isn't one already.
    ValueIDNum Num = MTracker->readReg(MI.getOperand(0).getReg());
    // Has a legitimate value -> ignore the implicit def.
    if (Num.getLoc() != 0)
      return;
    // Otherwise, def it here.
  } else if (MI.isMetaInstruction())
    return;

  // We always ignore SP defines on call instructions, they don't actually
  // change the value of the stack pointer... except for win32's _chkstk. This
  // is rare: filter quickly for the common case (no stack adjustments, not a
  // call, etc). If it is a call that modifies SP, recognise the SP register
  // defs.
  bool CallChangesSP = false;
  if (AdjustsStackInCalls && MI.isCall() && MI.getOperand(0).isSymbol() &&
      !strcmp(MI.getOperand(0).getSymbolName(), StackProbeSymbolName.data()))
    CallChangesSP = true;

  // Test whether we should ignore a def of this register due to it being part
  // of the stack pointer.
  auto IgnoreSPAlias = [this, &MI, CallChangesSP](Register R) -> bool {
    if (CallChangesSP)
      return false;
    return MI.isCall() && MTracker->SPAliases.count(R);
  };

  // Find the regs killed by MI, and find regmasks of preserved regs.
  // Max out the number of statically allocated elements in `DeadRegs`, as this
  // prevents fallback to std::set::count() operations.
  SmallSet<uint32_t, 32> DeadRegs;
  SmallVector<const uint32_t *, 4> RegMasks;
  SmallVector<const MachineOperand *, 4> RegMaskPtrs;
  for (const MachineOperand &MO : MI.operands()) {
    // Determine whether the operand is a register def.
    if (MO.isReg() && MO.isDef() && MO.getReg() &&
        Register::isPhysicalRegister(MO.getReg()) &&
        !IgnoreSPAlias(MO.getReg())) {
      // Remove ranges of all aliased registers.
      for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
        // FIXME: Can we break out of this loop early if no insertion occurs?
        DeadRegs.insert(*RAI);
    } else if (MO.isRegMask()) {
      RegMasks.push_back(MO.getRegMask());
      RegMaskPtrs.push_back(&MO);
    }
  }

  // Tell MLocTracker about all definitions, of regmasks and otherwise.
  for (uint32_t DeadReg : DeadRegs)
    MTracker->defReg(DeadReg, CurBB, CurInst);

  for (auto *MO : RegMaskPtrs)
    MTracker->writeRegMask(MO, CurBB, CurInst);

  // If this instruction writes to a spill slot, def that slot.
  if (hasFoldedStackStore(MI)) {
    if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
      for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
        unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
        LocIdx L = MTracker->getSpillMLoc(SpillID);
        MTracker->setMLoc(L, ValueIDNum(CurBB, CurInst, L));
      }
    }
  }

  if (!TTracker)
    return;

  // When committing variable values to locations: tell transfer tracker that
  // we've clobbered things. It may be able to recover the variable from a
  // different location.

  // Inform TTracker about any direct clobbers.
  for (uint32_t DeadReg : DeadRegs) {
    LocIdx Loc = MTracker->lookupOrTrackRegister(DeadReg);
    TTracker->clobberMloc(Loc, MI.getIterator(), false);
  }

  // Look for any clobbers performed by a register mask. Only test locations
  // that are actually being tracked.
  if (!RegMaskPtrs.empty()) {
    for (auto L : MTracker->locations()) {
      // Stack locations can't be clobbered by regmasks.
      if (MTracker->isSpill(L.Idx))
        continue;

      Register Reg = MTracker->LocIdxToLocID[L.Idx];
      if (IgnoreSPAlias(Reg))
        continue;

      for (auto *MO : RegMaskPtrs)
        if (MO->clobbersPhysReg(Reg))
          TTracker->clobberMloc(L.Idx, MI.getIterator(), false);
    }
  }

  // Tell TTracker about any folded stack store.
  if (hasFoldedStackStore(MI)) {
    if (Optional<SpillLocationNo> SpillNo = extractSpillBaseRegAndOffset(MI)) {
      for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
        unsigned SpillID = MTracker->getSpillIDWithIdx(*SpillNo, I);
        LocIdx L = MTracker->getSpillMLoc(SpillID);
        TTracker->clobberMloc(L, MI.getIterator(), true);
      }
    }
  }
}

void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) {
  // In all circumstances, re-def all aliases. It's definitely a new value now.
  for (MCRegAliasIterator RAI(DstRegNum, TRI, true); RAI.isValid(); ++RAI)
    MTracker->defReg(*RAI, CurBB, CurInst);

  ValueIDNum SrcValue = MTracker->readReg(SrcRegNum);
  MTracker->setReg(DstRegNum, SrcValue);

  // Copy subregisters from one location to another.
  for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) {
    unsigned SrcSubReg = SRI.getSubReg();
    unsigned SubRegIdx = SRI.getSubRegIndex();
    unsigned DstSubReg = TRI->getSubReg(DstRegNum, SubRegIdx);
    if (!DstSubReg)
      continue;

    // Do copy. There are two matching subregisters, the source value should
    // have been def'd when the super-reg was, the latter might not be tracked
    // yet.
    // This will force SrcSubReg to be tracked, if it isn't yet. Will read
    // mphi values if it wasn't tracked.
1462 LocIdx SrcL = MTracker->lookupOrTrackRegister(SrcSubReg); 1463 LocIdx DstL = MTracker->lookupOrTrackRegister(DstSubReg); 1464 (void)SrcL; 1465 (void)DstL; 1466 ValueIDNum CpyValue = MTracker->readReg(SrcSubReg); 1467 1468 MTracker->setReg(DstSubReg, CpyValue); 1469 } 1470 } 1471 1472 Optional<SpillLocationNo> 1473 InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, 1474 MachineFunction *MF) { 1475 // TODO: Handle multiple stores folded into one. 1476 if (!MI.hasOneMemOperand()) 1477 return None; 1478 1479 // Reject any memory operand that's aliased -- we can't guarantee its value. 1480 auto MMOI = MI.memoperands_begin(); 1481 const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); 1482 if (PVal->isAliased(MFI)) 1483 return None; 1484 1485 if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII)) 1486 return None; // This is not a spill instruction, since no valid size was 1487 // returned from either function. 1488 1489 return extractSpillBaseRegAndOffset(MI); 1490 } 1491 1492 bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI, 1493 MachineFunction *MF, unsigned &Reg) { 1494 if (!isSpillInstruction(MI, MF)) 1495 return false; 1496 1497 int FI; 1498 Reg = TII->isStoreToStackSlotPostFE(MI, FI); 1499 return Reg != 0; 1500 } 1501 1502 Optional<SpillLocationNo> 1503 InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI, 1504 MachineFunction *MF, unsigned &Reg) { 1505 if (!MI.hasOneMemOperand()) 1506 return None; 1507 1508 // FIXME: Handle folded restore instructions with more than one memory 1509 // operand. 1510 if (MI.getRestoreSize(TII)) { 1511 Reg = MI.getOperand(0).getReg(); 1512 return extractSpillBaseRegAndOffset(MI); 1513 } 1514 return None; 1515 } 1516 1517 bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) { 1518 // XXX -- it's too difficult to implement VarLocBasedImpl's stack location 1519 // limitations under the new model. Therefore, when comparing them, compare 1520 // versions that don't attempt spills or restores at all. 1521 if (EmulateOldLDV) 1522 return false; 1523 1530 1531 MachineFunction *MF = MI.getMF(); 1532 unsigned Reg; 1533 1534 LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump();); 1535 1536 // Strictly limit ourselves to plain loads and stores, not all instructions 1537 // that can access the stack. 1538 int FIDummy; 1539 if (!TII->isStoreToStackSlotPostFE(MI, FIDummy) && 1540 !TII->isLoadFromStackSlotPostFE(MI, FIDummy)) 1541 return false; 1542 1543 // First, if there are any DBG_VALUEs pointing at a spill slot that is 1544 // written to, terminate that variable location. The value in memory 1545 // will have changed. DbgEntityHistoryCalculator doesn't try to detect this. 1546 if (Optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) { 1547 // Un-set this location and clobber, so that earlier locations don't 1548 // continue past this store.
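// A rough sketch of the effect, with hypothetical numbers: if spill slot #3
// is written by instruction 12 of block 4, every tracked (size, offset)
// index of slot #3 is overwritten below with the value number
// {4, 12, <that index>}, so no DBG_VALUE pointing at the old slot contents
// can be propagated past this store.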
1549 for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) { 1550 unsigned SpillID = MTracker->getSpillIDWithIdx(*Loc, SlotIdx); 1551 Optional<LocIdx> MLoc = MTracker->getSpillMLoc(SpillID); 1552 if (!MLoc) 1553 continue; 1554 1555 // We need to over-write the stack slot with something (here, a def at 1556 // this instruction) to ensure no values are preserved in this stack slot 1557 // after the spill. It also prevents TTracker from trying to recover the 1558 // location and re-installing it in the same place. 1559 ValueIDNum Def(CurBB, CurInst, *MLoc); 1560 MTracker->setMLoc(*MLoc, Def); 1561 if (TTracker) 1562 TTracker->clobberMloc(*MLoc, MI.getIterator()); 1563 } 1564 } 1565 1566 // Try to recognise spill and restore instructions that may transfer a value. 1567 if (isLocationSpill(MI, MF, Reg)) { 1568 // isLocationSpill returning true should guarantee we can extract a 1569 // location. 1570 SpillLocationNo Loc = *extractSpillBaseRegAndOffset(MI); 1571 1572 auto DoTransfer = [&](Register SrcReg, unsigned SpillID) { 1573 auto ReadValue = MTracker->readReg(SrcReg); 1574 LocIdx DstLoc = MTracker->getSpillMLoc(SpillID); 1575 MTracker->setMLoc(DstLoc, ReadValue); 1576 1577 if (TTracker) { 1578 LocIdx SrcLoc = MTracker->getRegMLoc(SrcReg); 1579 TTracker->transferMlocs(SrcLoc, DstLoc, MI.getIterator()); 1580 } 1581 }; 1582 1583 // Then, transfer subreg bits. 1584 for (MCSubRegIterator SRI(Reg, TRI, false); SRI.isValid(); ++SRI) { 1585 // Ensure this reg is tracked, 1586 (void)MTracker->lookupOrTrackRegister(*SRI); 1587 unsigned SubregIdx = TRI->getSubRegIndex(Reg, *SRI); 1588 unsigned SpillID = MTracker->getLocID(Loc, SubregIdx); 1589 DoTransfer(*SRI, SpillID); 1590 } 1591 1592 // Directly lookup size of main source reg, and transfer. 1593 unsigned Size = TRI->getRegSizeInBits(Reg, *MRI); 1594 unsigned SpillID = MTracker->getLocID(Loc, {Size, 0}); 1595 DoTransfer(Reg, SpillID); 1596 } else { 1597 Optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg); 1598 if (!Loc) 1599 return false; 1600 1601 // Assumption: we're reading from the base of the stack slot, not some 1602 // offset into it. It seems very unlikely LLVM would ever generate 1603 // restores where this wasn't true. This then becomes a question of what 1604 // subregisters in the destination register line up with positions in the 1605 // stack slot. 1606 1607 // Def all registers that alias the destination. 1608 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) 1609 MTracker->defReg(*RAI, CurBB, CurInst); 1610 1611 // Now find subregisters within the destination register, and load values 1612 // from stack slot positions. 1613 auto DoTransfer = [&](Register DestReg, unsigned SpillID) { 1614 LocIdx SrcIdx = MTracker->getSpillMLoc(SpillID); 1615 auto ReadValue = MTracker->readMLoc(SrcIdx); 1616 MTracker->setReg(DestReg, ReadValue); 1617 1618 if (TTracker) { 1619 LocIdx DstLoc = MTracker->getRegMLoc(DestReg); 1620 TTracker->transferMlocs(SrcIdx, DstLoc, MI.getIterator()); 1621 } 1622 }; 1623 1624 for (MCSubRegIterator SRI(Reg, TRI, false); SRI.isValid(); ++SRI) { 1625 unsigned Subreg = TRI->getSubRegIndex(Reg, *SRI); 1626 unsigned SpillID = MTracker->getLocID(*Loc, Subreg); 1627 DoTransfer(*SRI, SpillID); 1628 } 1629 1630 // Directly look up this registers slot idx by size, and transfer. 
1631 unsigned Size = TRI->getRegSizeInBits(Reg, *MRI); 1632 unsigned SpillID = MTracker->getLocID(*Loc, {Size, 0}); 1633 DoTransfer(Reg, SpillID); 1634 } 1635 return true; 1636 } 1637 1638 bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) { 1639 auto DestSrc = TII->isCopyInstr(MI); 1640 if (!DestSrc) 1641 return false; 1642 1643 const MachineOperand *DestRegOp = DestSrc->Destination; 1644 const MachineOperand *SrcRegOp = DestSrc->Source; 1645 1646 auto isCalleeSavedReg = [&](unsigned Reg) { 1647 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI) 1648 if (CalleeSavedRegs.test(*RAI)) 1649 return true; 1650 return false; 1651 }; 1652 1653 Register SrcReg = SrcRegOp->getReg(); 1654 Register DestReg = DestRegOp->getReg(); 1655 1656 // Ignore identity copies. Yep, these make it as far as LiveDebugValues. 1657 if (SrcReg == DestReg) 1658 return true; 1659 1660 // For emulating VarLocBasedImpl: 1661 // We want to recognize instructions where destination register is callee 1662 // saved register. If register that could be clobbered by the call is 1663 // included, there would be a great chance that it is going to be clobbered 1664 // soon. It is more likely that previous register, which is callee saved, is 1665 // going to stay unclobbered longer, even if it is killed. 1666 // 1667 // For InstrRefBasedImpl, we can track multiple locations per value, so 1668 // ignore this condition. 1669 if (EmulateOldLDV && !isCalleeSavedReg(DestReg)) 1670 return false; 1671 1672 // InstrRefBasedImpl only followed killing copies. 1673 if (EmulateOldLDV && !SrcRegOp->isKill()) 1674 return false; 1675 1676 // Copy MTracker info, including subregs if available. 1677 InstrRefBasedLDV::performCopy(SrcReg, DestReg); 1678 1679 // Only produce a transfer of DBG_VALUE within a block where old LDV 1680 // would have. We might make use of the additional value tracking in some 1681 // other way, later. 1682 if (TTracker && isCalleeSavedReg(DestReg) && SrcRegOp->isKill()) 1683 TTracker->transferMlocs(MTracker->getRegMLoc(SrcReg), 1684 MTracker->getRegMLoc(DestReg), MI.getIterator()); 1685 1686 // VarLocBasedImpl would quit tracking the old location after copying. 1687 if (EmulateOldLDV && SrcReg != DestReg) 1688 MTracker->defReg(SrcReg, CurBB, CurInst); 1689 1690 // Finally, the copy might have clobbered variables based on the destination 1691 // register. Tell TTracker about it, in case a backup location exists. 1692 if (TTracker) { 1693 for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) { 1694 LocIdx ClobberedLoc = MTracker->getRegMLoc(*RAI); 1695 TTracker->clobberMloc(ClobberedLoc, MI.getIterator(), false); 1696 } 1697 } 1698 1699 return true; 1700 } 1701 1702 /// Accumulate a mapping between each DILocalVariable fragment and other 1703 /// fragments of that DILocalVariable which overlap. This reduces work during 1704 /// the data-flow stage from "Find any overlapping fragments" to "Check if the 1705 /// known-to-overlap fragments are present". 1706 /// \param MI A previously unprocessed debug instruction to analyze for 1707 /// fragment usage. 
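/// For illustration (the fragments are hypothetical): given one variable
/// with a fragment covering bits [0, 32) and another covering bits [16, 48),
/// the two bit ranges intersect, so each fragment is recorded in the other's
/// overlap vector; a third fragment covering bits [48, 64) overlaps neither
/// and keeps an empty overlap vector.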
1708 void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) { 1709 assert(MI.isDebugValue() || MI.isDebugRef()); 1710 DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(), 1711 MI.getDebugLoc()->getInlinedAt()); 1712 FragmentInfo ThisFragment = MIVar.getFragmentOrDefault(); 1713 1714 // If this is the first sighting of this variable, then we are guaranteed 1715 // there are currently no overlapping fragments either. Initialize the set 1716 // of seen fragments, record no overlaps for the current one, and return. 1717 auto SeenIt = SeenFragments.find(MIVar.getVariable()); 1718 if (SeenIt == SeenFragments.end()) { 1719 SmallSet<FragmentInfo, 4> OneFragment; 1720 OneFragment.insert(ThisFragment); 1721 SeenFragments.insert({MIVar.getVariable(), OneFragment}); 1722 1723 OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}}); 1724 return; 1725 } 1726 1727 // If this particular Variable/Fragment pair already exists in the overlap 1728 // map, it has already been accounted for. 1729 auto IsInOLapMap = 1730 OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}}); 1731 if (!IsInOLapMap.second) 1732 return; 1733 1734 auto &ThisFragmentsOverlaps = IsInOLapMap.first->second; 1735 auto &AllSeenFragments = SeenIt->second; 1736 1737 // Otherwise, examine all other seen fragments for this variable, with "this" 1738 // fragment being a previously unseen fragment. Record any pair of 1739 // overlapping fragments. 1740 for (auto &ASeenFragment : AllSeenFragments) { 1741 // Does this previously seen fragment overlap? 1742 if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) { 1743 // Yes: Mark the current fragment as being overlapped. 1744 ThisFragmentsOverlaps.push_back(ASeenFragment); 1745 // Mark the previously seen fragment as being overlapped by the current 1746 // one. 1747 auto ASeenFragmentsOverlaps = 1748 OverlapFragments.find({MIVar.getVariable(), ASeenFragment}); 1749 assert(ASeenFragmentsOverlaps != OverlapFragments.end() && 1750 "Previously seen var fragment has no vector of overlaps"); 1751 ASeenFragmentsOverlaps->second.push_back(ThisFragment); 1752 } 1753 } 1754 1755 AllSeenFragments.insert(ThisFragment); 1756 } 1757 1758 void InstrRefBasedLDV::process(MachineInstr &MI, ValueIDNum **MLiveOuts, 1759 ValueIDNum **MLiveIns) { 1760 // Try to interpret an MI as a debug or transfer instruction. Only if it's 1761 // none of these should we interpret its register defs as new value 1762 // definitions. 1763 if (transferDebugValue(MI)) 1764 return; 1765 if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns)) 1766 return; 1767 if (transferDebugPHI(MI)) 1768 return; 1769 if (transferRegisterCopy(MI)) 1770 return; 1771 if (transferSpillOrRestoreInst(MI)) 1772 return; 1773 transferRegisterDef(MI); 1774 } 1775 1776 void InstrRefBasedLDV::produceMLocTransferFunction( 1777 MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer, 1778 unsigned MaxNumBlocks) { 1779 // Because we try to optimize around register mask operands by ignoring regs 1780 // that aren't currently tracked, we set up something ugly for later: RegMask 1781 // operands that are seen earlier than the first use of a register still need 1782 // to clobber that register in the transfer function. But this information 1783 // isn't actively recorded. Instead, we track each RegMask used in each block, 1784 // and accumulate the clobbered but untracked registers in each block into 1785 // the following bitvector.
Later, if new values are tracked, we can add 1786 // appropriate clobbers. 1787 SmallVector<BitVector, 32> BlockMasks; 1788 BlockMasks.resize(MaxNumBlocks); 1789 1790 // Reserve one bit per register for the masks described above. 1791 unsigned BVWords = MachineOperand::getRegMaskSize(TRI->getNumRegs()); 1792 for (auto &BV : BlockMasks) 1793 BV.resize(TRI->getNumRegs(), true); 1794 1795 // Step through all instructions and inhale the transfer function. 1796 for (auto &MBB : MF) { 1797 // Object fields that are read by trackers to know where we are in the 1798 // function. 1799 CurBB = MBB.getNumber(); 1800 CurInst = 1; 1801 1802 // Set all machine locations to a PHI value. For transfer function 1803 // production only, this signifies the live-in value to the block. 1804 MTracker->reset(); 1805 MTracker->setMPhis(CurBB); 1806 1807 // Step through each instruction in this block. 1808 for (auto &MI : MBB) { 1809 process(MI); 1810 // Also accumulate fragment map. 1811 if (MI.isDebugValue() || MI.isDebugRef()) 1812 accumulateFragmentMap(MI); 1813 1814 // Create a map from the instruction number (if present) to the 1815 // MachineInstr and its position. 1816 if (uint64_t InstrNo = MI.peekDebugInstrNum()) { 1817 auto InstrAndPos = std::make_pair(&MI, CurInst); 1818 auto InsertResult = 1819 DebugInstrNumToInstr.insert(std::make_pair(InstrNo, InstrAndPos)); 1820 1821 // There should never be duplicate instruction numbers. 1822 assert(InsertResult.second); 1823 (void)InsertResult; 1824 } 1825 1826 ++CurInst; 1827 } 1828 1829 // Produce the transfer function, a map of machine location to new value. If 1830 // any machine location has the live-in phi value from the start of the 1831 // block, it's live-through and doesn't need recording in the transfer 1832 // function. 1833 for (auto Location : MTracker->locations()) { 1834 LocIdx Idx = Location.Idx; 1835 ValueIDNum &P = Location.Value; 1836 if (P.isPHI() && P.getLoc() == Idx.asU64()) 1837 continue; 1838 1839 // Insert-or-update. 1840 auto &TransferMap = MLocTransfer[CurBB]; 1841 auto Result = TransferMap.insert(std::make_pair(Idx.asU64(), P)); 1842 if (!Result.second) 1843 Result.first->second = P; 1844 } 1845 1846 // Accumulate any bitmask operands into the clobbered reg mask for this 1847 // block. 1848 for (auto &P : MTracker->Masks) { 1849 BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords); 1850 } 1851 } 1852 1853 // Compute a bitvector of all the registers that are tracked in this block. 1854 BitVector UsedRegs(TRI->getNumRegs()); 1855 for (auto Location : MTracker->locations()) { 1856 unsigned ID = MTracker->LocIdxToLocID[Location.Idx]; 1857 // Ignore stack slots, and aliases of the stack pointer. 1858 if (ID >= TRI->getNumRegs() || MTracker->SPAliases.count(ID)) 1859 continue; 1860 UsedRegs.set(ID); 1861 } 1862 1863 // Check that any regmask-clobber of a register that gets tracked is not 1864 // live-through in the transfer function. It needs to be clobbered at the 1865 // very least. 1866 for (unsigned int I = 0; I < MaxNumBlocks; ++I) { 1867 BitVector &BV = BlockMasks[I]; 1868 BV.flip(); 1869 BV &= UsedRegs; 1870 // This produces all the bits that we clobber, but also use. Check that 1871 // they're all clobbered or at least set in the designated transfer 1872 // elem.
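// Restating the bit arithmetic as a descriptive aid: each BlockMasks[I]
// starts all-ones, and clearBitsNotInMask() above leaves a bit set only if
// every regmask in block I preserves that register. flip() therefore yields
// the registers clobbered by some regmask in the block, and the AND with
// UsedRegs narrows that to registers we actually track -- the only ones that
// need an entry forced into the transfer function below.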
1873 for (unsigned Bit : BV.set_bits()) { 1874 unsigned ID = MTracker->getLocID(Bit); 1875 LocIdx Idx = MTracker->LocIDToLocIdx[ID]; 1876 auto &TransferMap = MLocTransfer[I]; 1877 1878 // Install a value representing the fact that this location is effectively 1879 // written to in this block. As there's no reserved value, instead use 1880 // a value number that is never generated. Pick the value number for the 1881 // first instruction in the block, def'ing this location, which we know 1882 // this block never used anyway. 1883 ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx); 1884 auto Result = 1885 TransferMap.insert(std::make_pair(Idx.asU64(), NotGeneratedNum)); 1886 if (!Result.second) { 1887 ValueIDNum &ValueID = Result.first->second; 1888 if (ValueID.getBlock() == I && ValueID.isPHI()) 1889 // It was left as live-through. Set it to clobbered. 1890 ValueID = NotGeneratedNum; 1891 } 1892 } 1893 } 1894 } 1895 1896 bool InstrRefBasedLDV::mlocJoin( 1897 MachineBasicBlock &MBB, SmallPtrSet<const MachineBasicBlock *, 16> &Visited, 1898 ValueIDNum **OutLocs, ValueIDNum *InLocs) { 1899 LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n"); 1900 bool Changed = false; 1901 1902 // Handle value-propagation when control flow merges on entry to a block. For 1903 // any location without a PHI already placed, the location has the same value 1904 // as its predecessors. If a PHI is placed, test to see whether it's now a 1905 // redundant PHI that we can eliminate. 1906 1907 SmallVector<const MachineBasicBlock *, 8> BlockOrders; 1908 for (auto Pred : MBB.predecessors()) 1909 BlockOrders.push_back(Pred); 1910 1911 // Visit predecessors in RPOT order. 1912 auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) { 1913 return BBToOrder.find(A)->second < BBToOrder.find(B)->second; 1914 }; 1915 llvm::sort(BlockOrders, Cmp); 1916 1917 // Skip entry block. 1918 if (BlockOrders.size() == 0) 1919 return false; 1920 1921 // Step through all machine locations, look at each predecessor and test 1922 // whether we can eliminate redundant PHIs. 1923 for (auto Location : MTracker->locations()) { 1924 LocIdx Idx = Location.Idx; 1925 1926 // Pick out the first predecessors live-out value for this location. It's 1927 // guaranteed to not be a backedge, as we order by RPO. 1928 ValueIDNum FirstVal = OutLocs[BlockOrders[0]->getNumber()][Idx.asU64()]; 1929 1930 // If we've already eliminated a PHI here, do no further checking, just 1931 // propagate the first live-in value into this block. 1932 if (InLocs[Idx.asU64()] != ValueIDNum(MBB.getNumber(), 0, Idx)) { 1933 if (InLocs[Idx.asU64()] != FirstVal) { 1934 InLocs[Idx.asU64()] = FirstVal; 1935 Changed |= true; 1936 } 1937 continue; 1938 } 1939 1940 // We're now examining a PHI to see whether it's un-necessary. Loop around 1941 // the other live-in values and test whether they're all the same. 1942 bool Disagree = false; 1943 for (unsigned int I = 1; I < BlockOrders.size(); ++I) { 1944 const MachineBasicBlock *PredMBB = BlockOrders[I]; 1945 const ValueIDNum &PredLiveOut = 1946 OutLocs[PredMBB->getNumber()][Idx.asU64()]; 1947 1948 // Incoming values agree, continue trying to eliminate this PHI. 1949 if (FirstVal == PredLiveOut) 1950 continue; 1951 1952 // We can also accept a PHI value that feeds back into itself. 1953 if (PredLiveOut == ValueIDNum(MBB.getNumber(), 0, Idx)) 1954 continue; 1955 1956 // Live-out of a predecessor disagrees with the first predecessor. 1957 Disagree = true; 1958 } 1959 1960 // No disagreement? No PHI. 
Otherwise, leave the PHI in live-ins. 1961 if (!Disagree) { 1962 InLocs[Idx.asU64()] = FirstVal; 1963 Changed |= true; 1964 } 1965 } 1966 1967 // TODO: Reimplement NumInserted and NumRemoved. 1968 return Changed; 1969 } 1970 1971 void InstrRefBasedLDV::findStackIndexInterference( 1972 SmallVectorImpl<unsigned> &Slots) { 1973 // We could spend a bit of time finding the exact, minimal, set of stack 1974 // indexes that interfere with each other, much like reg units. Or, we can 1975 // rely on the fact that: 1976 // * The smallest / lowest index will interfere with everything at zero 1977 // offset, which will be the largest set of registers, 1978 // * Most indexes with non-zero offset will end up being interference units 1979 // anyway. 1980 // So just pick those out and return them. 1981 1982 // We can rely on a single-byte stack index existing already, because we 1983 // initialize them in MLocTracker. 1984 auto It = MTracker->StackSlotIdxes.find({8, 0}); 1985 assert(It != MTracker->StackSlotIdxes.end()); 1986 Slots.push_back(It->second); 1987 1988 // Find anything that has a non-zero offset and add that too. 1989 for (auto &Pair : MTracker->StackSlotIdxes) { 1990 // Is offset zero? If so, ignore. 1991 if (!Pair.first.second) 1992 continue; 1993 Slots.push_back(Pair.second); 1994 } 1995 } 1996 1997 void InstrRefBasedLDV::placeMLocPHIs( 1998 MachineFunction &MF, SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, 1999 ValueIDNum **MInLocs, SmallVectorImpl<MLocTransferMap> &MLocTransfer) { 2000 SmallVector<unsigned, 4> StackUnits; 2001 findStackIndexInterference(StackUnits); 2002 2003 // To avoid repeatedly running the PHI placement algorithm, leverage the 2004 // fact that a def of register MUST also def its register units. Find the 2005 // units for registers, place PHIs for them, and then replicate them for 2006 // aliasing registers. Some inputs that are never def'd (DBG_PHIs of 2007 // arguments) don't lead to register units being tracked, just place PHIs for 2008 // those registers directly. Stack slots have their own form of "unit", 2009 // store them to one side. 2010 SmallSet<Register, 32> RegUnitsToPHIUp; 2011 SmallSet<LocIdx, 32> NormalLocsToPHI; 2012 SmallSet<SpillLocationNo, 32> StackSlots; 2013 for (auto Location : MTracker->locations()) { 2014 LocIdx L = Location.Idx; 2015 if (MTracker->isSpill(L)) { 2016 StackSlots.insert(MTracker->locIDToSpill(MTracker->LocIdxToLocID[L])); 2017 continue; 2018 } 2019 2020 Register R = MTracker->LocIdxToLocID[L]; 2021 SmallSet<Register, 8> FoundRegUnits; 2022 bool AnyIllegal = false; 2023 for (MCRegUnitIterator RUI(R.asMCReg(), TRI); RUI.isValid(); ++RUI) { 2024 for (MCRegUnitRootIterator URoot(*RUI, TRI); URoot.isValid(); ++URoot){ 2025 if (!MTracker->isRegisterTracked(*URoot)) { 2026 // Not all roots were loaded into the tracking map: this register 2027 // isn't actually def'd anywhere, we only read from it. Generate PHIs 2028 // for this reg, but don't iterate units. 2029 AnyIllegal = true; 2030 } else { 2031 FoundRegUnits.insert(*URoot); 2032 } 2033 } 2034 } 2035 2036 if (AnyIllegal) { 2037 NormalLocsToPHI.insert(L); 2038 continue; 2039 } 2040 2041 RegUnitsToPHIUp.insert(FoundRegUnits.begin(), FoundRegUnits.end()); 2042 } 2043 2044 // Lambda to fetch PHIs for a given location, and write into the PHIBlocks 2045 // collection. 2046 SmallVector<MachineBasicBlock *, 32> PHIBlocks; 2047 auto CollectPHIsForLoc = [&](LocIdx L) { 2048 // Collect the set of defs. 
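// A block counts as a "def" of location L if its transfer function has an
// entry for L, i.e. L leaves that block with something other than the
// block's live-in PHI value. Those blocks (plus the entry block, when any
// exist) are what the IDF calculator below is given as defining blocks.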
2049 SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; 2050 for (unsigned int I = 0; I < OrderToBB.size(); ++I) { 2051 MachineBasicBlock *MBB = OrderToBB[I]; 2052 const auto &TransferFunc = MLocTransfer[MBB->getNumber()]; 2053 if (TransferFunc.find(L) != TransferFunc.end()) 2054 DefBlocks.insert(MBB); 2055 } 2056 2057 // The entry block defs the location too: it's the live-in / argument value. 2058 // Only insert if there are other defs though; everything is trivially live 2059 // through otherwise. 2060 if (!DefBlocks.empty()) 2061 DefBlocks.insert(&*MF.begin()); 2062 2063 // Ask the SSA construction algorithm where we should put PHIs. Clear 2064 // anything that might have been hanging around from earlier. 2065 PHIBlocks.clear(); 2066 BlockPHIPlacement(AllBlocks, DefBlocks, PHIBlocks); 2067 }; 2068 2069 auto InstallPHIsAtLoc = [&PHIBlocks, &MInLocs](LocIdx L) { 2070 for (const MachineBasicBlock *MBB : PHIBlocks) 2071 MInLocs[MBB->getNumber()][L.asU64()] = ValueIDNum(MBB->getNumber(), 0, L); 2072 }; 2073 2074 // For locations with no reg units, just place PHIs. 2075 for (LocIdx L : NormalLocsToPHI) { 2076 CollectPHIsForLoc(L); 2077 // Install those PHI values into the live-in value array. 2078 InstallPHIsAtLoc(L); 2079 } 2080 2081 // For stack slots, calculate PHIs for the equivalent of the units, then 2082 // install for each index. 2083 for (SpillLocationNo Slot : StackSlots) { 2084 for (unsigned Idx : StackUnits) { 2085 unsigned SpillID = MTracker->getSpillIDWithIdx(Slot, Idx); 2086 LocIdx L = MTracker->getSpillMLoc(SpillID); 2087 CollectPHIsForLoc(L); 2088 InstallPHIsAtLoc(L); 2089 2090 // Find anything that aliases this stack index, install PHIs for it too. 2091 unsigned Size, Offset; 2092 std::tie(Size, Offset) = MTracker->StackIdxesToPos[Idx]; 2093 for (auto &Pair : MTracker->StackSlotIdxes) { 2094 unsigned ThisSize, ThisOffset; 2095 std::tie(ThisSize, ThisOffset) = Pair.first; 2096 if (ThisSize + ThisOffset <= Offset || Size + Offset <= ThisOffset) 2097 continue; 2098 2099 unsigned ThisID = MTracker->getSpillIDWithIdx(Slot, Pair.second); 2100 LocIdx ThisL = MTracker->getSpillMLoc(ThisID); 2101 InstallPHIsAtLoc(ThisL); 2102 } 2103 } 2104 } 2105 2106 // For reg units, place PHIs, and then place them for any aliasing registers. 2107 for (Register R : RegUnitsToPHIUp) { 2108 LocIdx L = MTracker->lookupOrTrackRegister(R); 2109 CollectPHIsForLoc(L); 2110 2111 // Install those PHI values into the live-in value array. 2112 InstallPHIsAtLoc(L); 2113 2114 // Now find aliases and install PHIs for those. 2115 for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) { 2116 // Super-registers that are "above" the largest register read/written by 2117 // the function will alias, but will not be tracked. 2118 if (!MTracker->isRegisterTracked(*RAI)) 2119 continue; 2120 2121 LocIdx AliasLoc = MTracker->lookupOrTrackRegister(*RAI); 2122 InstallPHIsAtLoc(AliasLoc); 2123 } 2124 } 2125 } 2126 2127 void InstrRefBasedLDV::buildMLocValueMap( 2128 MachineFunction &MF, ValueIDNum **MInLocs, ValueIDNum **MOutLocs, 2129 SmallVectorImpl<MLocTransferMap> &MLocTransfer) { 2130 std::priority_queue<unsigned int, std::vector<unsigned int>, 2131 std::greater<unsigned int>> 2132 Worklist, Pending; 2133 2134 // We track what is on the current and pending worklist to avoid inserting 2135 // the same thing twice. We could avoid this with a custom priority queue, 2136 // but this is probably not worth it. 
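// As a sketch of the worklist discipline (the block numbers are
// hypothetical): while visiting RPO block 3, a changed live-out pushes a
// higher-numbered successor such as 5 onto Worklist for the current sweep,
// while a backedge successor such as 1 goes onto Pending and is only
// revisited after the sweep finishes and the two queues are swapped. The
// outer loop terminates once a sweep leaves Pending empty.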
2137 SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist; 2138 2139 // Initialize worklist with every block to be visited. Also produce list of 2140 // all blocks. 2141 SmallPtrSet<MachineBasicBlock *, 32> AllBlocks; 2142 for (unsigned int I = 0; I < BBToOrder.size(); ++I) { 2143 Worklist.push(I); 2144 OnWorklist.insert(OrderToBB[I]); 2145 AllBlocks.insert(OrderToBB[I]); 2146 } 2147 2148 // Initialize entry block to PHIs. These represent arguments. 2149 for (auto Location : MTracker->locations()) 2150 MInLocs[0][Location.Idx.asU64()] = ValueIDNum(0, 0, Location.Idx); 2151 2152 MTracker->reset(); 2153 2154 // Start by placing PHIs, using the usual SSA constructor algorithm. Consider 2155 // any machine-location that isn't live-through a block to be def'd in that 2156 // block. 2157 placeMLocPHIs(MF, AllBlocks, MInLocs, MLocTransfer); 2158 2159 // Propagate values to eliminate redundant PHIs. At the same time, this 2160 // produces the table of Block x Location => Value for the entry to each 2161 // block. 2162 // The kinds of PHIs we can eliminate are, for example, where one path in a 2163 // conditional spills and restores a register, and the register still has 2164 // the same value once control flow joins, unbeknownst to the PHI placement 2165 // code. Propagating values allows us to identify such un-necessary PHIs and 2166 // remove them. 2167 SmallPtrSet<const MachineBasicBlock *, 16> Visited; 2168 while (!Worklist.empty() || !Pending.empty()) { 2169 // Vector for storing the evaluated block transfer function. 2170 SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap; 2171 2172 while (!Worklist.empty()) { 2173 MachineBasicBlock *MBB = OrderToBB[Worklist.top()]; 2174 CurBB = MBB->getNumber(); 2175 Worklist.pop(); 2176 2177 // Join the values in all predecessor blocks. 2178 bool InLocsChanged; 2179 InLocsChanged = mlocJoin(*MBB, Visited, MOutLocs, MInLocs[CurBB]); 2180 InLocsChanged |= Visited.insert(MBB).second; 2181 2182 // Don't examine the transfer function if we've visited this block at least 2183 // once, and inlocs haven't changed. 2184 if (!InLocsChanged) 2185 continue; 2186 2187 // Load the current set of live-ins into MLocTracker. 2188 MTracker->loadFromArray(MInLocs[CurBB], CurBB); 2189 2190 // Each element of the transfer function can be a new def, or a read of 2191 // a live-in value. Evaluate each element, and store to "ToRemap". 2192 ToRemap.clear(); 2193 for (auto &P : MLocTransfer[CurBB]) { 2194 if (P.second.getBlock() == CurBB && P.second.isPHI()) { 2195 // This is a movement of whatever was live in. Read it. 2196 ValueIDNum NewID = MTracker->readMLoc(P.second.getLoc()); 2197 ToRemap.push_back(std::make_pair(P.first, NewID)); 2198 } else { 2199 // It's a def. Just set it. 2200 assert(P.second.getBlock() == CurBB); 2201 ToRemap.push_back(std::make_pair(P.first, P.second)); 2202 } 2203 } 2204 2205 // Commit the transfer function changes into mloc tracker, which 2206 // transforms the contents of the MLocTracker into the live-outs. 2207 for (auto &P : ToRemap) 2208 MTracker->setMLoc(P.first, P.second); 2209 2210 // Now copy out-locs from mloc tracker into out-loc vector, checking 2211 // whether changes have occurred. These changes can have come from both 2212 // the transfer function, and mlocJoin.
2213 bool OLChanged = false; 2214 for (auto Location : MTracker->locations()) { 2215 OLChanged |= MOutLocs[CurBB][Location.Idx.asU64()] != Location.Value; 2216 MOutLocs[CurBB][Location.Idx.asU64()] = Location.Value; 2217 } 2218 2219 MTracker->reset(); 2220 2221 // No need to examine successors again if out-locs didn't change. 2222 if (!OLChanged) 2223 continue; 2224 2225 // All successors should be visited: put any back-edges on the pending 2226 // list for the next pass-through, and any other successors to be 2227 // visited this pass, if they're not going to be already. 2228 for (auto s : MBB->successors()) { 2229 // Does branching to this successor represent a back-edge? 2230 if (BBToOrder[s] > BBToOrder[MBB]) { 2231 // No: visit it during this dataflow iteration. 2232 if (OnWorklist.insert(s).second) 2233 Worklist.push(BBToOrder[s]); 2234 } else { 2235 // Yes: visit it on the next iteration. 2236 if (OnPending.insert(s).second) 2237 Pending.push(BBToOrder[s]); 2238 } 2239 } 2240 } 2241 2242 Worklist.swap(Pending); 2243 std::swap(OnPending, OnWorklist); 2244 OnPending.clear(); 2245 // At this point, pending must be empty, since it was just the empty 2246 // worklist 2247 assert(Pending.empty() && "Pending should be empty"); 2248 } 2249 2250 // Once all the live-ins don't change on mlocJoin(), we've eliminated all 2251 // redundant PHIs. 2252 } 2253 2254 void InstrRefBasedLDV::BlockPHIPlacement( 2255 const SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks, 2256 const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks, 2257 SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) { 2258 // Apply IDF calculator to the designated set of location defs, storing 2259 // required PHIs into PHIBlocks. Uses the dominator tree stored in the 2260 // InstrRefBasedLDV object. 2261 IDFCalculatorBase<MachineBasicBlock, false> IDF(DomTree->getBase()); 2262 2263 IDF.setLiveInBlocks(AllBlocks); 2264 IDF.setDefiningBlocks(DefBlocks); 2265 IDF.calculate(PHIBlocks); 2266 } 2267 2268 Optional<ValueIDNum> InstrRefBasedLDV::pickVPHILoc( 2269 const MachineBasicBlock &MBB, const DebugVariable &Var, 2270 const LiveIdxT &LiveOuts, ValueIDNum **MOutLocs, 2271 const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) { 2272 // Collect a set of locations from predecessor where its live-out value can 2273 // be found. 2274 SmallVector<SmallVector<LocIdx, 4>, 8> Locs; 2275 SmallVector<const DbgValueProperties *, 4> Properties; 2276 unsigned NumLocs = MTracker->getNumLocs(); 2277 2278 // No predecessors means no PHIs. 2279 if (BlockOrders.empty()) 2280 return None; 2281 2282 for (auto p : BlockOrders) { 2283 unsigned ThisBBNum = p->getNumber(); 2284 auto OutValIt = LiveOuts.find(p); 2285 if (OutValIt == LiveOuts.end()) 2286 // If we have a predecessor not in scope, we'll never find a PHI position. 2287 return None; 2288 const DbgValue &OutVal = *OutValIt->second; 2289 2290 if (OutVal.Kind == DbgValue::Const || OutVal.Kind == DbgValue::NoVal) 2291 // Consts and no-values cannot have locations we can join on. 2292 return None; 2293 2294 Properties.push_back(&OutVal.Properties); 2295 2296 // Create new empty vector of locations. 2297 Locs.resize(Locs.size() + 1); 2298 2299 // If the live-in value is a def, find the locations where that value is 2300 // present. Do the same for VPHIs where we know the VPHI value. 
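// As a worked example (locations and value numbers are hypothetical): if the
// value we want is live-out of the first predecessor in both $rbx and a
// stack slot, but live-out of the second predecessor only in $rbx, the two
// per-predecessor location sets are {$rbx, slot} and {$rbx}. Their
// intersection, computed further down, leaves $rbx, and the function returns
// the machine PHI value {this block, 0, LocIdx of $rbx}.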
2301 if (OutVal.Kind == DbgValue::Def || 2302 (OutVal.Kind == DbgValue::VPHI && OutVal.BlockNo != MBB.getNumber() && 2303 OutVal.ID != ValueIDNum::EmptyValue)) { 2304 ValueIDNum ValToLookFor = OutVal.ID; 2305 // Search the live-outs of the predecessor for the specified value. 2306 for (unsigned int I = 0; I < NumLocs; ++I) { 2307 if (MOutLocs[ThisBBNum][I] == ValToLookFor) 2308 Locs.back().push_back(LocIdx(I)); 2309 } 2310 } else { 2311 assert(OutVal.Kind == DbgValue::VPHI); 2312 // For VPHIs where we don't know the location, we definitely can't find 2313 // a join loc. 2314 if (OutVal.BlockNo != MBB.getNumber()) 2315 return None; 2316 2317 // Otherwise: this is a VPHI on a backedge feeding back into itself, i.e. 2318 // a value that's live-through the whole loop. (It has to be a backedge, 2319 // because a block can't dominate itself). We can accept as a PHI location 2320 // any location where the other predecessors agree, _and_ the machine 2321 // locations feed back into themselves. Therefore, add all self-looping 2322 // machine-value PHI locations. 2323 for (unsigned int I = 0; I < NumLocs; ++I) { 2324 ValueIDNum MPHI(MBB.getNumber(), 0, LocIdx(I)); 2325 if (MOutLocs[ThisBBNum][I] == MPHI) 2326 Locs.back().push_back(LocIdx(I)); 2327 } 2328 } 2329 } 2330 2331 // We should have found locations for all predecessors, or returned. 2332 assert(Locs.size() == BlockOrders.size()); 2333 2334 // Check that all properties are the same. We can't pick a location if they're 2335 // not. 2336 const DbgValueProperties *Properties0 = Properties[0]; 2337 for (auto *Prop : Properties) 2338 if (*Prop != *Properties0) 2339 return None; 2340 2341 // Starting with the first set of locations, take the intersection with 2342 // subsequent sets. 2343 SmallVector<LocIdx, 4> CandidateLocs = Locs[0]; 2344 for (unsigned int I = 1; I < Locs.size(); ++I) { 2345 auto &LocVec = Locs[I]; 2346 SmallVector<LocIdx, 4> NewCandidates; 2347 std::set_intersection(CandidateLocs.begin(), CandidateLocs.end(), 2348 LocVec.begin(), LocVec.end(), std::inserter(NewCandidates, NewCandidates.begin())); 2349 CandidateLocs = NewCandidates; 2350 } 2351 if (CandidateLocs.empty()) 2352 return None; 2353 2354 // We now have a set of LocIdxes that contain the right output value in 2355 // each of the predecessors. Pick the lowest; if there's a register loc, 2356 // that'll be it. 2357 LocIdx L = *CandidateLocs.begin(); 2358 2359 // Return a PHI-value-number for the found location. 2360 ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L}; 2361 return PHIVal; 2362 } 2363 2364 bool InstrRefBasedLDV::vlocJoin( 2365 MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, 2366 SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore, 2367 DbgValue &LiveIn) { 2368 LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n"); 2369 bool Changed = false; 2370 2371 // Order predecessors by RPOT order, for exploring them in that order. 2372 SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors()); 2373 2374 auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) { 2375 return BBToOrder[A] < BBToOrder[B]; 2376 }; 2377 2378 llvm::sort(BlockOrders, Cmp); 2379 2380 unsigned CurBlockRPONum = BBToOrder[&MBB]; 2381 2382 // Collect all the incoming DbgValues for this variable, from predecessor 2383 // live-out values. 2384 SmallVector<InValueT, 8> Values; 2385 bool Bail = false; 2386 int BackEdgesStart = 0; 2387 for (auto p : BlockOrders) { 2388 // If the predecessor isn't in scope / to be explored, we'll never be 2389 // able to join any locations. 
2390 if (!BlocksToExplore.contains(p)) { 2391 Bail = true; 2392 break; 2393 } 2394 2395 // All Live-outs will have been initialized. 2396 DbgValue &OutLoc = *VLOCOutLocs.find(p)->second; 2397 2398 // Keep track of where back-edges begin in the Values vector. Relies on 2399 // BlockOrders being sorted by RPO. 2400 unsigned ThisBBRPONum = BBToOrder[p]; 2401 if (ThisBBRPONum < CurBlockRPONum) 2402 ++BackEdgesStart; 2403 2404 Values.push_back(std::make_pair(p, &OutLoc)); 2405 } 2406 2407 // If there were no values, or one of the predecessors couldn't have a 2408 // value, then give up immediately. It's not safe to produce a live-in 2409 // value. Leave as whatever it was before. 2410 if (Bail || Values.size() == 0) 2411 return false; 2412 2413 // All (non-entry) blocks have at least one non-backedge predecessor. 2414 // Pick the variable value from the first of these, to compare against 2415 // all others. 2416 const DbgValue &FirstVal = *Values[0].second; 2417 2418 // If the old live-in value is not a PHI then either a) no PHI is needed 2419 // here, or b) we eliminated the PHI that was here. If so, we can just 2420 // propagate in the first parent's incoming value. 2421 if (LiveIn.Kind != DbgValue::VPHI || LiveIn.BlockNo != MBB.getNumber()) { 2422 Changed = LiveIn != FirstVal; 2423 if (Changed) 2424 LiveIn = FirstVal; 2425 return Changed; 2426 } 2427 2428 // Scan for variable values that can never be resolved: if they have 2429 // different DIExpressions, different indirectness, or are mixed constants / 2430 // non-constants. 2431 for (auto &V : Values) { 2432 if (V.second->Properties != FirstVal.Properties) 2433 return false; 2434 if (V.second->Kind == DbgValue::NoVal) 2435 return false; 2436 if (V.second->Kind == DbgValue::Const && FirstVal.Kind != DbgValue::Const) 2437 return false; 2438 } 2439 2440 // Try to eliminate this PHI. Do the incoming values all agree? 2441 bool Disagree = false; 2442 for (auto &V : Values) { 2443 if (*V.second == FirstVal) 2444 continue; // No disagreement. 2445 2446 // Eliminate if a backedge feeds a VPHI back into itself. 2447 if (V.second->Kind == DbgValue::VPHI && 2448 V.second->BlockNo == MBB.getNumber() && 2449 // Is this a backedge? 2450 std::distance(Values.begin(), &V) >= BackEdgesStart) 2451 continue; 2452 2453 Disagree = true; 2454 } 2455 2456 // No disagreement -> live-through value. 2457 if (!Disagree) { 2458 Changed = LiveIn != FirstVal; 2459 if (Changed) 2460 LiveIn = FirstVal; 2461 return Changed; 2462 } else { 2463 // Otherwise use a VPHI. 2464 DbgValue VPHI(MBB.getNumber(), FirstVal.Properties, DbgValue::VPHI); 2465 Changed = LiveIn != VPHI; 2466 if (Changed) 2467 LiveIn = VPHI; 2468 return Changed; 2469 } 2470 } 2471 2472 void InstrRefBasedLDV::getBlocksForScope( 2473 const DILocation *DILoc, 2474 SmallPtrSetImpl<const MachineBasicBlock *> &BlocksToExplore, 2475 const SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks) { 2476 // Get the set of "normal" in-lexical-scope blocks. 2477 LS.getMachineBasicBlocks(DILoc, BlocksToExplore); 2478 2479 // VarLoc LiveDebugValues tracks variable locations that are defined in 2480 // blocks not in scope. This is something we could legitimately ignore, but 2481 // lets allow it for now for the sake of coverage. 2482 BlocksToExplore.insert(AssignBlocks.begin(), AssignBlocks.end()); 2483 2484 // Storage for artificial blocks we intend to add to BlocksToExplore. 
2485 DenseSet<const MachineBasicBlock *> ToAdd; 2486 2487 // To avoid needlessly dropping large volumes of variable locations, propagate 2488 // variables through artificial blocks, i.e. those that don't have any 2489 // instructions in scope at all. To accurately replicate VarLoc 2490 // LiveDebugValues, this means exploring all artificial successors too. 2491 // Perform a depth-first-search to enumerate those blocks. 2492 for (auto *MBB : BlocksToExplore) { 2493 // Depth-first-search state: each node is a block and which successor 2494 // we're currently exploring. 2495 SmallVector<std::pair<const MachineBasicBlock *, 2496 MachineBasicBlock::const_succ_iterator>, 2497 8> 2498 DFS; 2499 2500 // Find any artificial successors not already tracked. 2501 for (auto *succ : MBB->successors()) { 2502 if (BlocksToExplore.count(succ)) 2503 continue; 2504 if (!ArtificialBlocks.count(succ)) 2505 continue; 2506 ToAdd.insert(succ); 2507 DFS.push_back({succ, succ->succ_begin()}); 2508 } 2509 2510 // Search all those blocks, depth first. 2511 while (!DFS.empty()) { 2512 const MachineBasicBlock *CurBB = DFS.back().first; 2513 MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second; 2514 // Walk back if we've explored this block's successors to the end. 2515 if (CurSucc == CurBB->succ_end()) { 2516 DFS.pop_back(); 2517 continue; 2518 } 2519 2520 // If the current successor is artificial and unexplored, descend into 2521 // it. 2522 if (!ToAdd.count(*CurSucc) && ArtificialBlocks.count(*CurSucc)) { 2523 ToAdd.insert(*CurSucc); 2524 DFS.push_back({*CurSucc, (*CurSucc)->succ_begin()}); 2525 continue; 2526 } 2527 2528 ++CurSucc; 2529 } 2530 } 2531 2532 BlocksToExplore.insert(ToAdd.begin(), ToAdd.end()); 2533 } 2534 2535 void InstrRefBasedLDV::buildVLocValueMap( 2536 const DILocation *DILoc, const SmallSet<DebugVariable, 4> &VarsWeCareAbout, 2537 SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output, 2538 ValueIDNum **MOutLocs, ValueIDNum **MInLocs, 2539 SmallVectorImpl<VLocTracker> &AllTheVLocs) { 2540 // This method is much like buildMLocValueMap, but focuses on a single 2541 // LexicalScope at a time. Pick out a set of blocks and variables that are 2542 // to have their value assignments solved, then run our dataflow algorithm 2543 // until a fixedpoint is reached. 2544 std::priority_queue<unsigned int, std::vector<unsigned int>, 2545 std::greater<unsigned int>> 2546 Worklist, Pending; 2547 SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending; 2548 2549 // The set of blocks we'll be examining. 2550 SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; 2551 2552 // The order in which to examine them (RPO). 2553 SmallVector<MachineBasicBlock *, 8> BlockOrders; 2554 2555 // RPO ordering function. 2556 auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) { 2557 return BBToOrder[A] < BBToOrder[B]; 2558 }; 2559 2560 getBlocksForScope(DILoc, BlocksToExplore, AssignBlocks); 2561 2562 // Single block scope: not interesting! No propagation at all. Note that 2563 // this could probably go above ArtificialBlocks without damage, but 2564 // that then produces output differences from original-live-debug-values, 2565 // which propagates from a single block into many artificial ones. 2566 if (BlocksToExplore.size() == 1) 2567 return; 2568 2569 // Convert a const set to a non-const set. LexicalScopes 2570 // getMachineBasicBlocks returns const MBB pointers, IDF wants mutable ones. 2571 // (Neither of them mutate anything).
2572 SmallPtrSet<MachineBasicBlock *, 8> MutBlocksToExplore; 2573 for (const auto *MBB : BlocksToExplore) 2574 MutBlocksToExplore.insert(const_cast<MachineBasicBlock *>(MBB)); 2575 2576 // Pick out the relevant blocks in RPO order and sort them. 2577 for (auto *MBB : BlocksToExplore) 2578 BlockOrders.push_back(const_cast<MachineBasicBlock *>(MBB)); 2579 2580 llvm::sort(BlockOrders, Cmp); 2581 unsigned NumBlocks = BlockOrders.size(); 2582 2583 // Allocate some vectors for storing the live ins and live outs. Large. 2584 SmallVector<DbgValue, 32> LiveIns, LiveOuts; 2585 LiveIns.reserve(NumBlocks); 2586 LiveOuts.reserve(NumBlocks); 2587 2588 // Initialize all values to start as NoVals. This signifies "it's live 2589 // through, but we don't know what it is". 2590 DbgValueProperties EmptyProperties(EmptyExpr, false); 2591 for (unsigned int I = 0; I < NumBlocks; ++I) { 2592 DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); 2593 LiveIns.push_back(EmptyDbgValue); 2594 LiveOuts.push_back(EmptyDbgValue); 2595 } 2596 2597 // Produce by-MBB indexes of live-in/live-outs, to ease lookup within 2598 // vlocJoin. 2599 LiveIdxT LiveOutIdx, LiveInIdx; 2600 LiveOutIdx.reserve(NumBlocks); 2601 LiveInIdx.reserve(NumBlocks); 2602 for (unsigned I = 0; I < NumBlocks; ++I) { 2603 LiveOutIdx[BlockOrders[I]] = &LiveOuts[I]; 2604 LiveInIdx[BlockOrders[I]] = &LiveIns[I]; 2605 } 2606 2607 // Loop over each variable and place PHIs for it, then propagate values 2608 // between blocks. This keeps the locality of working on one lexical scope at 2609 // a time, but avoids re-processing variable values because some other 2610 // variable has been assigned. 2611 for (auto &Var : VarsWeCareAbout) { 2612 // Re-initialize live-ins and live-outs, to clear the remains of previous 2613 // variables' live-ins / live-outs. 2614 for (unsigned int I = 0; I < NumBlocks; ++I) { 2615 DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal); 2616 LiveIns[I] = EmptyDbgValue; 2617 LiveOuts[I] = EmptyDbgValue; 2618 } 2619 2620 // Place PHIs for variable values, using the LLVM IDF calculator. 2621 // Collect the set of blocks where variables are def'd. 2622 SmallPtrSet<MachineBasicBlock *, 32> DefBlocks; 2623 for (const MachineBasicBlock *ExpMBB : BlocksToExplore) { 2624 auto &TransferFunc = AllTheVLocs[ExpMBB->getNumber()].Vars; 2625 if (TransferFunc.find(Var) != TransferFunc.end()) 2626 DefBlocks.insert(const_cast<MachineBasicBlock *>(ExpMBB)); 2627 } 2628 2629 SmallVector<MachineBasicBlock *, 32> PHIBlocks; 2630 2631 // Request the set of PHIs we should insert for this variable. If there's 2632 // only one value definition, things are very simple. 2633 if (DefBlocks.size() == 1) { 2634 placePHIsForSingleVarDefinition(MutBlocksToExplore, *DefBlocks.begin(), 2635 AllTheVLocs, Var, Output); 2636 continue; 2637 } 2638 2639 // Otherwise: we need to place PHIs through SSA and propagate values. 2640 BlockPHIPlacement(MutBlocksToExplore, DefBlocks, PHIBlocks); 2641 2642 // Insert PHIs into the per-block live-in tables for this variable. 2643 for (MachineBasicBlock *PHIMBB : PHIBlocks) { 2644 unsigned BlockNo = PHIMBB->getNumber(); 2645 DbgValue *LiveIn = LiveInIdx[PHIMBB]; 2646 *LiveIn = DbgValue(BlockNo, EmptyProperties, DbgValue::VPHI); 2647 } 2648 2649 for (auto *MBB : BlockOrders) { 2650 Worklist.push(BBToOrder[MBB]); 2651 OnWorklist.insert(MBB); 2652 } 2653 2654 // Iterate over all the blocks we selected, propagating the variable's value.
2655 // This loop does two things: 2656 // * Eliminates un-necessary VPHIs in vlocJoin, 2657 // * Evaluates the block's transfer function (i.e. variable assignments) and 2658 // stores the result to the block's live-outs. 2659 // Always evaluate the transfer function on the first iteration, and when 2660 // the live-ins change thereafter. 2661 bool FirstTrip = true; 2662 while (!Worklist.empty() || !Pending.empty()) { 2663 while (!Worklist.empty()) { 2664 auto *MBB = OrderToBB[Worklist.top()]; 2665 CurBB = MBB->getNumber(); 2666 Worklist.pop(); 2667 2668 auto LiveInsIt = LiveInIdx.find(MBB); 2669 assert(LiveInsIt != LiveInIdx.end()); 2670 DbgValue *LiveIn = LiveInsIt->second; 2671 2672 // Join values from predecessors. This may update the block's live-in 2673 // value, i.e. the DbgValue that LiveInIdx points at (*LiveIn). 2674 bool InLocsChanged = 2675 vlocJoin(*MBB, LiveOutIdx, BlocksToExplore, *LiveIn); 2676 2677 SmallVector<const MachineBasicBlock *, 8> Preds; 2678 for (const auto *Pred : MBB->predecessors()) 2679 Preds.push_back(Pred); 2680 2681 // If this block's live-in value is a VPHI, try to pick a machine-value 2682 // for it. This makes the machine-value available and propagated 2683 // through all blocks by the time value propagation finishes. We can't 2684 // do this any earlier as it needs to read the block live-outs. 2685 if (LiveIn->Kind == DbgValue::VPHI && LiveIn->BlockNo == (int)CurBB) { 2686 // There's a small possibility that on a preceding path, a VPHI is 2687 // eliminated and transitions from VPHI-with-location to 2688 // live-through-value. As a result, the selected location of any VPHI 2689 // might change, so we need to re-compute it on each iteration. 2690 Optional<ValueIDNum> ValueNum = 2691 pickVPHILoc(*MBB, Var, LiveOutIdx, MOutLocs, Preds); 2692 2693 if (ValueNum) { 2694 InLocsChanged |= LiveIn->ID != *ValueNum; 2695 LiveIn->ID = *ValueNum; 2696 } 2697 } 2698 2699 if (!InLocsChanged && !FirstTrip) 2700 continue; 2701 2702 DbgValue *LiveOut = LiveOutIdx[MBB]; 2703 bool OLChanged = false; 2704 2705 // Do transfer function. 2706 auto &VTracker = AllTheVLocs[MBB->getNumber()]; 2707 auto TransferIt = VTracker.Vars.find(Var); 2708 if (TransferIt != VTracker.Vars.end()) { 2709 // Erase on empty transfer (DBG_VALUE $noreg). 2710 if (TransferIt->second.Kind == DbgValue::Undef) { 2711 DbgValue NewVal(MBB->getNumber(), EmptyProperties, DbgValue::NoVal); 2712 if (*LiveOut != NewVal) { 2713 *LiveOut = NewVal; 2714 OLChanged = true; 2715 } 2716 } else { 2717 // Insert new variable value; or overwrite. 2718 if (*LiveOut != TransferIt->second) { 2719 *LiveOut = TransferIt->second; 2720 OLChanged = true; 2721 } 2722 } 2723 } else { 2724 // Just copy live-ins to live-outs, for anything not transferred. 2725 if (*LiveOut != *LiveIn) { 2726 *LiveOut = *LiveIn; 2727 OLChanged = true; 2728 } 2729 } 2730 2731 // If no live-out value changed, there's no need to explore further. 2732 if (!OLChanged) 2733 continue; 2734 2735 // We should visit all successors. Ensure we'll visit any non-backedge 2736 // successors during this dataflow iteration; book backedge successors 2737 // to be visited next time around. 2738 for (auto s : MBB->successors()) { 2739 // Ignore out of scope / not-to-be-explored successors.
2740 if (LiveInIdx.find(s) == LiveInIdx.end()) 2741 continue; 2742 2743 if (BBToOrder[s] > BBToOrder[MBB]) { 2744 if (OnWorklist.insert(s).second) 2745 Worklist.push(BBToOrder[s]); 2746 } else if (OnPending.insert(s).second && (FirstTrip || OLChanged)) { 2747 Pending.push(BBToOrder[s]); 2748 } 2749 } 2750 } 2751 Worklist.swap(Pending); 2752 std::swap(OnWorklist, OnPending); 2753 OnPending.clear(); 2754 assert(Pending.empty()); 2755 FirstTrip = false; 2756 } 2757 2758 // Save live-ins to output vector. Ignore any that are still marked as being 2759 // VPHIs with no location -- those are variables that we know the value of, 2760 // but are not actually available in the register file. 2761 for (auto *MBB : BlockOrders) { 2762 DbgValue *BlockLiveIn = LiveInIdx[MBB]; 2763 if (BlockLiveIn->Kind == DbgValue::NoVal) 2764 continue; 2765 if (BlockLiveIn->Kind == DbgValue::VPHI && 2766 BlockLiveIn->ID == ValueIDNum::EmptyValue) 2767 continue; 2768 if (BlockLiveIn->Kind == DbgValue::VPHI) 2769 BlockLiveIn->Kind = DbgValue::Def; 2770 assert(BlockLiveIn->Properties.DIExpr->getFragmentInfo() == 2771 Var.getFragment() && "Fragment info missing during value prop"); 2772 Output[MBB->getNumber()].push_back(std::make_pair(Var, *BlockLiveIn)); 2773 } 2774 } // Per-variable loop. 2775 2776 BlockOrders.clear(); 2777 BlocksToExplore.clear(); 2778 } 2779 2780 void InstrRefBasedLDV::placePHIsForSingleVarDefinition( 2781 const SmallPtrSetImpl<MachineBasicBlock *> &InScopeBlocks, 2782 MachineBasicBlock *AssignMBB, SmallVectorImpl<VLocTracker> &AllTheVLocs, 2783 const DebugVariable &Var, LiveInsT &Output) { 2784 // If there is a single definition of the variable, then working out its 2785 // value everywhere is very simple: it holds in every block dominated by the 2786 // definition. At the dominance frontier, the usual algorithm would: 2787 // * Place PHIs, 2788 // * Propagate values into them, 2789 // * Find there's no incoming variable value from the other incoming branches 2790 // of the dominance frontier, 2791 // * Specify there's no variable value in blocks past the frontier. 2792 // This is a common case, hence it's worth special-casing it. 2793 2794 // Pick out the variable's value from the block transfer function. 2795 VLocTracker &VLocs = AllTheVLocs[AssignMBB->getNumber()]; 2796 auto ValueIt = VLocs.Vars.find(Var); 2797 const DbgValue &Value = ValueIt->second; 2798 2799 // If it's an explicit assignment of "undef", that means there is no location 2800 // anyway, anywhere. 2801 if (Value.Kind == DbgValue::Undef) 2802 return; 2803 2804 // Assign the variable value on entry to each dominated block that's in scope. 2805 // Skip the definition block -- it's assigned the variable value in the middle 2806 // of the block somewhere. 2807 for (auto *ScopeBlock : InScopeBlocks) { 2808 if (!DomTree->properlyDominates(AssignMBB, ScopeBlock)) 2809 continue; 2810 2811 Output[ScopeBlock->getNumber()].push_back({Var, Value}); 2812 } 2813 2814 // All blocks that aren't dominated have no live-in value, thus no variable 2815 // value will be given to them.
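// For illustration (the block names are hypothetical): if the only
// assignment is in block B, every in-scope block properly dominated by B
// receives the assigned value as its live-in, while blocks on or beyond B's
// dominance frontier receive nothing -- the same conclusion full PHI
// placement and value propagation would reach, at much lower cost.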
2816 } 2817 2818 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2819 void InstrRefBasedLDV::dump_mloc_transfer( 2820 const MLocTransferMap &mloc_transfer) const { 2821 for (auto &P : mloc_transfer) { 2822 std::string foo = MTracker->LocIdxToName(P.first); 2823 std::string bar = MTracker->IDAsString(P.second); 2824 dbgs() << "Loc " << foo << " --> " << bar << "\n"; 2825 } 2826 } 2827 #endif 2828 2829 void InstrRefBasedLDV::initialSetup(MachineFunction &MF) { 2830 // Build some useful data structures. 2831 2832 LLVMContext &Context = MF.getFunction().getContext(); 2833 EmptyExpr = DIExpression::get(Context, {}); 2834 2835 auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool { 2836 if (const DebugLoc &DL = MI.getDebugLoc()) 2837 return DL.getLine() != 0; 2838 return false; 2839 }; 2840 // Collect a set of all the artificial blocks. 2841 for (auto &MBB : MF) 2842 if (none_of(MBB.instrs(), hasNonArtificialLocation)) 2843 ArtificialBlocks.insert(&MBB); 2844 2845 // Compute mappings of block <=> RPO order. 2846 ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); 2847 unsigned int RPONumber = 0; 2848 for (MachineBasicBlock *MBB : RPOT) { 2849 OrderToBB[RPONumber] = MBB; 2850 BBToOrder[MBB] = RPONumber; 2851 BBNumToRPO[MBB->getNumber()] = RPONumber; 2852 ++RPONumber; 2853 } 2854 2855 // Order value substitutions by their "source" operand pair, for quick lookup. 2856 llvm::sort(MF.DebugValueSubstitutions); 2857 2858 #ifdef EXPENSIVE_CHECKS 2859 // As an expensive check, test whether there are any duplicate substitution 2860 // sources in the collection. 2861 if (MF.DebugValueSubstitutions.size() > 2) { 2862 for (auto It = MF.DebugValueSubstitutions.begin(); 2863 It != std::prev(MF.DebugValueSubstitutions.end()); ++It) { 2864 assert(It->Src != std::next(It)->Src && "Duplicate variable location " 2865 "substitution seen"); 2866 } 2867 } 2868 #endif 2869 } 2870 2871 // Produce an "ejection map" for blocks, i.e., what's the highest-numbered 2872 // lexical scope it's used in. When exploring in DFS order and we pass that 2873 // scope, the block can be processed and any tracking information freed. 2874 void InstrRefBasedLDV::makeDepthFirstEjectionMap( 2875 SmallVectorImpl<unsigned> &EjectionMap, 2876 const ScopeToDILocT &ScopeToDILocation, 2877 ScopeToAssignBlocksT &ScopeToAssignBlocks) { 2878 SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore; 2879 SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack; 2880 auto *TopScope = LS.getCurrentFunctionScope(); 2881 2882 // Unlike lexical scope explorers, we explore in reverse order, to find the 2883 // "last" lexical scope used for each block early. 2884 WorkStack.push_back({TopScope, TopScope->getChildren().size() - 1}); 2885 2886 while (!WorkStack.empty()) { 2887 auto &ScopePosition = WorkStack.back(); 2888 LexicalScope *WS = ScopePosition.first; 2889 ssize_t ChildNum = ScopePosition.second--; 2890 2891 const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); 2892 if (ChildNum >= 0) { 2893 // If ChildNum is positive, there are remaining children to explore. 2894 // Push the child and its children-count onto the stack. 2895 auto &ChildScope = Children[ChildNum]; 2896 WorkStack.push_back( 2897 std::make_pair(ChildScope, ChildScope->getChildren().size() - 1)); 2898 } else { 2899 WorkStack.pop_back(); 2900 2901 // We've explored all children and any later blocks: examine all blocks 2902 // in our scope. If they haven't yet had an ejection number set, then 2903 // this scope will be the last to use that block. 
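// For illustration (the numbers are hypothetical): if block 7 is used by two
// scopes, this reverse-order walk reaches the one that comes later in
// forward depth-first order first, and stamps EjectionMap[7] with that
// scope's DFSOut, say 12. The forward walk in depthFirstVLocAndEmit can then
// eject block 7 as soon as it finishes the scope whose DFSOut is 12, because
// no scope visited afterwards will need that block again.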
2904 auto DILocationIt = ScopeToDILocation.find(WS);
2905 if (DILocationIt != ScopeToDILocation.end()) {
2906 getBlocksForScope(DILocationIt->second, BlocksToExplore,
2907 ScopeToAssignBlocks.find(WS)->second);
2908 for (auto *MBB : BlocksToExplore) {
2909 unsigned BBNum = MBB->getNumber();
2910 if (EjectionMap[BBNum] == 0)
2911 EjectionMap[BBNum] = WS->getDFSOut();
2912 }
2913
2914 BlocksToExplore.clear();
2915 }
2916 }
2917 }
2918 }
2919
2920 bool InstrRefBasedLDV::depthFirstVLocAndEmit(
2921 unsigned MaxNumBlocks, const ScopeToDILocT &ScopeToDILocation,
2922 const ScopeToVarsT &ScopeToVars, ScopeToAssignBlocksT &ScopeToAssignBlocks,
2923 LiveInsT &Output, ValueIDNum **MOutLocs, ValueIDNum **MInLocs,
2924 SmallVectorImpl<VLocTracker> &AllTheVLocs, MachineFunction &MF,
2925 DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
2926 const TargetPassConfig &TPC) {
2927 TTracker = new TransferTracker(TII, MTracker, MF, *TRI, CalleeSavedRegs, TPC);
2928 unsigned NumLocs = MTracker->getNumLocs();
2929 VTracker = nullptr;
2930
2931 // No scopes? No variable locations.
2932 if (!LS.getCurrentFunctionScope()) {
2933 // FIXME: this is a sticking plaster to prevent a memory leak; these
2934 // pointers will be automagically freed by being unique pointers, shortly.
2935 for (unsigned int I = 0; I < MaxNumBlocks; ++I) {
2936 delete[] MInLocs[I];
2937 delete[] MOutLocs[I];
2938 }
2939 return false;
2940 }
2941
2942 // Build map from block number to the last scope that uses the block.
2943 SmallVector<unsigned, 16> EjectionMap;
2944 EjectionMap.resize(MaxNumBlocks, 0);
2945 makeDepthFirstEjectionMap(EjectionMap, ScopeToDILocation,
2946 ScopeToAssignBlocks);
2947
2948 // Helper lambda for ejecting a block -- if nothing is going to use the block,
2949 // we can translate the variable location information into DBG_VALUEs and then
2950 // free all of InstrRefBasedLDV's data structures.
2951 auto EjectBlock = [&](MachineBasicBlock &MBB) -> void {
2952 unsigned BBNum = MBB.getNumber();
2953 AllTheVLocs[BBNum].clear();
2954
2955 // Prime the transfer-tracker, and then step through all the block
2956 // instructions, installing transfers.
2957 MTracker->reset();
2958 MTracker->loadFromArray(MInLocs[BBNum], BBNum);
2959 TTracker->loadInlocs(MBB, MInLocs[BBNum], Output[BBNum], NumLocs);
2960
2961 CurBB = BBNum;
2962 CurInst = 1;
2963 for (auto &MI : MBB) {
2964 process(MI, MOutLocs, MInLocs);
2965 TTracker->checkInstForNewValues(CurInst, MI.getIterator());
2966 ++CurInst;
2967 }
2968
2969 // Free machine-location tables for this block.
2970 delete[] MInLocs[BBNum];
2971 delete[] MOutLocs[BBNum];
2972 // Make ourselves brittle to use-after-free errors.
2973 MInLocs[BBNum] = nullptr;
2974 MOutLocs[BBNum] = nullptr;
2975 // We don't need live-in variable values for this block either.
2976 Output[BBNum].clear();
2977 AllTheVLocs[BBNum].clear();
2978 };
2979
2980 SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
2981 SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack;
2982 WorkStack.push_back({LS.getCurrentFunctionScope(), 0});
2983 unsigned HighestDFSIn = 0;
2984
2985 // Proceed to explore in depth-first order.
2986 while (!WorkStack.empty()) {
2987 auto &ScopePosition = WorkStack.back();
2988 LexicalScope *WS = ScopePosition.first;
2989 ssize_t ChildNum = ScopePosition.second++;
2990
2991 // We observe scopes with children twice here, once descending in, once
2992 // ascending out of the scope nest. Use HighestDFSIn as a ratchet to ensure
2993 // we don't process a scope twice.
Additionally, ignore scopes that don't 2994 // have a DILocation -- by proxy, this means we never tracked any variable 2995 // assignments in that scope. 2996 auto DILocIt = ScopeToDILocation.find(WS); 2997 if (HighestDFSIn <= WS->getDFSIn() && DILocIt != ScopeToDILocation.end()) { 2998 const DILocation *DILoc = DILocIt->second; 2999 auto &VarsWeCareAbout = ScopeToVars.find(WS)->second; 3000 auto &BlocksInScope = ScopeToAssignBlocks.find(WS)->second; 3001 3002 buildVLocValueMap(DILoc, VarsWeCareAbout, BlocksInScope, Output, MOutLocs, 3003 MInLocs, AllTheVLocs); 3004 } 3005 3006 HighestDFSIn = std::max(HighestDFSIn, WS->getDFSIn()); 3007 3008 // Descend into any scope nests. 3009 const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren(); 3010 if (ChildNum < (ssize_t)Children.size()) { 3011 // There are children to explore -- push onto stack and continue. 3012 auto &ChildScope = Children[ChildNum]; 3013 WorkStack.push_back(std::make_pair(ChildScope, 0)); 3014 } else { 3015 WorkStack.pop_back(); 3016 3017 // We've explored a leaf, or have explored all the children of a scope. 3018 // Try to eject any blocks where this is the last scope it's relevant to. 3019 auto DILocationIt = ScopeToDILocation.find(WS); 3020 if (DILocationIt == ScopeToDILocation.end()) 3021 continue; 3022 3023 getBlocksForScope(DILocationIt->second, BlocksToExplore, 3024 ScopeToAssignBlocks.find(WS)->second); 3025 for (auto *MBB : BlocksToExplore) 3026 if (WS->getDFSOut() == EjectionMap[MBB->getNumber()]) 3027 EjectBlock(const_cast<MachineBasicBlock &>(*MBB)); 3028 3029 BlocksToExplore.clear(); 3030 } 3031 } 3032 3033 // Some artificial blocks may not have been ejected, meaning they're not 3034 // connected to an actual legitimate scope. This can technically happen 3035 // with things like the entry block. In theory, we shouldn't need to do 3036 // anything for such out-of-scope blocks, but for the sake of being similar 3037 // to VarLocBasedLDV, eject these too. 3038 for (auto *MBB : ArtificialBlocks) 3039 if (MOutLocs[MBB->getNumber()]) 3040 EjectBlock(*MBB); 3041 3042 // Finally, there might have been gaps in the block numbering, from dead 3043 // blocks being deleted or folded. In those scenarios, we might allocate a 3044 // block-table that's never ejected, meaning we have to free it at the end. 3045 for (unsigned int I = 0; I < MaxNumBlocks; ++I) { 3046 if (MInLocs[I]) { 3047 delete[] MInLocs[I]; 3048 delete[] MOutLocs[I]; 3049 } 3050 } 3051 3052 return emitTransfers(AllVarsNumbering); 3053 } 3054 3055 bool InstrRefBasedLDV::emitTransfers( 3056 DenseMap<DebugVariable, unsigned> &AllVarsNumbering) { 3057 // Go through all the transfers recorded in the TransferTracker -- this is 3058 // both the live-ins to a block, and any movements of values that happen 3059 // in the middle. 3060 for (const auto &P : TTracker->Transfers) { 3061 // We have to insert DBG_VALUEs in a consistent order, otherwise they 3062 // appear in DWARF in different orders. Use the order that they appear 3063 // when walking through each block / each instruction, stored in 3064 // AllVarsNumbering. 3065 SmallVector<std::pair<unsigned, MachineInstr *>> Insts; 3066 for (MachineInstr *MI : P.Insts) { 3067 DebugVariable Var(MI->getDebugVariable(), MI->getDebugExpression(), 3068 MI->getDebugLoc()->getInlinedAt()); 3069 Insts.emplace_back(AllVarsNumbering.find(Var)->second, MI); 3070 } 3071 llvm::sort(Insts, 3072 [](const auto &A, const auto &B) { return A.first < B.first; }); 3073 3074 // Insert either before or after the designated point... 
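// (A transfer that records an MBB is a set of block live-in DBG_VALUEs and is
// inserted at the recorded position at the start of that block; transfers
// without an MBB document a value movement mid-block and are inserted after
// the recorded instruction, unless that instruction is a terminator.)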
3075 if (P.MBB) { 3076 MachineBasicBlock &MBB = *P.MBB; 3077 for (const auto &Pair : Insts) 3078 MBB.insert(P.Pos, Pair.second); 3079 } else { 3080 // Terminators, like tail calls, can clobber things. Don't try and place 3081 // transfers after them. 3082 if (P.Pos->isTerminator()) 3083 continue; 3084 3085 MachineBasicBlock &MBB = *P.Pos->getParent(); 3086 for (const auto &Pair : Insts) 3087 MBB.insertAfterBundle(P.Pos, Pair.second); 3088 } 3089 } 3090 3091 return TTracker->Transfers.size() != 0; 3092 } 3093 3094 /// Calculate the liveness information for the given machine function and 3095 /// extend ranges across basic blocks. 3096 bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF, 3097 MachineDominatorTree *DomTree, 3098 TargetPassConfig *TPC, 3099 unsigned InputBBLimit, 3100 unsigned InputDbgValLimit) { 3101 // No subprogram means this function contains no debuginfo. 3102 if (!MF.getFunction().getSubprogram()) 3103 return false; 3104 3105 LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n"); 3106 this->TPC = TPC; 3107 3108 this->DomTree = DomTree; 3109 TRI = MF.getSubtarget().getRegisterInfo(); 3110 MRI = &MF.getRegInfo(); 3111 TII = MF.getSubtarget().getInstrInfo(); 3112 TFI = MF.getSubtarget().getFrameLowering(); 3113 TFI->getCalleeSaves(MF, CalleeSavedRegs); 3114 MFI = &MF.getFrameInfo(); 3115 LS.initialize(MF); 3116 3117 const auto &STI = MF.getSubtarget(); 3118 AdjustsStackInCalls = MFI->adjustsStack() && 3119 STI.getFrameLowering()->stackProbeFunctionModifiesSP(); 3120 if (AdjustsStackInCalls) 3121 StackProbeSymbolName = STI.getTargetLowering()->getStackProbeSymbolName(MF); 3122 3123 MTracker = 3124 new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering()); 3125 VTracker = nullptr; 3126 TTracker = nullptr; 3127 3128 SmallVector<MLocTransferMap, 32> MLocTransfer; 3129 SmallVector<VLocTracker, 8> vlocs; 3130 LiveInsT SavedLiveIns; 3131 3132 int MaxNumBlocks = -1; 3133 for (auto &MBB : MF) 3134 MaxNumBlocks = std::max(MBB.getNumber(), MaxNumBlocks); 3135 assert(MaxNumBlocks >= 0); 3136 ++MaxNumBlocks; 3137 3138 MLocTransfer.resize(MaxNumBlocks); 3139 vlocs.resize(MaxNumBlocks, VLocTracker(OverlapFragments, EmptyExpr)); 3140 SavedLiveIns.resize(MaxNumBlocks); 3141 3142 initialSetup(MF); 3143 3144 produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks); 3145 3146 // Allocate and initialize two array-of-arrays for the live-in and live-out 3147 // machine values. The outer dimension is the block number; while the inner 3148 // dimension is a LocIdx from MLocTracker. 3149 ValueIDNum **MOutLocs = new ValueIDNum *[MaxNumBlocks]; 3150 ValueIDNum **MInLocs = new ValueIDNum *[MaxNumBlocks]; 3151 unsigned NumLocs = MTracker->getNumLocs(); 3152 for (int i = 0; i < MaxNumBlocks; ++i) { 3153 // These all auto-initialize to ValueIDNum::EmptyValue 3154 MOutLocs[i] = new ValueIDNum[NumLocs]; 3155 MInLocs[i] = new ValueIDNum[NumLocs]; 3156 } 3157 3158 // Solve the machine value dataflow problem using the MLocTransfer function, 3159 // storing the computed live-ins / live-outs into the array-of-arrays. We use 3160 // both live-ins and live-outs for decision making in the variable value 3161 // dataflow problem. 3162 buildMLocValueMap(MF, MInLocs, MOutLocs, MLocTransfer); 3163 3164 // Patch up debug phi numbers, turning unknown block-live-in values into 3165 // either live-through machine values, or PHIs. 3166 for (auto &DBG_PHI : DebugPHINumToValue) { 3167 // Identify unresolved block-live-ins. 
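// For example (block and location numbers purely illustrative): a DBG_PHI
// that was recorded as reading the live-in PHI of block 3 at location 5 is
// rewritten below to whatever value buildMLocValueMap decided is actually
// live into block 3 at location 5 -- which may be a real def, a live-through
// value, or a genuine machine PHI.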
3168 ValueIDNum &Num = DBG_PHI.ValueRead;
3169 if (!Num.isPHI())
3170 continue;
3171
3172 unsigned BlockNo = Num.getBlock();
3173 LocIdx LocNo = Num.getLoc();
3174 Num = MInLocs[BlockNo][LocNo.asU64()];
3175 }
3176 // Later, we'll be looking up ranges of instruction numbers.
3177 llvm::sort(DebugPHINumToValue);
3178
3179 // Walk back through each block / instruction, collecting DBG_VALUE
3180 // instructions and recording what machine value their operands refer to.
3181 for (auto &OrderPair : OrderToBB) {
3182 MachineBasicBlock &MBB = *OrderPair.second;
3183 CurBB = MBB.getNumber();
3184 VTracker = &vlocs[CurBB];
3185 VTracker->MBB = &MBB;
3186 MTracker->loadFromArray(MInLocs[CurBB], CurBB);
3187 CurInst = 1;
3188 for (auto &MI : MBB) {
3189 process(MI, MOutLocs, MInLocs);
3190 ++CurInst;
3191 }
3192 MTracker->reset();
3193 }
3194
3195 // Number all variables in the order that they appear, to be used as a stable
3196 // insertion order later.
3197 DenseMap<DebugVariable, unsigned> AllVarsNumbering;
3198
3199 // Map from one LexicalScope to all the variables in that scope.
3200 ScopeToVarsT ScopeToVars;
3201
3202 // Map from one lexical scope to all blocks where assignments happen for
3203 // that scope.
3204 ScopeToAssignBlocksT ScopeToAssignBlocks;
3205
3206 // Map from each lexical scope to the DILocation that describes it.
3207 ScopeToDILocT ScopeToDILocation;
3208
3209 // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
3210 // the order is unimportant; it just has to be stable.
3211 unsigned VarAssignCount = 0;
3212 for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
3213 auto *MBB = OrderToBB[I];
3214 auto *VTracker = &vlocs[MBB->getNumber()];
3215 // Collect each variable with a DBG_VALUE in this block.
3216 for (auto &idx : VTracker->Vars) {
3217 const auto &Var = idx.first;
3218 const DILocation *ScopeLoc = VTracker->Scopes[Var];
3219 assert(ScopeLoc != nullptr);
3220 auto *Scope = LS.findLexicalScope(ScopeLoc);
3221
3222 // No insts in scope -> shouldn't have been recorded.
3223 assert(Scope != nullptr);
3224
3225 AllVarsNumbering.insert(std::make_pair(Var, AllVarsNumbering.size()));
3226 ScopeToVars[Scope].insert(Var);
3227 ScopeToAssignBlocks[Scope].insert(VTracker->MBB);
3228 ScopeToDILocation[Scope] = ScopeLoc;
3229 ++VarAssignCount;
3230 }
3231 }
3232
3233 bool Changed = false;
3234
3235 // If we have an extremely large number of variable assignments and blocks,
3236 // bail out at this point. We've already spent some time on analysis, but we
3237 // should cut our losses.
3238 if ((unsigned)MaxNumBlocks > InputBBLimit &&
3239 VarAssignCount > InputDbgValLimit) {
3240 LLVM_DEBUG(dbgs() << "Disabling InstrRefBasedLDV: " << MF.getName()
3241 << " has " << MaxNumBlocks << " basic blocks and "
3242 << VarAssignCount
3243 << " variable assignments, exceeding limits.\n");
3244
3245 // Perform the memory cleanup that depthFirstVLocAndEmit would otherwise do.
3246 for (int Idx = 0; Idx < MaxNumBlocks; ++Idx) {
3247 delete[] MOutLocs[Idx];
3248 delete[] MInLocs[Idx];
3249 }
3250 } else {
3251 // Solve the variable value problem and emit locations into blocks, by
3252 // exploring lexical scopes depth-first and ejecting each block's tracking
3253 // data once no remaining scope needs it.
3254 Changed = depthFirstVLocAndEmit(
3255 MaxNumBlocks, ScopeToDILocation, ScopeToVars, ScopeToAssignBlocks,
3256 SavedLiveIns, MOutLocs, MInLocs, vlocs, MF, AllVarsNumbering, *TPC);
3257 }
3258
3259 // Only the outer arrays are freed here; the per-block inner arrays were freed during ejection or by the bail-out path above.
3260 delete[] MOutLocs; 3261 delete[] MInLocs; 3262 3263 delete MTracker; 3264 delete TTracker; 3265 MTracker = nullptr; 3266 VTracker = nullptr; 3267 TTracker = nullptr; 3268 3269 ArtificialBlocks.clear(); 3270 OrderToBB.clear(); 3271 BBToOrder.clear(); 3272 BBNumToRPO.clear(); 3273 DebugInstrNumToInstr.clear(); 3274 DebugPHINumToValue.clear(); 3275 OverlapFragments.clear(); 3276 SeenFragments.clear(); 3277 SeenDbgPHIs.clear(); 3278 3279 return Changed; 3280 } 3281 3282 LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() { 3283 return new InstrRefBasedLDV(); 3284 } 3285 3286 namespace { 3287 class LDVSSABlock; 3288 class LDVSSAUpdater; 3289 3290 // Pick a type to identify incoming block values as we construct SSA. We 3291 // can't use anything more robust than an integer unfortunately, as SSAUpdater 3292 // expects to zero-initialize the type. 3293 typedef uint64_t BlockValueNum; 3294 3295 /// Represents an SSA PHI node for the SSA updater class. Contains the block 3296 /// this PHI is in, the value number it would have, and the expected incoming 3297 /// values from parent blocks. 3298 class LDVSSAPhi { 3299 public: 3300 SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues; 3301 LDVSSABlock *ParentBlock; 3302 BlockValueNum PHIValNum; 3303 LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock) 3304 : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {} 3305 3306 LDVSSABlock *getParent() { return ParentBlock; } 3307 }; 3308 3309 /// Thin wrapper around a block predecessor iterator. Only difference from a 3310 /// normal block iterator is that it dereferences to an LDVSSABlock. 3311 class LDVSSABlockIterator { 3312 public: 3313 MachineBasicBlock::pred_iterator PredIt; 3314 LDVSSAUpdater &Updater; 3315 3316 LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt, 3317 LDVSSAUpdater &Updater) 3318 : PredIt(PredIt), Updater(Updater) {} 3319 3320 bool operator!=(const LDVSSABlockIterator &OtherIt) const { 3321 return OtherIt.PredIt != PredIt; 3322 } 3323 3324 LDVSSABlockIterator &operator++() { 3325 ++PredIt; 3326 return *this; 3327 } 3328 3329 LDVSSABlock *operator*(); 3330 }; 3331 3332 /// Thin wrapper around a block for SSA Updater interface. Necessary because 3333 /// we need to track the PHI value(s) that we may have observed as necessary 3334 /// in this block. 3335 class LDVSSABlock { 3336 public: 3337 MachineBasicBlock &BB; 3338 LDVSSAUpdater &Updater; 3339 using PHIListT = SmallVector<LDVSSAPhi, 1>; 3340 /// List of PHIs in this block. There should only ever be one. 3341 PHIListT PHIList; 3342 3343 LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater) 3344 : BB(BB), Updater(Updater) {} 3345 3346 LDVSSABlockIterator succ_begin() { 3347 return LDVSSABlockIterator(BB.succ_begin(), Updater); 3348 } 3349 3350 LDVSSABlockIterator succ_end() { 3351 return LDVSSABlockIterator(BB.succ_end(), Updater); 3352 } 3353 3354 /// SSAUpdater has requested a PHI: create that within this block record. 3355 LDVSSAPhi *newPHI(BlockValueNum Value) { 3356 PHIList.emplace_back(Value, this); 3357 return &PHIList.back(); 3358 } 3359 3360 /// SSAUpdater wishes to know what PHIs already exist in this block. 3361 PHIListT &phis() { return PHIList; } 3362 }; 3363 3364 /// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values 3365 /// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to 3366 // SSAUpdaterTraits<LDVSSAUpdater>. 3367 class LDVSSAUpdater { 3368 public: 3369 /// Map of value numbers to PHI records. 
3370 DenseMap<BlockValueNum, LDVSSAPhi *> PHIs; 3371 /// Map of which blocks generate Undef values -- blocks that are not 3372 /// dominated by any Def. 3373 DenseMap<MachineBasicBlock *, BlockValueNum> UndefMap; 3374 /// Map of machine blocks to our own records of them. 3375 DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap; 3376 /// Machine location where any PHI must occur. 3377 LocIdx Loc; 3378 /// Table of live-in machine value numbers for blocks / locations. 3379 ValueIDNum **MLiveIns; 3380 3381 LDVSSAUpdater(LocIdx L, ValueIDNum **MLiveIns) : Loc(L), MLiveIns(MLiveIns) {} 3382 3383 void reset() { 3384 for (auto &Block : BlockMap) 3385 delete Block.second; 3386 3387 PHIs.clear(); 3388 UndefMap.clear(); 3389 BlockMap.clear(); 3390 } 3391 3392 ~LDVSSAUpdater() { reset(); } 3393 3394 /// For a given MBB, create a wrapper block for it. Stores it in the 3395 /// LDVSSAUpdater block map. 3396 LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) { 3397 auto it = BlockMap.find(BB); 3398 if (it == BlockMap.end()) { 3399 BlockMap[BB] = new LDVSSABlock(*BB, *this); 3400 it = BlockMap.find(BB); 3401 } 3402 return it->second; 3403 } 3404 3405 /// Find the live-in value number for the given block. Looks up the value at 3406 /// the PHI location on entry. 3407 BlockValueNum getValue(LDVSSABlock *LDVBB) { 3408 return MLiveIns[LDVBB->BB.getNumber()][Loc.asU64()].asU64(); 3409 } 3410 }; 3411 3412 LDVSSABlock *LDVSSABlockIterator::operator*() { 3413 return Updater.getSSALDVBlock(*PredIt); 3414 } 3415 3416 #ifndef NDEBUG 3417 3418 raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) { 3419 out << "SSALDVPHI " << PHI.PHIValNum; 3420 return out; 3421 } 3422 3423 #endif 3424 3425 } // namespace 3426 3427 namespace llvm { 3428 3429 /// Template specialization to give SSAUpdater access to CFG and value 3430 /// information. SSAUpdater calls methods in these traits, passing in the 3431 /// LDVSSAUpdater object, to learn about blocks and the values they define. 3432 /// It also provides methods to create PHI nodes and track them. 3433 template <> class SSAUpdaterTraits<LDVSSAUpdater> { 3434 public: 3435 using BlkT = LDVSSABlock; 3436 using ValT = BlockValueNum; 3437 using PhiT = LDVSSAPhi; 3438 using BlkSucc_iterator = LDVSSABlockIterator; 3439 3440 // Methods to access block successors -- dereferencing to our wrapper class. 3441 static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); } 3442 static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); } 3443 3444 /// Iterator for PHI operands. 3445 class PHI_iterator { 3446 private: 3447 LDVSSAPhi *PHI; 3448 unsigned Idx; 3449 3450 public: 3451 explicit PHI_iterator(LDVSSAPhi *P) // begin iterator 3452 : PHI(P), Idx(0) {} 3453 PHI_iterator(LDVSSAPhi *P, bool) // end iterator 3454 : PHI(P), Idx(PHI->IncomingValues.size()) {} 3455 3456 PHI_iterator &operator++() { 3457 Idx++; 3458 return *this; 3459 } 3460 bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; } 3461 bool operator!=(const PHI_iterator &X) const { return !operator==(X); } 3462 3463 BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; } 3464 3465 LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; } 3466 }; 3467 3468 static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); } 3469 3470 static inline PHI_iterator PHI_end(PhiT *PHI) { 3471 return PHI_iterator(PHI, true); 3472 } 3473 3474 /// FindPredecessorBlocks - Put the predecessors of BB into the Preds 3475 /// vector. 
3476 static void FindPredecessorBlocks(LDVSSABlock *BB, 3477 SmallVectorImpl<LDVSSABlock *> *Preds) { 3478 for (MachineBasicBlock *Pred : BB->BB.predecessors()) 3479 Preds->push_back(BB->Updater.getSSALDVBlock(Pred)); 3480 } 3481 3482 /// GetUndefVal - Normally creates an IMPLICIT_DEF instruction with a new 3483 /// register. For LiveDebugValues, represents a block identified as not having 3484 /// any DBG_PHI predecessors. 3485 static BlockValueNum GetUndefVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) { 3486 // Create a value number for this block -- it needs to be unique and in the 3487 // "undef" collection, so that we know it's not real. Use a number 3488 // representing a PHI into this block. 3489 BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64(); 3490 Updater->UndefMap[&BB->BB] = Num; 3491 return Num; 3492 } 3493 3494 /// CreateEmptyPHI - Create a (representation of a) PHI in the given block. 3495 /// SSAUpdater will populate it with information about incoming values. The 3496 /// value number of this PHI is whatever the machine value number problem 3497 /// solution determined it to be. This includes non-phi values if SSAUpdater 3498 /// tries to create a PHI where the incoming values are identical. 3499 static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds, 3500 LDVSSAUpdater *Updater) { 3501 BlockValueNum PHIValNum = Updater->getValue(BB); 3502 LDVSSAPhi *PHI = BB->newPHI(PHIValNum); 3503 Updater->PHIs[PHIValNum] = PHI; 3504 return PHIValNum; 3505 } 3506 3507 /// AddPHIOperand - Add the specified value as an operand of the PHI for 3508 /// the specified predecessor block. 3509 static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val, LDVSSABlock *Pred) { 3510 PHI->IncomingValues.push_back(std::make_pair(Pred, Val)); 3511 } 3512 3513 /// ValueIsPHI - Check if the instruction that defines the specified value 3514 /// is a PHI instruction. 3515 static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) { 3516 auto PHIIt = Updater->PHIs.find(Val); 3517 if (PHIIt == Updater->PHIs.end()) 3518 return nullptr; 3519 return PHIIt->second; 3520 } 3521 3522 /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source 3523 /// operands, i.e., it was just added. 3524 static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) { 3525 LDVSSAPhi *PHI = ValueIsPHI(Val, Updater); 3526 if (PHI && PHI->IncomingValues.size() == 0) 3527 return PHI; 3528 return nullptr; 3529 } 3530 3531 /// GetPHIValue - For the specified PHI instruction, return the value 3532 /// that it defines. 3533 static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; } 3534 }; 3535 3536 } // end namespace llvm 3537 3538 Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(MachineFunction &MF, 3539 ValueIDNum **MLiveOuts, 3540 ValueIDNum **MLiveIns, 3541 MachineInstr &Here, 3542 uint64_t InstrNum) { 3543 // This function will be called twice per DBG_INSTR_REF, and might end up 3544 // computing lots of SSA information: memoize it. 
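// (In this pass the two calls typically come from the two walks over every
// instruction: the initial walk that collects variable values, and the later
// walk that emits DBG_VALUEs via the TransferTracker.)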
3545 auto SeenDbgPHIIt = SeenDbgPHIs.find(&Here); 3546 if (SeenDbgPHIIt != SeenDbgPHIs.end()) 3547 return SeenDbgPHIIt->second; 3548 3549 Optional<ValueIDNum> Result = 3550 resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum); 3551 SeenDbgPHIs.insert({&Here, Result}); 3552 return Result; 3553 } 3554 3555 Optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl( 3556 MachineFunction &MF, ValueIDNum **MLiveOuts, ValueIDNum **MLiveIns, 3557 MachineInstr &Here, uint64_t InstrNum) { 3558 // Pick out records of DBG_PHI instructions that have been observed. If there 3559 // are none, then we cannot compute a value number. 3560 auto RangePair = std::equal_range(DebugPHINumToValue.begin(), 3561 DebugPHINumToValue.end(), InstrNum); 3562 auto LowerIt = RangePair.first; 3563 auto UpperIt = RangePair.second; 3564 3565 // No DBG_PHI means there can be no location. 3566 if (LowerIt == UpperIt) 3567 return None; 3568 3569 // If there's only one DBG_PHI, then that is our value number. 3570 if (std::distance(LowerIt, UpperIt) == 1) 3571 return LowerIt->ValueRead; 3572 3573 auto DBGPHIRange = make_range(LowerIt, UpperIt); 3574 3575 // Pick out the location (physreg, slot) where any PHIs must occur. It's 3576 // technically possible for us to merge values in different registers in each 3577 // block, but highly unlikely that LLVM will generate such code after register 3578 // allocation. 3579 LocIdx Loc = LowerIt->ReadLoc; 3580 3581 // We have several DBG_PHIs, and a use position (the Here inst). All each 3582 // DBG_PHI does is identify a value at a program position. We can treat each 3583 // DBG_PHI like it's a Def of a value, and the use position is a Use of a 3584 // value, just like SSA. We use the bulk-standard LLVM SSA updater class to 3585 // determine which Def is used at the Use, and any PHIs that happen along 3586 // the way. 3587 // Adapted LLVM SSA Updater: 3588 LDVSSAUpdater Updater(Loc, MLiveIns); 3589 // Map of which Def or PHI is the current value in each block. 3590 DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues; 3591 // Set of PHIs that we have created along the way. 3592 SmallVector<LDVSSAPhi *, 8> CreatedPHIs; 3593 3594 // Each existing DBG_PHI is a Def'd value under this model. Record these Defs 3595 // for the SSAUpdater. 3596 for (const auto &DBG_PHI : DBGPHIRange) { 3597 LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB); 3598 const ValueIDNum &Num = DBG_PHI.ValueRead; 3599 AvailableValues.insert(std::make_pair(Block, Num.asU64())); 3600 } 3601 3602 LDVSSABlock *HereBlock = Updater.getSSALDVBlock(Here.getParent()); 3603 const auto &AvailIt = AvailableValues.find(HereBlock); 3604 if (AvailIt != AvailableValues.end()) { 3605 // Actually, we already know what the value is -- the Use is in the same 3606 // block as the Def. 3607 return ValueIDNum::fromU64(AvailIt->second); 3608 } 3609 3610 // Otherwise, we must use the SSA Updater. It will identify the value number 3611 // that we are to use, and the PHIs that must happen along the way. 3612 SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs); 3613 BlockValueNum ResultInt = Impl.GetValue(Updater.getSSALDVBlock(Here.getParent())); 3614 ValueIDNum Result = ValueIDNum::fromU64(ResultInt); 3615 3616 // We have the number for a PHI, or possibly live-through value, to be used 3617 // at this Use. There are a number of things we have to check about it though: 3618 // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? 
If so, this
3619 // Use was not completely dominated by DBG_PHIs and we should abort.
3620 // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that
3621 // we've left SSA form. Validate that the inputs to each PHI are the
3622 // expected values.
3623 // * Is a PHI we've created actually a merging of values, or are all the
3624 // predecessor values the same, leading to a non-PHI machine value number?
3625 // (SSAUpdater doesn't know that either). Remap validated PHIs into the
3626 // ValidatedValues collection below to sort this out.
3627 DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues;
3628
3629 // Define all the input DBG_PHI values in ValidatedValues.
3630 for (const auto &DBG_PHI : DBGPHIRange) {
3631 LDVSSABlock *Block = Updater.getSSALDVBlock(DBG_PHI.MBB);
3632 const ValueIDNum &Num = DBG_PHI.ValueRead;
3633 ValidatedValues.insert(std::make_pair(Block, Num));
3634 }
3635
3636 // Sort PHIs to validate into RPO order.
3637 SmallVector<LDVSSAPhi *, 8> SortedPHIs;
3638 for (auto &PHI : CreatedPHIs)
3639 SortedPHIs.push_back(PHI);
3640
3641 std::sort(
3642 SortedPHIs.begin(), SortedPHIs.end(), [&](LDVSSAPhi *A, LDVSSAPhi *B) {
3643 return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB];
3644 });
3645
3646 for (auto &PHI : SortedPHIs) {
3647 ValueIDNum ThisBlockValueNum =
3648 MLiveIns[PHI->ParentBlock->BB.getNumber()][Loc.asU64()];
3649
3650 // Are all these things actually defined?
3651 for (auto &PHIIt : PHI->IncomingValues) {
3652 // Any undef input means DBG_PHIs didn't dominate the use point.
3653 if (Updater.UndefMap.find(&PHIIt.first->BB) != Updater.UndefMap.end())
3654 return None;
3655
3656 ValueIDNum ValueToCheck;
3657 ValueIDNum *BlockLiveOuts = MLiveOuts[PHIIt.first->BB.getNumber()];
3658
3659 auto VVal = ValidatedValues.find(PHIIt.first);
3660 if (VVal == ValidatedValues.end()) {
3661 // We cross a loop, and this is a backedge. LLVM's tail duplication
3662 // happens so late that DBG_PHI instructions should not be able to
3663 // migrate into loops -- meaning we can only be live-through this
3664 // loop.
3665 ValueToCheck = ThisBlockValueNum;
3666 } else {
3667 // Does the block have as a live-out, in the location we're examining,
3668 // the value that we expect? If not, it's been moved or clobbered.
3669 ValueToCheck = VVal->second;
3670 }
3671
3672 if (BlockLiveOuts[Loc.asU64()] != ValueToCheck)
3673 return None;
3674 }
3675
3676 // Record this value as validated.
3677 ValidatedValues.insert({PHI->ParentBlock, ThisBlockValueNum});
3678 }
3679
3680 // All the PHIs are valid: we can return what the SSAUpdater said our value
3681 // number was.
3682 return Result;
3683 }
3684
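// For illustration only -- a hypothetical diamond CFG, with made-up block
// names and register:
//
//            bb.0
//           /    \
//        bb.1    bb.2      (each contains a DBG_PHI reading $rax)
//           \    /
//            bb.3          (contains the DBG_INSTR_REF use)
//
// The two DBG_PHIs act as Defs, so the SSA updater requests a PHI in bb.3.
// By construction (CreateEmptyPHI above), that PHI's value number is whatever
// the machine value-numbering solution says is live into bb.3 at $rax's
// location. The validation loop then checks that neither incoming path needed
// an "undef" value (i.e., both paths pass through a DBG_PHI), and that each
// predecessor's live-out at that location still matches the value its DBG_PHI
// read. If either check fails, None is returned and the DBG_INSTR_REF gets no
// location.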