1 //==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the TargetRegisterInfo interface.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/CodeGen/TargetRegisterInfo.h"
14 #include "llvm/ADT/ArrayRef.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/BinaryFormat/Dwarf.h"
20 #include "llvm/CodeGen/LiveInterval.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetFrameLowering.h"
25 #include "llvm/CodeGen/TargetInstrInfo.h"
26 #include "llvm/CodeGen/TargetSubtargetInfo.h"
27 #include "llvm/CodeGen/VirtRegMap.h"
28 #include "llvm/CodeGenTypes/MachineValueType.h"
29 #include "llvm/Config/llvm-config.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Compiler.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/Printable.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include <cassert>
40 #include <utility>
41
42 #define DEBUG_TYPE "target-reg-info"
43
44 using namespace llvm;
45
// Command-line threshold (in live-range size units) above which a live range
// is considered "huge"; used by shouldRegionSplitForVirtReg below to skip
// region splitting for rematerializable defs and bound compile time.
static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));
51
/// Construct from the TableGen-emitted register tables.
/// \p ID          target register-info descriptor.
/// \p RCB, RCE    [begin, end) range of register classes.
/// \p SRINames    sub-register index names.
/// \p SubIdxRanges per-HwMode sub-register bit ranges (see getSubRegIdxSize).
/// \p SRILaneMasks lane masks, one per sub-register index.
/// \p SRICoveringLanes lanes covered by sub-registers.
/// \p RCIs        per-class register class info table.
/// \p RCVTLists   legal value-type lists for the classes.
/// \p Mode        active hardware mode (indexes SubIdxRanges).
TargetRegisterInfo::TargetRegisterInfo(
    const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
    regclass_iterator RCE, const char *const *SRINames,
    const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
    LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
    const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
      SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
      HwMode(Mode) {}

// Out-of-line so the vtable is anchored in this translation unit.
TargetRegisterInfo::~TargetRegisterInfo() = default;
64
shouldRegionSplitForVirtReg(const MachineFunction & MF,const LiveInterval & VirtReg) const65 bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
66 const MachineFunction &MF, const LiveInterval &VirtReg) const {
67 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
68 const MachineRegisterInfo &MRI = MF.getRegInfo();
69 MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
70 if (MI && TII->isTriviallyReMaterializable(*MI) &&
71 VirtReg.size() > HugeSizeForSplit)
72 return false;
73 return true;
74 }
75
markSuperRegs(BitVector & RegisterSet,MCRegister Reg) const76 void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
77 MCRegister Reg) const {
78 for (MCPhysReg SR : superregs_inclusive(Reg))
79 RegisterSet.set(SR);
80 }
81
/// Verify that for every register set in \p RegisterSet, all of its
/// super-registers are set as well (registers listed in \p Exceptions are
/// exempt). Prints a diagnostic and returns false on the first violation.
bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    // Already visited as a super-register of an earlier reg; its own
    // super-registers were covered transitively at that point.
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // We transitively check superregs. So we can remember this for later
      // to avoid compiletime explosion in deep register hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}
104
105 namespace llvm {
106
printReg(Register Reg,const TargetRegisterInfo * TRI,unsigned SubIdx,const MachineRegisterInfo * MRI)107 Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
108 unsigned SubIdx, const MachineRegisterInfo *MRI) {
109 return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
110 if (!Reg)
111 OS << "$noreg";
112 else if (Reg.isStack())
113 OS << "SS#" << Reg.stackSlotIndex();
114 else if (Reg.isVirtual()) {
115 StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
116 if (Name != "") {
117 OS << '%' << Name;
118 } else {
119 OS << '%' << Reg.virtRegIndex();
120 }
121 } else if (!TRI)
122 OS << '$' << "physreg" << Reg.id();
123 else if (Reg < TRI->getNumRegs()) {
124 OS << '$';
125 printLowerCase(TRI->getName(Reg), OS);
126 } else
127 llvm_unreachable("Register kind is unsupported.");
128
129 if (SubIdx) {
130 if (TRI)
131 OS << ':' << TRI->getSubRegIndexName(SubIdx);
132 else
133 OS << ":sub(" << SubIdx << ')';
134 }
135 });
136 }
137
/// Create a Printable that renders the register unit \p Unit as the '~'-joined
/// names of its root registers (e.g. "AL~AH"), or a generic "Unit~N" /
/// "BadUnit~N" form when TRI is missing or the unit is out of range.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    // Additional roots (units shared by register pairs) are '~'-separated.
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}
160
printVRegOrUnit(unsigned Unit,const TargetRegisterInfo * TRI)161 Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
162 return Printable([Unit, TRI](raw_ostream &OS) {
163 if (Register::isVirtualRegister(Unit)) {
164 OS << '%' << Register(Unit).virtRegIndex();
165 } else {
166 OS << printRegUnit(Unit, TRI);
167 }
168 });
169 }
170
printRegClassOrBank(Register Reg,const MachineRegisterInfo & RegInfo,const TargetRegisterInfo * TRI)171 Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
172 const TargetRegisterInfo *TRI) {
173 return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
174 if (RegInfo.getRegClassOrNull(Reg))
175 OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
176 else if (RegInfo.getRegBankOrNull(Reg))
177 OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
178 else {
179 OS << "_";
180 assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
181 "Generic registers must have a valid type");
182 }
183 });
184 }
185
186 } // end namespace llvm
187
188 /// getAllocatableClass - Return the maximal subclass of the given register
189 /// class that is alloctable, or NULL.
190 const TargetRegisterClass *
getAllocatableClass(const TargetRegisterClass * RC) const191 TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
192 if (!RC || RC->isAllocatable())
193 return RC;
194
195 for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
196 ++It) {
197 const TargetRegisterClass *SubRC = getRegClass(It.getID());
198 if (SubRC->isAllocatable())
199 return SubRC;
200 }
201 return nullptr;
202 }
203
/// Shared implementation for getMinimalPhysRegClass (MVT) and
/// getMinimalPhysRegClassLLT (LLT): find the smallest register class that
/// contains \p Reg and (unless \p Ty is the "any type" default) is legal for
/// type \p Ty. Returns null only in the LLT flavor; the MVT flavor asserts.
template <typename TypeT>
static const TargetRegisterClass *
getMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg,
                       TypeT Ty) {
  static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
  assert(Reg.isPhysical() && "reg must be a physical register");

  // "Default" means no type constraint: MVT::Other or an invalid LLT.
  bool IsDefault = [&]() {
    if constexpr (std::is_same_v<TypeT, MVT>)
      return Ty == MVT::Other;
    else
      return !Ty.isValid();
  }();

  // Pick the most sub register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : TRI->regclasses()) {
    // Keep RC only if it is a strict sub-class of the best found so far.
    if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) && RC->contains(Reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  // With an MVT query a class must always exist; LLT callers may get null.
  if constexpr (std::is_same_v<TypeT, MVT>)
    assert(BestRC && "Couldn't find the register class");
  return BestRC;
}
231
/// Like getMinimalPhysRegClass above, but the class must contain both
/// \p Reg1 and \p Reg2. Shared by the MVT and LLT public entry points.
template <typename TypeT>
static const TargetRegisterClass *
getCommonMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg1,
                             MCRegister Reg2, TypeT Ty) {
  static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
  assert(Reg1.isPhysical() && Reg2.isPhysical() &&
         "Reg1/Reg2 must be a physical register");

  // "Default" means no type constraint: MVT::Other or an invalid LLT.
  bool IsDefault = [&]() {
    if constexpr (std::is_same_v<TypeT, MVT>)
      return Ty == MVT::Other;
    else
      return !Ty.isValid();
  }();

  // Pick the most sub register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : TRI->regclasses()) {
    if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) &&
        RC->contains(Reg1, Reg2) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  // With an MVT query a class must always exist; LLT callers may get null.
  if constexpr (std::is_same_v<TypeT, MVT>)
    assert(BestRC && "Couldn't find the register class");
  return BestRC;
}
260
// Public MVT entry point; delegates to the file-local template above.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister Reg, MVT VT) const {
  return ::getMinimalPhysRegClass(this, Reg, VT);
}

// Public MVT entry point for the two-register variant.
const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClass(
    MCRegister Reg1, MCRegister Reg2, MVT VT) const {
  return ::getCommonMinimalPhysRegClass(this, Reg1, Reg2, VT);
}

// Public LLT entry point; unlike the MVT variant this may return null.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister Reg, LLT Ty) const {
  return ::getMinimalPhysRegClass(this, Reg, Ty);
}

// Public LLT entry point for the two-register variant; may return null.
const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClassLLT(
    MCRegister Reg1, MCRegister Reg2, LLT Ty) const {
  return ::getCommonMinimalPhysRegClass(this, Reg1, Reg2, Ty);
}
280
281 /// getAllocatableSetForRC - Toggle the bits that represent allocatable
282 /// registers for the specific register class.
getAllocatableSetForRC(const MachineFunction & MF,const TargetRegisterClass * RC,BitVector & R)283 static void getAllocatableSetForRC(const MachineFunction &MF,
284 const TargetRegisterClass *RC, BitVector &R){
285 assert(RC->isAllocatable() && "invalid for nonallocatable sets");
286 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
287 for (MCPhysReg PR : Order)
288 R.set(PR);
289 }
290
/// Return a BitVector of the allocatable registers in \p MF: those of the
/// (maximal allocatable subclass of) \p RC, or of every allocatable class
/// when \p RC is null, minus the function's reserved registers.
BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
    const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    // No class given: accumulate every allocatable class.
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}
312
313 static inline
firstCommonClass(const uint32_t * A,const uint32_t * B,const TargetRegisterInfo * TRI)314 const TargetRegisterClass *firstCommonClass(const uint32_t *A,
315 const uint32_t *B,
316 const TargetRegisterInfo *TRI) {
317 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
318 if (unsigned Common = *A++ & *B++)
319 return TRI->getRegClass(I + llvm::countr_zero(Common));
320 return nullptr;
321 }
322
/// Return the largest common sub-class of \p A and \p B, or null when the
/// classes have no common sub-class.
const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}
336
/// Return a sub-class of \p A whose registers, viewed through sub-register
/// index \p Idx, land in \p B — i.e. a class usable as the super-register
/// side of a (super, Idx) -> B projection. Null when no such class exists.
const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}
352
/// Find the smallest register class that can serve as a common super-register
/// class of (RCA:SubA) and (RCB:SubB). On success, \p PreA / \p PreB receive
/// the sub-register indices that project the result back onto RCA / RCB.
/// Returns null when no common super-register class exists.
const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register so the answer will be found in
  // the first iteration. This makes the search linear for the most common
  // case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    // Swap everything, including which out-parameter each pointer aliases,
    // so results are written back to the caller's original PreA/PreB.
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}
414
/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file — i.e. whether a copy between them can be
/// rewritten without crossing register banks.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  //
  // When processing uncoalescable copies / bitcasts, it is possible we reach
  // here with the same register class, but mismatched subregister indices.
  if (DefRC == SrcRC && DefSubReg == SrcSubReg)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the register is a sub register, make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the register is a sub register, check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}
450
shouldRewriteCopySrc(const TargetRegisterClass * DefRC,unsigned DefSubReg,const TargetRegisterClass * SrcRC,unsigned SrcSubReg) const451 bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
452 unsigned DefSubReg,
453 const TargetRegisterClass *SrcRC,
454 unsigned SrcSubReg) const {
455 // If this source does not incur a cross register bank copy, use it.
456 return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
457 }
458
getSpillWeightScaleFactor(const TargetRegisterClass * RC) const459 float TargetRegisterInfo::getSpillWeightScaleFactor(
460 const TargetRegisterClass *RC) const {
461 return 1.0;
462 }
463
// Compute target-independent register allocator hints to help eliminate
// copies. Appends to \p Hints the physical registers recorded for \p VirtReg
// in MRI (mapping virtual hints through \p VRM), filtered to valid,
// non-reserved registers present in \p Order. Returns false: the hints are
// preferences, not an exclusive order.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  if (!Hints_MRI)
    return false;

  SmallSet<Register, 32> HintedRegs;
  // First hint may be a target hint.
  bool Skip = (Hints_MRI->first != 0);
  for (auto Reg : Hints_MRI->second) {
    // Skip the first entry when it carries a target-specific hint kind;
    // target overrides handle those.
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order. The
    // target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys.id());
  }
  return false;
}
510
/// Return true if \p PhysReg is preserved across calls under \p MF's calling
/// convention, i.e. its bit is set in the call-preserved register mask.
/// Returns false for the null register or when no mask is available.
bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (!PhysReg)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(PhysReg.isPhysical() && "Expected physical register");
    // The mask packs one bit per register, 32 registers per word.
    return (callerPreservedRegs[PhysReg.id() / 32] >> PhysReg.id() % 32) & 1;
  }
  return false;
}
523
// Default: stack realignment is possible whenever the frame info allows it;
// targets override to add register/ABI constraints.
bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().isStackRealignable();
}

// Default: realign exactly when the frame info requests it.
bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().shouldRealignStack();
}
531
regmaskSubsetEqual(const uint32_t * mask0,const uint32_t * mask1) const532 bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
533 const uint32_t *mask1) const {
534 unsigned N = (getNumRegs()+31) / 32;
535 for (unsigned I = 0; I < N; ++I)
536 if ((mask0[I] & mask1[I]) != mask0[I])
537 return false;
538 return true;
539 }
540
/// Return the size in bits of \p Reg: for physical registers, the size of a
/// minimal containing class; for virtual registers, the LLT size if one is
/// assigned, otherwise the size of the register's class.
TypeSize
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
    assert(RC && "Unable to deduce the register class");
    return getRegSizeInBits(*RC);
  }
  // Generic virtual registers carry their size in the LLT.
  LLT Ty = MRI.getType(Reg);
  if (Ty.isValid())
    return Ty.getSizeInBits();

  // Since Reg is not a generic register, it may have a register class.
  RC = MRI.getRegClass(Reg);
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}
562
getCoveringSubRegIndexes(const TargetRegisterClass * RC,LaneBitmask LaneMask,SmallVectorImpl<unsigned> & NeededIndexes) const563 bool TargetRegisterInfo::getCoveringSubRegIndexes(
564 const TargetRegisterClass *RC, LaneBitmask LaneMask,
565 SmallVectorImpl<unsigned> &NeededIndexes) const {
566 SmallVector<unsigned, 8> PossibleIndexes;
567 unsigned BestIdx = 0;
568 unsigned BestCover = 0;
569
570 for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
571 // Is this index even compatible with the given class?
572 if (getSubClassWithSubReg(RC, Idx) != RC)
573 continue;
574 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
575 // Early exit if we found a perfect match.
576 if (SubRegMask == LaneMask) {
577 BestIdx = Idx;
578 break;
579 }
580
581 // The index must not cover any lanes outside \p LaneMask.
582 if ((SubRegMask & ~LaneMask).any())
583 continue;
584
585 unsigned PopCount = SubRegMask.getNumLanes();
586 PossibleIndexes.push_back(Idx);
587 if (PopCount > BestCover) {
588 BestCover = PopCount;
589 BestIdx = Idx;
590 }
591 }
592
593 // Abort if we cannot possibly implement the COPY with the given indexes.
594 if (BestIdx == 0)
595 return false;
596
597 NeededIndexes.push_back(BestIdx);
598
599 // Greedy heuristic: Keep iterating keeping the best covering subreg index
600 // each time.
601 LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
602 while (LanesLeft.any()) {
603 unsigned BestIdx = 0;
604 int BestCover = std::numeric_limits<int>::min();
605 for (unsigned Idx : PossibleIndexes) {
606 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
607 // Early exit if we found a perfect match.
608 if (SubRegMask == LanesLeft) {
609 BestIdx = Idx;
610 break;
611 }
612
613 // Do not cover already-covered lanes to avoid creating cycles
614 // in copy bundles (= bundle contains copies that write to the
615 // registers).
616 if ((SubRegMask & ~LanesLeft).any())
617 continue;
618
619 // Try to cover as many of the remaining lanes as possible.
620 const int Cover = (SubRegMask & LanesLeft).getNumLanes();
621 if (Cover > BestCover) {
622 BestCover = Cover;
623 BestIdx = Idx;
624 }
625 }
626
627 if (BestIdx == 0)
628 return false; // Impossible to handle
629
630 NeededIndexes.push_back(BestIdx);
631
632 LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
633 }
634
635 return BestIdx;
636 }
637
/// Return the bit width of sub-register index \p Idx for the active HwMode.
/// The ranges table stores getNumSubRegIndices() entries per hardware mode.
unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
}

/// Return the bit offset of sub-register index \p Idx within its
/// super-register for the active HwMode.
unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
}
649
/// Follow copy-like instructions (COPY, SUBREG_TO_REG) backwards from
/// \p SrcReg to the first register that is not itself defined by one.
/// Stops early at the first physical register encountered.
Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Reached a real (non-copy) definition.
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      // SUBREG_TO_REG carries its source in operand 2.
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Physical sources have no MRI def chain to follow; stop here.
    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}
672
/// Like lookThruCopyLike, but additionally requires every register on the
/// chain (including the final one) to be virtual and have a single
/// non-debug use. Returns the null Register when any link violates that.
Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      // SUBREG_TO_REG carries its source in operand 2.
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}
697
/// Append DWARF expression opcodes representing \p Offset to \p Ops.
/// The default implementation only supports fixed offsets; targets with
/// scalable stack objects (e.g. SVE) must override.
void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}
703
/// Prepend \p Offset (via getOffsetOpcodes) to debug expression \p Expr,
/// honoring the DerefBefore/DerefAfter/StackValue/EntryValue bits in
/// \p PrependFlags. Returns the new DIExpression.
DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  // Deref-before goes ahead of the offset ops, deref-after behind them.
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}
722
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger-invokable helper: print \p Reg (with optional sub-register index)
// to the debug stream. Static so it can be called without an instance.
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif
730