1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "SystemZRegisterInfo.h" 10 #include "SystemZInstrInfo.h" 11 #include "SystemZSubtarget.h" 12 #include "llvm/ADT/SmallSet.h" 13 #include "llvm/CodeGen/LiveIntervals.h" 14 #include "llvm/CodeGen/MachineInstrBuilder.h" 15 #include "llvm/CodeGen/MachineRegisterInfo.h" 16 #include "llvm/CodeGen/TargetFrameLowering.h" 17 #include "llvm/CodeGen/VirtRegMap.h" 18 #include "llvm/IR/DebugInfoMetadata.h" 19 20 using namespace llvm; 21 22 #define GET_REGINFO_TARGET_DESC 23 #include "SystemZGenRegisterInfo.inc" 24 25 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO 26 // somehow belongs in it. Otherwise, return GRX32. 
27 static const TargetRegisterClass *getRC32(MachineOperand &MO, 28 const VirtRegMap *VRM, 29 const MachineRegisterInfo *MRI) { 30 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg()); 31 32 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) || 33 MO.getSubReg() == SystemZ::subreg_l32 || 34 MO.getSubReg() == SystemZ::subreg_hl32) 35 return &SystemZ::GR32BitRegClass; 36 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) || 37 MO.getSubReg() == SystemZ::subreg_h32 || 38 MO.getSubReg() == SystemZ::subreg_hh32) 39 return &SystemZ::GRH32BitRegClass; 40 41 if (VRM && VRM->hasPhys(MO.getReg())) { 42 Register PhysReg = VRM->getPhys(MO.getReg()); 43 if (SystemZ::GR32BitRegClass.contains(PhysReg)) 44 return &SystemZ::GR32BitRegClass; 45 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) && 46 "Phys reg not in GR32 or GRH32?"); 47 return &SystemZ::GRH32BitRegClass; 48 } 49 50 assert (RC == &SystemZ::GRX32BitRegClass); 51 return RC; 52 } 53 54 // Pass the registers of RC as hints while making sure that if any of these 55 // registers are copy hints (and therefore already in Hints), hint them 56 // first. 
// Rebuild Hints so it contains exactly the allocatable registers of RC in
// allocation order, with any pre-existing (copy) hints placed first.
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  // First pass: keep the original copy hints that belong to RC.
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  // Second pass: append the remaining registers of RC in allocation order.
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

// Provide SystemZ-specific allocation hints for VirtReg on top of the
// target-independent (copy-based) hints:
//  - hint the tied/commutable operand's physreg for instructions that have a
//    two-operand (tied) form, so the tie can be satisfied without a copy;
//  - for GRX32 vregs used by LOCRMux/SELRMux, steer both operands into the
//    same 32-bit half (GR32 or GRH32) to avoid expanding the Mux into a
//    jump sequence;
//  - for GRX32 vregs compared against 0 by CHIMux/CFIMux and defined only by
//    LMux, prefer GR32 (soft preference).
// Returns true when the hints must be the only registers considered by the
// register allocator, false when they are merely preferred.
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        // Identify which operand is VirtReg (VRRegMO), the operand it would
        // be tied to (OtherMO), and, for commutable instructions, the
        // alternative operand (CommuMO).
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        // If MO already has (or is) a physreg, translate it through any
        // subregister indices involved and record it as a two-address hint.
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    // Emit the collected hints in allocation order, after the copy hints.
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk the web of GRX32 registers connected to VirtReg through
    // LOCRMux/SELRMux operands, looking for a constraint to one half.
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
              TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                     getRC32(TrueMO, VRM, MRI));
          // SELRMux also has a result operand that must agree.
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          if (Use.getOperand(1).getImm() == 0) {
            // A compare with 0 can use a low-part-only instruction if every
            // def of VirtReg is an LMux (which can then become a low load).
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

// Callee-saved register list for the XPLINK64 ABI; a wider list is used when
// the vector facility is available.
const MCPhysReg *
SystemZXPLINK64Registers::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_SaveList
                               : CSR_SystemZ_XPLINK64_SaveList;
}

// Callee-saved register list for the ELF ABI, with special cases for the
// GHC and AnyReg calling conventions and for functions using swifterror.
const MCPhysReg *
SystemZELFRegisters::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
                                : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_ELF_SaveList;
}

// Call-preserved register mask for the XPLINK64 ABI (vector-aware).
const uint32_t *
SystemZXPLINK64Registers::getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_RegMask
                               : CSR_SystemZ_XPLINK64_RegMask;
}

// Call-preserved register mask for the ELF ABI; mirrors the special cases in
// SystemZELFRegisters::getCalleeSavedRegs above.
const uint32_t *
SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
                                : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_ELF_RegMask;
}

SystemZRegisterInfo::SystemZRegisterInfo(unsigned int RA)
    : SystemZGenRegisterInfo(RA) {}

// Delegate to the ABI-specific register object held by the subtarget.
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {

  const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return Regs->getCalleeSavedRegs(MF);
}

// Delegate to the ABI-specific register object held by the subtarget.
const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {

  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  return Regs->getCallPreservedMask(MF, CC);
}

// Compute the set of registers the allocator must never use: the stack
// pointer (and frame pointer when one is needed), the access registers
// holding the thread pointer, and the floating-point control register.
BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  if (TFI->hasFP(MF))
    // The frame pointer. Reserve all aliases.
    for (MCRegAliasIterator AI(Regs->getFramePointerRegister(), this, true);
         AI.isValid(); ++AI)
      Reserved.set(*AI);

  // Reserve all aliases for the stack pointer.
  for (MCRegAliasIterator AI(Regs->getStackPointerRegister(), this, true);
       AI.isValid(); ++AI)
    Reserved.set(*AI);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

// Replace the frame-index operand of MI (at FIOperandNum) with a real base
// register and displacement, switching to an equivalent opcode with a wider
// displacement range, or materializing an anchor address in a scratch
// register, when the offset does not fit the instruction's encoding.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions: rewrite the operand and
  // fold the offset into the debug expression instead of rewriting code.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      // DBG_VALUE_LIST: append the offset to the expression fragment that
      // refers to this operand.
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      // Keep only the low bits that fit; shrink the mask until the
      // remaining offset is encodable.
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Decide whether the register coalescer may merge the two operands of the
// COPY instruction MI. GR128 subreg copies are only coalesced when both live
// ranges are local to a single basic block and enough GR128 registers remain
// unclobbered in the combined region.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too much interfering registers. Otherwise
  // regalloc may run out of registers.

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
      LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
      LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  // The combined live range runs from the start of whichever interval begins
  // first to the end of whichever ends last; the COPY joins them in between.
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        // Record the containing GR128 register (super-reg of MO, or MO
        // itself) as clobbered.
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

// Return the register used as the base for frame accesses: the frame
// pointer when the function has one, otherwise the stack pointer.
Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return TFI->hasFP(MF) ? Regs->getFramePointerRegister()
                        : Regs->getStackPointerRegister();
}

// CC values cannot be copied directly; cross-copy them through a GR32.
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}