//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function &F = MF->getFunction();
  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved registers is empty, as all those registers
    // are used for passing STG registers around.
    return CSR_NoRegs_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow
      // a function conforming to the AAPCS to serve as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved in order to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_AAPCS_SplitPush_SwiftError_SaveList :
           CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}
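// Illustrative note (the exact list contents are an assumption from the
// TableGen-generated tables, not spelled out in this file): for a plain AAPCS
// function on a non-Darwin target without split push/pop, the list chosen
// above is CSR_AAPCS_SaveList, roughly {R4-R11, LR} plus the VFP callee-saved
// registers D8-D15.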
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(
    const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i32 argument (which must also be the register used to
  // return a single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}
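// Illustrative note (an assumption about the generated masks): under AAPCS
// the first i32 argument and a single i32 return value both live in R0, so
// the ThisReturn mask is expected to be the ordinary call-preserved mask with
// R0 added. That is what lets a call known to return its 'this' pointer keep
// using R0 afterwards without a reload.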
ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  // Reserve any GPR pair that has a reserved sub-register.
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
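// Illustrative example (pair names come from the generated register tables):
// because SP is always reserved above, the pair-reservation loop also marks
// the GPRPair that contains it (R12_SP), so the allocator can never hand out
// a register pair aliasing the stack pointer.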
bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up querying isMaxCallFrameSizeComputed(), and the maximum
    // call frame size may not be available yet when getRegPressureLimit() is
    // called as part of ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCPhysReg getPairedGPR(MCPhysReg Reg, bool Odd,
                              const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return false;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and
  // provide the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCPhysReg Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}
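// Illustrative walk-through (virtual register numbers are hypothetical):
// suppose %0 carries a RegPairEven hint with partner %1, and %1 has already
// been assigned R5. Then Odd == 0, and getPairedGPR(R5, /*Odd=*/false, this)
// finds the super-register pair R4_R5 and returns its gsub_0, i.e. R4, which
// becomes the first hint for %0. The loop then appends the remaining even
// registers from Order whose odd partners are not reserved.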
void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one half of an even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the allocation hint
    // of the other half of the pair must be updated to reflect the
    // relationship change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (Register::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets
  // only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If the guess is wrong, the register scavenger will still make the access
  // work; it just won't be optimal. (We should always be able to reach the
  // emergency spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}
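// Illustrative note (the specific register is defined in the header; treat it
// as an assumption here): when hasBasePointer() returns true, the backend
// reserves BasePtr (R6 in this backend) and local accesses become something
// like
//   ldr r0, [r6, #16]
// which keeps working even while SP moves and FP offsets go out of range.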
bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken()
    || needsStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from the constant pool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}
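// Illustrative note (the final assembly is an assumption about lowering): for
// Val == 0x12345678, the LDRcp built above is expected to become a literal
// load such as
//   ldr r0, .LCPI0_0
// with the constant placed in a nearby constant-pool island, rather than
// being synthesized inline with a movw/movt pair.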
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}
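// Illustrative example (operand values are hypothetical): for a VLDRD in
// AddrMode5 whose immediate encodes an offset of 3 with the 'sub' flag set,
// the switch above produces InstrOffs == -3 and Scale == 4, so the function
// returns a byte offset of -12.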
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is
  // pre-regalloc, so we don't know everything for certain yet), whether this
  // offset is likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
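// Worked example (numbers are hypothetical): a t2LDRi8-class access at an
// entry-SP-relative offset of -300 in a Thumb2 function gives
// FPOffset == -300 - 8 - 80 == -388, which is outside the roughly 255-byte
// negative range of the i8 form. If the function also has VLAs, SP-relative
// addressing is ruled out too, so needsFrameBaseReg() returns true and a
// virtual base register will be created for the access.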
/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       Register BaseReg,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
}
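// Illustrative note (register choice and assembly are hypothetical): in ARM
// mode the ADDri emitted above resolves, after frame-index elimination, to
// something like
//   add r6, sp, #64
// placing a reusable pointer to the local area in the new base register.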
void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 only positive ones, so
    // choose the form to check against based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
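// Worked example (values are hypothetical): a tLDRspi with BaseReg == SP uses
// AddrModeT1_s, so NumBits == 8 and Scale == 4. A byte offset of 1020 is
// word-aligned and equals Mask * Scale (255 * 4), so it is legal; 1024 would
// exceed that bound and be rejected.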
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that
  // holds SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 &&
      (Register::isVirtualRegister(FrameReg) || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,
                                                 true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If we are not copying into a sub-register, this should be OK because we
  // shouldn't need to split the register.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce
  // them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately, we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many expensive
  // registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825
  //   (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}