//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {
  ARM_MC::initLLVMToCVRegMapping(this);
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function &F = MF->getFunction();
  if (F.getCallingConv() == CallingConv::GHC) {
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin()
               ? CSR_iOS_SwiftTail_SaveList
               : (UseSplitPush ? CSR_AAPCS_SplitPush_SwiftTail_SaveList
                               : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {
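    // Interrupt handlers have to preserve every register the interrupted
    // code may be using; the save list only omits what the hardware saves
    // automatically, which depends on the profile and the interrupt kind.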
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow
      // a function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_AAPCS_SplitPush_SwiftError_SaveList
                        : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}
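
// The call-site counterpart of getCalleeSavedRegs: the mask returned for a
// calling convention marks the registers that a call using that convention
// preserves.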
const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(
    const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i32 argument (which must also be the register used to
  // return a single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}
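
// Note that the reserved set is recomputed on every query: it depends on
// frame lowering decisions (frame pointer, base pointer) that may still
// change while the function is being compiled.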
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return Super;
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return Super;
      break;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up querying the max call frame size, which may not have
    // been computed yet when getRegPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
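// A GPR belongs to at most one GPRPair; gsub_0 is the even (low) half and
// gsub_1 the odd (high) half, so the sibling is found by asking the pair for
// its other sub-register.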
static MCPhysReg getPairedGPR(MCPhysReg Reg, bool Odd,
                              const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                     MF, VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and
  // provide the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCPhysReg Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one half of an even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the allocation hint
    // on the other half of the pair must be updated to reflect the change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already been divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (Register::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}
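
// Decide whether this function needs a dedicated base pointer: a second
// stable register for reaching locals when the SP can move unpredictably
// (VLAs, unreserved call frames) and the FP's offset range is too limited.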
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If the estimate is wrong, the scavenger will still make the accesses
  // work; they just won't be optimal. (We should always be able to reach the
  // emergency spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled, or
  // 2. there are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}
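
// The frame cannot be eliminated when frame pointer elimination is disabled
// and the stack gets adjusted, or when the layout cannot be described purely
// SP-relative (VLAs, frame address taken, stack realignment).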
bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}
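
// Extract the immediate offset already folded into MI's frame-index
// reference. Each addressing mode stores its offset differently (operand
// position, sign encoding, implicit scale), so dispatch on the addressing
// mode from TSFlags.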
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackSlotAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate, based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is
  // pre-regalloc, so we don't know everything for certain yet), whether this
  // offset is likely to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12:   case ARM::LDRH:   case ARM::LDRBi12:
  case ARM::STRi12:   case ARM::STRH:   case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS:    case ARM::VLDRD:
  case ARM::VSTRS:    case ARM::VSTRD:
  case ARM::tSTRspi:  case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
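  // (The 8 above covers R7 and LR; R8-R11 plus D8-D15 account for another
  // 16 + 64 = 80 bytes.)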
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
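  // Pick the add-immediate opcode matching the function's ISA: ADDri for ARM
  // mode, tADDframe for Thumb1, t2ADDri for Thumb2.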
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
      (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}
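
// Check whether 'Offset' from 'BaseReg' can be encoded in MI's immediate
// field, taking the addressing mode's width, sign and implicit scale into
// account.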
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest by providing a register that
  // is SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
      ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());
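
  // If the remaining offset is zero and FrameReg is directly usable in this
  // operand, just rewrite the operand; otherwise materialize FrameReg+Offset
  // into a scratch register and use that instead.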
  if (Offset == 0 &&
      (Register::isVirtualRegister(FrameReg) || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,
                                                 true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register, this should be OK because we
  // shouldn't need to split the register.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many
  // expensive registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825,
  //   (2) generates better code in some test cases (like vldm-sched-a9.ll),
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC).
  // In practice the SizeMultiplier will only factor in for straight-line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}
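
// Rewriting a copy source is only safe when the new source class can provide
// the requested lane: D16-D31 have no S-register sub-registers, so only
// DPR_VFP2 (D0-D15) can supply an SPR.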
bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}