//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {
  ARM_MC::initLLVMToCVRegMapping(this);
}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function &F = MF->getFunction();
  if (F.getCallingConv() == CallingConv::GHC) {
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_NoRegs_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList
                          : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so less
      // need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_AAPCS_SplitPush_SwiftError_SaveList
                        : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask
                              : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value)
  //
  // In case that the calling convention does not use the same register for
  // both or otherwise does not want to enable this optimization, the function
  // should return NULL
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  // Also reserve a GPR pair register if either of its sub-registers is
  // reserved.
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, Reg);
  // For v8.1m architecture
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling getMaxCallFrameComputed() which may not be
    // available when getPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                 ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                 ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCPhysReg getPairedGPR(MCPhysReg Reg, bool Odd,
                              const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCPhysReg Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<Register, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (Register::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still enable access to work, it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken()
    || needsStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  // to only disallow SP relative references in the live range of
  // the VLA(s). In practice, it's unclear how much difference that
  // would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ?
     ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 &&
      (Register::isVirtualRegister(FrameReg) || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) Doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}