//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"

#include "ARMCallLowering.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/ARMTargetParser.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define DEBUG_TYPE "arm-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "ARMGenSubtargetInfo.inc"

/// Allow fused multiply-accumulate formation; read into the UseMulOps member
/// in the constructor below.
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(true), cl::Hidden);

// Selects how freely Thumb2 IT blocks may be generated; consumed by the
// IT-handling switch in initSubtargetFeatures.
enum ITMode {
  DefaultIT,
  RestrictedIT
};

static cl::opt<ITMode>
IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
   cl::values(clEnumValN(DefaultIT, "arm-default-it",
                         "Generate any type of IT block"),
              clEnumValN(RestrictedIT, "arm-restrict-it",
                         "Disallow complex IT blocks")));

/// ForceFastISel - Use the fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
              cl::init(false), cl::Hidden);

// Overrides the hasMVEIntegerOps-based default in enableSubRegLiveness when
// given on the command line.
static cl::opt<bool> EnableSubRegLiveness("arm-enable-subreg-liveness",
                                          cl::init(false), cl::Hidden);

/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}

/// Create the frame lowering object appropriate for this subtarget. Also runs
/// initializeSubtargetDependencies as a side effect, which is why it can be
/// called from the constructor's initializer list before most members exist.
ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
                                                        StringRef FS) {
  ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
  if (STI.isThumb1Only())
    return (ARMFrameLowering *)new Thumb1FrameLowering(STI);

  return new ARMFrameLowering(STI);
}

ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      // NOTE: initializer order matters — initializeFrameLowering runs
      // initializeSubtargetDependencies, so the queries below are valid.
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  // Set up the GlobalISel pipeline objects (call lowering, legalizer,
  // register bank info, instruction selector).
  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(
      *static_cast<const ARMBaseTargetMachine *>(&TM), *this, *RBI));

  RegBankInfo.reset(RBI);
}

const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}

void ARMSubtarget::initializeEnvironment() {
  // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
  // directly from it, but we can try to make sure they're consistent when both
  // available.
  UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
               Options.ExceptionModel == ExceptionHandling::None) ||
              Options.ExceptionModel == ExceptionHandling::SjLj;
  assert((!TM.getMCAsmInfo() ||
          (TM.getMCAsmInfo()->getExceptionHandlingType() ==
           ExceptionHandling::SjLj) == UseSjLjEH) &&
         "inconsistent sjlj choice between CodeGen and MC");
}

void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = std::string(FS);
  }
  ParseSubtargetFeatures(CPUString, /*TuneCPU*/ CPUString, ArchFS);

  // FIXME: This used enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  if (genExecuteOnly()) {
    // Execute only support for >= v8-M Baseline requires movt support
    if (hasV8MBaselineOps())
      NoMovt = false;
    if (!hasV6MOps())
      report_fatal_error("Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
  SchedModel = getSchedModelForCPU(CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  if (isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.

  SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps();

  if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
    SupportsTailCall = false;

  // Translate the -arm-restrict-it command-line choice into the subtarget
  // flag used by the IT-block passes.
  switch (IT) {
  case DefaultIT:
    RestrictIT = false;
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    HasNEONForFP = true;

  // Under read-write position independence R9 holds the static base; keep the
  // register allocator away from it.
  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78C:
  case CortexA710:
  case CortexR4:
  case CortexR4F:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexM7:
  case CortexR52:
  case CortexX1:
  case CortexX1C:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PrefLoopLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseN1:
  case NeoverseN2:
  case NeoverseV1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}

bool
ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }

// The ABI predicates below simply forward to the target machine's resolved
// ABI choice; the asserts guard against querying before it has been set.
bool ARMSubtarget::isAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
}
bool ARMSubtarget::isAAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
         TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
bool ARMSubtarget::isAAPCS16_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}

// Read-only position independence (either alone or combined with RWPI).
bool ARMSubtarget::isROPI() const {
  return TM.getRelocationModel() == Reloc::ROPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}
// Read-write position independence (either alone or combined with ROPI).
bool ARMSubtarget::isRWPI() const {
  return TM.getRelocationModel() == Reloc::RWPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}

bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return true;

  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the dso.
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}

bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
  return isTargetELF() && TM.isPositionIndependent() &&
         !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
}

unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}

bool ARMSubtarget::enableMachineScheduler() const {
  // The MachineScheduler can increase register usage, so we use more high
  // registers and end up with more T2 instructions that cannot be converted to
  // T1 instructions. At least until we do better at converting to thumb1
  // instructions, on cortex-m at Oz where we are size-paranoid, don't use the
  // Machine scheduler, relying on the DAG register pressure scheduler instead.
  if (isMClass() && hasMinSize())
    return false;
  // Enable the MachineScheduler before register allocation for subtargets
  // with the use-misched feature.
  return useMachineScheduler();
}

bool ARMSubtarget::enableSubRegLiveness() const {
  // An explicit command-line setting always wins.
  if (EnableSubRegLiveness.getNumOccurrences())
    return EnableSubRegLiveness;
  // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
  // and q subregs for qqqqpr regs.
  return hasMVEIntegerOps();
}

bool ARMSubtarget::enableMachinePipeliner() const {
  // Enable the MachinePipeliner before register allocation for subtargets
  // with the use-mipipeliner feature.
  return getSchedModel().hasInstrSchedModel() && useMachinePipeliner();
}

bool ARMSubtarget::useDFAforSMS() const { return false; }

// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
bool ARMSubtarget::enablePostRAScheduler() const {
  // The post-RA list scheduler and the post-RA MachineScheduler are mutually
  // exclusive; if the pre-RA MachineScheduler is on, the post-RA
  // MachineScheduler (below) is used instead of this one.
  if (enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  // Thumb1 cores will generally not benefit from post-ra scheduling
  return !isThumb1Only();
}

// Use the post-RA MachineScheduler only when the pre-RA MachineScheduler is
// also enabled (the inverse of the condition in enablePostRAScheduler).
bool ARMSubtarget::enablePostRAMachineScheduler() const {
  if (!enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  return !isThumb1Only();
}

bool ARMSubtarget::useStride4VFPs() const {
  // For general targets, the prologue can grow when VFPs are allocated with
  // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
  // format which it's more important to get right.
  return isTargetWatchABI() ||
         (useWideStrideVFP() && !OptMinSize);
}

bool ARMSubtarget::useMovt() const {
  // NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
  // immediates as it is inherently position independent, and may be out of
  // range otherwise.
  return !NoMovt && hasV8MBaselineOps() &&
         (isTargetWindows() || !OptMinSize || genExecuteOnly());
}

bool ARMSubtarget::useFastISel() const {
  // Enable fast-isel for any target, for testing only.
  if (ForceFastISel)
    return true;

  // Limit fast-isel to the targets that are or have been tested.
  if (!hasV6Ops())
    return false;

  // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
  return TM.Options.EnableFastISel &&
         ((isTargetMachO() && !isThumb1Only()) ||
          (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
}

unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
  // The GPR register class has multiple possible allocation orders, with
  // tradeoffs preferred by different sub-architectures and optimisation goals.
  // The allocation orders are:
  // 0: (the default tablegen order, not used)
  // 1: r14, r0-r13
  // 2: r0-r7
  // 3: r0-r7, r12, lr, r8-r11
  // Note that the register allocator will change this order so that
  // callee-saved registers are used later, as they require extra work in the
  // prologue/epilogue (though we sometimes override that).

  // For thumb1-only targets, only the low registers are allocatable.
  if (isThumb1Only())
    return 2;

  // Allocate low registers first, so we can select more 16-bit instructions.
  // We also (in ignoreCSRForAllocationOrder) override the default behaviour
  // with regards to callee-saved registers, because pushing extra registers is
  // much cheaper (in terms of code size) than using high registers. After
  // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
  // can return with the pop, don't need an extra "bx lr") and then the rest of
  // the high registers.
  if (isThumb2() && MF.getFunction().hasMinSize())
    return 3;

  // Otherwise, allocate in the default order, using LR first because saving it
  // allows a shorter epilogue sequence.
  return 1;
}

bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                               unsigned PhysReg) const {
  // To minimize code size in Thumb2, we prefer the usage of low regs (lower
  // cost per use) so we can use narrow encoding. By default, caller-saved
  // registers (e.g. lr, r12) are always allocated first, regardless of
  // their cost per use. When optForMinSize, we prefer the low regs even if
  // they are CSR because usually push/pop can be folded into existing ones.
  return isThumb2() && MF.getFunction().hasMinSize() &&
         ARM::GPRRegClass.contains(PhysReg);
}

// Decide whether the frame-pointer push should be split from the push of the
// other callee-saved GPRs. Only relevant for functions that emit Windows CFI
// unwind info; required when the frame layout also needs a base pointer
// (variable-sized objects) or stack realignment.
bool ARMSubtarget::splitFramePointerPush(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  if (!MF.getTarget().getMCAsmInfo()->usesWindowsCFI() ||
      !F.needsUnwindTableEntry())
    return false;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF);
}