//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"

#include "ARMCallLowering.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "arm-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "ARMGenSubtargetInfo.inc"

static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(true), cl::Hidden);

enum ITMode {
  DefaultIT,
  RestrictedIT,
  NoRestrictedIT
};

static cl::opt<ITMode>
IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
   cl::ZeroOrMore,
   cl::values(clEnumValN(DefaultIT, "arm-default-it",
                         "Generate IT block based on arch"),
              clEnumValN(RestrictedIT, "arm-restrict-it",
                         "Disallow deprecated IT based on ARMv8"),
              clEnumValN(NoRestrictedIT, "arm-no-restrict-it",
                         "Allow IT blocks based on ARMv7")));

/// ForceFastISel - Use the fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
              cl::init(false), cl::Hidden);
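
// The flags above are hidden developer knobs; like any cl::opt they are
// reachable from the backend command line, e.g. (illustrative invocation):
//   llc -mtriple=thumbv7-none-eabi -arm-no-restrict-it input.ll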

/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}

ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
                                                        StringRef FS) {
  ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
  if (STI.isThumb1Only())
    return (ARMFrameLowering *)new Thumb1FrameLowering(STI);

  return new ARMFrameLowering(STI);
}

ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, FS), UseMulOps(UseFusedMulOps),
      CPUString(CPU), OptMinSize(MinSize), IsLittle(IsLittle),
      TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(
      *static_cast<const ARMBaseTargetMachine *>(&TM), *this, *RBI));

  RegBankInfo.reset(RBI);
}

const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}

void ARMSubtarget::initializeEnvironment() {
  // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
  // directly from it, but we can try to make sure they're consistent when both
  // available.
  UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
               Options.ExceptionModel == ExceptionHandling::None) ||
              Options.ExceptionModel == ExceptionHandling::SjLj;
  assert((!TM.getMCAsmInfo() ||
          (TM.getMCAsmInfo()->getExceptionHandlingType() ==
           ExceptionHandling::SjLj) == UseSjLjEH) &&
         "inconsistent sjlj choice between CodeGen and MC");
}
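
// Illustrative example of the feature-string plumbing below: for a triple of
// "thumbv7s-apple-ios" with an empty CPU string, CPUString defaults to
// "swift", ParseARMTriple contributes the features implied by v7s, and any
// explicit FS (say, "+neon,-vfp4") is appended after them, so explicit
// features take precedence.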

void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = FS;
  }
  ParseSubtargetFeatures(CPUString, ArchFS);

  // FIXME: This used to enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  // Execute-only support requires movt support.
  if (genExecuteOnly()) {
    NoMovt = false;
    assert(hasV8MBaselineOps() && "Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
  SchedModel = getSchedModelForCPU(CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  if (isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.

  SupportsTailCall = !isThumb() || hasV8MBaselineOps();

  if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
    SupportsTailCall = false;

  switch (IT) {
  case DefaultIT:
    RestrictIT = hasV8Ops();
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  case NoRestrictedIT:
    RestrictIT = false;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    UseNEONForSinglePrecisionFP = true;

  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else),
  // default it to 2.
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
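  // The cases below tune per-core heuristics: load/store-multiple issue
  // timing, the maximum vectorizer interleave factor, the partial register
  // update clearance, and a pre-ISel operand latency adjustment.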
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexR4:
  case CortexR4F:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexR52:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PrefLoopLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseN1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}

bool ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }

bool ARMSubtarget::isAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
}
bool ARMSubtarget::isAAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
         TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
bool ARMSubtarget::isAAPCS16_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}

bool ARMSubtarget::isROPI() const {
  return TM.getRelocationModel() == Reloc::ROPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}
bool ARMSubtarget::isRWPI() const {
  return TM.getRelocationModel() == Reloc::RWPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}

bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return true;

  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the dso.
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}

bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
  return isTargetELF() && TM.isPositionIndependent() &&
         !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
}

unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}
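
// Note: "MachineScheduler" below refers to the generic pre-RA machine
// scheduler pass; when it is disabled, scheduling is left to the SelectionDAG
// list scheduler, which is what the comment below calls the "DAG register
// pressure scheduler".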

bool ARMSubtarget::enableMachineScheduler() const {
  // The MachineScheduler can increase register usage, so we use more high
  // registers and end up with more T2 instructions that cannot be converted to
  // T1 instructions. At least until we do better at converting to Thumb1
  // instructions, on Cortex-M at -Oz where we are size-paranoid, don't use the
  // MachineScheduler, relying on the DAG register pressure scheduler instead.
  if (isMClass() && hasMinSize())
    return false;
  // Enable the MachineScheduler before register allocation for subtargets
  // with the use-misched feature.
  return useMachineScheduler();
}

// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
bool ARMSubtarget::enablePostRAScheduler() const {
  if (disablePostRAScheduler())
    return false;
  // Don't reschedule potential IT blocks.
  return !isThumb1Only();
}

bool ARMSubtarget::enableAtomicExpand() const { return hasAnyDataBarrier(); }

bool ARMSubtarget::useStride4VFPs() const {
  // For general targets, the prologue can grow when VFPs are allocated with
  // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
  // format which it's more important to get right.
  return isTargetWatchABI() ||
         (useWideStrideVFP() && !OptMinSize);
}

bool ARMSubtarget::useMovt() const {
  // NOTE Windows on ARM needs to use movw/movt pairs to materialise 32-bit
  // immediates as it is inherently position independent, and may be out of
  // range otherwise.
  return !NoMovt && hasV8MBaselineOps() &&
         (isTargetWindows() || !OptMinSize || genExecuteOnly());
}

bool ARMSubtarget::useFastISel() const {
  // Enable fast-isel for any target, for testing only.
  if (ForceFastISel)
    return true;

  // Limit fast-isel to the targets that are or have been tested.
  if (!hasV6Ops())
    return false;

  // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
  return TM.Options.EnableFastISel &&
         ((isTargetMachO() && !isThumb1Only()) ||
          (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
}
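
// The numeric orders returned below are indices into the alternative
// allocation orders (AltOrders) that TableGen defines for the GPR register
// class; see ARMRegisterInfo.td, whose AltOrderSelect hook is expected to
// call back into getGPRAllocationOrder.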

unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
  // The GPR register class has multiple possible allocation orders, with
  // tradeoffs preferred by different sub-architectures and optimisation goals.
  // The allocation orders are:
  // 0: (the default tablegen order, not used)
  // 1: r14, r0-r13
  // 2: r0-r7
  // 3: r0-r7, r12, lr, r8-r11
  // Note that the register allocator will change this order so that
  // callee-saved registers are used later, as they require extra work in the
  // prologue/epilogue (though we sometimes override that).

  // For thumb1-only targets, only the low registers are allocatable.
  if (isThumb1Only())
    return 2;

  // Allocate low registers first, so we can select more 16-bit instructions.
  // We also (in ignoreCSRForAllocationOrder) override the default behaviour
  // with regards to callee-saved registers, because pushing extra registers is
  // much cheaper (in terms of code size) than using high registers. After
  // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
  // can return with the pop, don't need an extra "bx lr") and then the rest of
  // the high registers.
  if (isThumb2() && MF.getFunction().hasMinSize())
    return 3;

  // Otherwise, allocate in the default order, using LR first because saving it
  // allows a shorter epilogue sequence.
  return 1;
}

bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                               unsigned PhysReg) const {
  // To minimize code size in Thumb2, we prefer the usage of low regs (lower
  // cost per use) so we can use narrow encodings. By default, caller-saved
  // registers (e.g. lr, r12) are always allocated first, regardless of
  // their cost per use. When optForMinSize, we prefer the low regs even if
  // they are CSR because usually push/pop can be folded into existing ones.
  return isThumb2() && MF.getFunction().hasMinSize() &&
         ARM::GPRRegClass.contains(PhysReg);
}