//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/RuntimeLibcalls.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownFPClass.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
enum class ComplexDeinterleavingOperation;
enum class ComplexDeinterleavingRotation;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelValueTracking;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;
class VPIntrinsic;

namespace Sched {

enum Preference : uint8_t {
  None,            // No preference.
  Source,          // Follow source order.
  RegPressure,     // Scheduling for lowest register pressure.
  Hybrid,          // Scheduling for both latency and register pressure.
  ILP,             // Scheduling for ILP in low register pressure mode.
  VLIW,            // Scheduling for VLIW targets.
  Fast,            // Fast suboptimal list scheduling.
  Linearize,       // Linearize DAG, no scheduling.
  Last = Linearize // Marker for the last Sched::Preference.
};

} // end namespace Sched
// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if
                     // the memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
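
// A minimal usage sketch (illustrative values, not taken from any in-tree
// target): a combiner sizing a non-volatile 16-byte copy with a fixed 8-byte
// destination and a 4-byte-aligned source would build and query:
//
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpy() && Op.allowOverlap()); // memcpy; may overlap
//   assert(!Op.isSrcAligned(Align(8)));         // source only 4-byte aligned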
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class LLVM_ABI TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether types are legal for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left
                                 // unimplemented. While it is theoretically
                                 // possible to legalize operations on scalable
                                 // types with a loop that handles the
                                 // vscale * #lanes of the vector, this is
                                 // non-trivial at SelectionDAG level and these
                                 // types are better to be widened or promoted.
  };
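
  // Targets typically record these per-operation actions in their
  // TargetLowering constructor via the setOperationAction hook; illustrative
  // sketch, not from any in-tree target:
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);  // no divide unit
  //   setOperationAction(ISD::FSIN, MVT::f64, LibCall); // always call libm
  //   setOperationAction(ISD::FABS, MVT::f32, Custom);  // see LowerOperation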

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold
                                    // garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64/PowerPC.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,   // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic,  // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsNoExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsNoExt(false), IsInReg(false),
          IsSRet(false), IsNest(false), IsByVal(false), IsByRef(false),
          IsInAlloca(false), IsPreallocated(false), IsReturned(false),
          IsSwiftSelf(false), IsSwiftAsync(false), IsSwiftError(false),
          IsCFGuardTarget(false) {}

    LLVM_ABI void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
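
  // For example, when a target boolean is widened from i1 to i32:
  //   ZeroOrOneBooleanContent         -> ISD::ZERO_EXTEND (true becomes 1)
  //   ZeroOrNegativeOneBooleanContent -> ISD::SIGN_EXTEND (true becomes -1)
  //   UndefinedBooleanContent         -> ISD::ANY_EXTEND  (upper bits garbage)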

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase();

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const { return IsStrictFPEnabled; }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults
  /// to the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted
  /// amount type. Targets should return a legal type if the input type is
  /// legal. Targets can return a type that is too small if the input type is
  /// illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, calls getScalarShiftAmountTy.
  /// If the getScalarShiftAmountTy type cannot represent all possible shift
  /// amounts, returns MVT::i32.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;

  /// Return the preferred type to use for a shift opcode, given that the
  /// shifted amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of vector operations.
  /// By default we assume it will have the same size as an address space 0
  /// pointer.
  virtual unsigned getVectorIdxWidth(const DataLayout &DL) const {
    return DL.getPointerSizeInBits(0);
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  MVT getVectorIdxTy(const DataLayout &DL) const {
    return MVT::getIntegerVT(getVectorIdxWidth(DL));
  }

  /// Returns the type to be used for the index operand of:
  /// G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT,
  /// G_INSERT_SUBVECTOR, and G_EXTRACT_SUBVECTOR
  LLT getVectorIdxLLT(const DataLayout &DL) const {
    return LLT::scalar(getVectorIdxWidth(DL));
  }
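
  // For example, with a 64-bit address space 0 ("p:64:64" in the data layout
  // string), getVectorIdxTy() returns MVT::i64 and getVectorIdxLLT() returns
  // LLT::scalar(64).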

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly
  /// zero-extended to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  /// This callback is used to inspect load/store SDNodes.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.experimental.vector.partial.reduce.* intrinsic
  /// should be expanded using generic code in SelectionDAGBuilder.
  virtual bool
  shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }

  /// Return the minimum number of bits required to hold the maximum possible
  /// number of trailing zero vector elements.
  unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC,
                                      bool ZeroIsPoison,
                                      const ConstantRange *VScaleRange) const;

  /// Return true if the @llvm.experimental.vector.match intrinsic should be
  /// expanded for vector type `VT' and search size `SearchSize' using generic
  /// code in SelectionDAGBuilder.
  virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const {
    return true;
  }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }
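
  // E.g. when this returns true for ISD::VECREDUCE_ADD,
  //   (add (vecreduce_add x), (vecreduce_add y))
  // may be rewritten as
  //   (vecreduce_add (add x, y)),
  // trading two reductions for one.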

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// an integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // Return true if the half type should be promoted using soft promotion
  // rules where each operation is promoted to f32 individually, then
  // converted to fp16. The default behavior is to promote chains of
  // operations, keeping intermediate results in f32 precision and range.
  virtual bool softPromoteHalfType() const { return false; }

  // Return true if, for soft-promoted half, the half type should be passed
  // to and returned from functions as f32. The default behavior is to pass as
  // i16. If soft-promoted half is not used, this function is ignored and
  // values are always passed and returned as f32.
  virtual bool useFPRegsForHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const { return true; }
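
  // If this returns false for VT, a remainder is instead computed from the
  // quotient, i.e. 'a % b' is lowered as 'a - (a / b) * b' rather than as a
  // REM node.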

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given
  /// type based on the function's attributes. If the operation is not
  /// overridden by the function's attributes, "Unspecified" is returned and
  /// target defaults are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if the target has indicated that at least one type should
  /// be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns the map of slow types for division or remainder with the
  /// corresponding fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
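
  // For example, a target with a slow 64-bit divider may map 64 -> 32 here so
  // that codegen emits a runtime check and uses a 32-bit divide when both
  // operands happen to fit (illustrative; X86 does this for some CPUs).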

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  // Cost parameters used by
  // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
  // shouldKeepJumpConditionsTogether will use these parameter values to
  // determine if two conditions in the form `br (and/or cond1, cond2)` should
  // be split into two branches or left as one.
  //
  // BaseCost is the cost threshold (in latency). If the estimated latency of
  // computing both `cond1` and `cond2` is below the cost of just computing
  // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
  // they will be split.
  //
  // LikelyBias increases BaseCost if branch probability info indicates that it
  // is likely that both `cond1` and `cond2` will be computed.
  //
  // UnlikelyBias decreases BaseCost if branch probability info indicates that
  // it is unlikely that both `cond1` and `cond2` will be computed.
  //
  // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
  // `shouldKeepJumpConditionsTogether` always returning false).
  struct CondMergingParams {
    int BaseCost;
    int LikelyBias;
    int UnlikelyBias;
  };
  // Return params for deciding if we should keep two branch conditions merged
  // or split them into two separate branches.
  // Arg0: The binary op joining the two conditions (and/or).
  // Arg1: The first condition (cond1)
  // Arg2: The second condition (cond2)
  virtual CondMergingParams
  getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
                                const Value *) const {
    // -1 will always result in splitting.
    return {-1, -1, -1};
  }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const { return true; }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const { return false; }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const { return false; }
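
  // "Speculate" here means hoisting the intrinsic out of its zero-guard, e.g.
  // turning 'x == 0 ? 32 : cttz(x)' into an unconditional cttz when the
  // target evaluates cttz(0) cheaply and with a defined result.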

  /// Return true if the ctlz instruction is fast.
  virtual bool isCtlzFast() const { return false; }

  /// Return true if the ctpop instruction is fast.
  virtual bool isCtpopFast(EVT VT) const {
    return isOperationLegal(ISD::CTPOP, VT);
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if the instruction generated for equality comparison is
  /// folded with the instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in
  /// code gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const { return false; }

  /// Return the preferred operand type if the target has a quick way to
  /// compare integer values of the given size. Assume that any legal integer
  /// type can be compared efficiently. Targets may override this to allow
  /// illegal wide types to return a vector type if there is support to compare
  /// that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
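
  // For example, a target with 256-bit vector compares might return
  // MVT::v32i8 for NumBits == 256 so a memcmp-style equality check can be
  // lowered as one wide vector compare (illustrative of what X86 does with
  // AVX2).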

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation
  /// based on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting
  /// the predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const { return false; }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume
    // that it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  /// (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  /// Mask:   x & (-1 << y)  (the instcombine canonical form)
  /// Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether the given truncation
  /// down into KeptBits would be truncating or not:
  /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// into its more traditional form:
  /// ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
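
  // Concretely, for %x : i32 and KeptBits == 8 this trades
  //   (add %x, 128) u< 256
  // for
  //   ((%x << 24) a>> 24) == %x
  // (both test that %x is a sign-extended i8).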

  /// Given the pattern
  /// (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  /// ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  /// here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing an endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  // Return true if it's desirable to perform the following transform:
  //   (fmul C, (uitofp Pow2))
  //     -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //   (fdiv C, (uitofp Pow2))
  //     -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //
  // This is only queried after we have verified the transform will be bitwise
  // equal.
  //
  // SDNode *N      : The FDiv/FMul node we want to transform.
  // SDValue FPConst: The Float constant operand in `N`.
  // SDValue IntPow2: The Integer power of 2 operand in `N`.
  virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
                                                   SDValue IntPow2) const {
    // Default to avoiding fdiv which is often very expensive.
    return N->getOpcode() == ISD::FDIV;
  }
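
  // Worked f32 example (23 mantissa bits): dividing %c by 8.0 subtracts
  // log2(8) == 3 from the biased exponent, i.e.
  //   (fdiv float %c, 8.0)
  //     -> bitcast (sub (bitcast %c to i32), (shl 3, 23)) to float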

  // Given:
  //   (icmp eq/ne (and X, C0), (shift X, C1))
  // or
  //   (icmp eq/ne X, (rotate X, CPow2))
  //
  // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
  // remaining bits (i.e. something like `(x64 & UINT32_MAX) == (x64 >> 32)`),
  // do we prefer the shift to be shift-right, shift-left, or rotate?
  // Note: it's only valid to convert the rotate version to the shift version
  // iff the shift-amt (`C1`) is a power of 2 (including 0).
  // If ShiftOpc (current Opcode) is returned, do nothing.
  virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
      EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
      const APInt &ShiftOrRotateAmt,
      const std::optional<APInt> &AndMask) const {
    return ShiftOpc;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two adds is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two adds.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of the sub_nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const { return true; }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X)).
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  // Return true if the target wants to transform:
  //   (TruncVT truncate(sext_in_reg(VT X, ExtVT))
  //     -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
  // Some targets might prefer pre-sextinreg to improve truncation/saturation.
  virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
                                         EVT ExtVT) const {
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target shall perform extract vector element and store
  /// given that the vector is known to be splat of constant.
  /// \p Index[out] gives the index of the vector element to be extracted when
  /// this is true.
  virtual bool shallExtractConstSplatVectorElementToStore(
      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const { return false; }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;
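
  // For instance, soft-float comparisons such as __eqdf2 return an integer of
  // this type whose relation to zero encodes the comparison result, so the
  // caller compares the call's return value against zero.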

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some CPUs
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some CPUs also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the
  /// bits of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return the target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows the target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs a uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() { llvm::fill(ValueTypeActions, TypeLegal); }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return a pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
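
  // For example, on a target whose widest legal integer is i64,
  // getTypeConversion(Ctx, i128) yields {TypeExpandInteger, i64}, while an
  // illegal v3i32 would typically yield {TypeWidenVector, v4i32}.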

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT,
                                  NumIntermediates, RegisterVT);
  }
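
  // Usage sketch for the v8f32-on-SSE1 case from the comment above (Ctx and
  // TLI stand in for a real LLVMContext and lowering object):
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, NumIntermediates == 2,
  //   // IntermediateVT == v4f32, RegisterVT == v4f32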

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    SyncScope::ID ssid = SyncScope::System;
    AtomicOrdering order = AtomicOrdering::NotAtomic;
    AtomicOrdering failureOrder = AtomicOrdering::NotAtomic;

    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks whether, on the target, the intrinsic will
  /// need to map to a MemIntrinsicNode (touches memory). If this is the case,
  /// it returns true and stores the intrinsic information into the
  /// IntrinsicInfo that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
  /// be legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    if (VT.isExtended())
      return Expand;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
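
  // Typical DAG-combine guard (illustrative):
  //
  //   if (TLI.getOperationAction(ISD::ROTL, VT) != TargetLowering::Legal)
  //     return SDValue(); // no native rotate for VT; keep the generic form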

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking whether the
  /// scale is supported by the target for a given operation that may depend
  /// on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }
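
  // Illustrative sketch (not part of the interface): DAG combines commonly
  // gate a rewrite on one of the queries above so they never introduce an
  // operation the legalizer cannot handle. `DAG`, `N`, `VT`, and the
  // `LegalizerRan` flag are assumed to come from the surrounding combine.
  //
  //   // Fold a * b + c into FMA only if the target can select or custom
  //   // lower FMA for this type; pass LegalOnly = true after legalization.
  //   if (!TLI.isOperationLegalOrCustom(ISD::FMA, VT, LegalizerRan))
  //     return SDValue();
  //   return DAG.getNode(ISD::FMA, SDLoc(N), VT, A, B, C);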

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and span a range of
  /// \p Range values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at
    // the number of clusters and destinations should be enough to decide
    // whether to build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall
    // range check branch. For a small number of clusters, separate comparisons
    // might be cheaper, and for many destinations, splitting the range might
    // be better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
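
  // Worked example (illustrative): for `switch (x) { case 2: case 5: case 9:
  // goto B; default: goto C; }` there is one non-default destination
  // (NumDests == 1), NumCmps == 3, and the range [2,9] fits in a machine
  // word, so the heuristic above accepts it. The switch can then become a
  // single range check plus a masked bit test, conceptually:
  //
  //   if ((x - 2) <= 7 && ((1u << (x - 2)) & 0b10001001u))  // bits 0, 3, 7
  //     goto B;
  //   goto C;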

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended())
      return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Same as getLoadExtAction, but for atomic loads.
  LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT,
                                        EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended())
      return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    LegalizeAction Action =
        (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
    assert((Action == Legal || Action == Expand) &&
           "Unsupported atomic load extension action.");
    return Action;
  }
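
  // Illustrative sketch (not part of the interface): folding a separate
  // extend into an extending load is typically guarded like this, with the
  // load of i8 feeding a sign_extend to i32 in the surrounding combine.
  //
  //   // (sext (i8 load x)) -> (sextload x) only if the target supports it.
  //   if (TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i8))
  //     ...; // rewrite to an extending load
  //
  // Each (ExtType, ValVT, MemVT) combination stores a 4-bit LegalizeAction
  // in LoadExtActions, which is why the accessors shift by 4 * ExtType.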

  /// Return true if the specified atomic load with extension is legal on
  /// this target.
  bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getAtomicLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended())
      return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or has a
  /// custom lowering on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
           (getTruncStoreAction(ValVT, MemVT) == Legal ||
            getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }
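
  // Illustrative sketch (not part of the interface): combines that form
  // pre/post-indexed memory operations consult the indexed-mode tables
  // first; `VT` is assumed to come from the load being combined.
  //
  //   // Try to fold (load p) and (p += 4) into one post-incremented load.
  //   if (TLI.isIndexedLoadLegal(ISD::POST_INC, VT))
  //     ...; // build an indexed load that also produces the updated pointer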

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this
  /// target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Returns true if the index type for a masked gather/scatter requires
  /// extending.
  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }

  // Returns true if Extend can be folded into the index of a masked
  // gather/scatter on this target.
  virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend,
                                             EVT DataVT) const {
    return false;
  }

  // Return true if the target supports a scatter/gather instruction with
  // indices which are scaled by the particular value. Note that all targets
  // must by definition support scale of 1.
  virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
                                            uint64_t ElemSize) const {
    // MGATHER/MSCATTER are only required to support scaling by one or by the
    // element size.
    if (Scale != ElemSize && Scale != 1)
      return false;
    return true;
  }
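
  // Illustrative sketch (not part of the interface): when lowering an
  // `llvm.masked.gather` of f64 elements, the index scaling is validated
  // roughly like this; a scale of 1 (byte indices) or of the element size
  // (element indices, here 8) is always acceptable by default.
  //
  //   if (!TLI.isLegalScaleForGatherScatter(/*Scale=*/8, /*ElemSize=*/8))
  //     ...; // pre-multiply the index vector instead of using the scale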

  /// Return how the condition code should be treated: either it is legal,
  /// needs to be expanded to some other code sequence, or the target has a
  /// custom expander for it.
  LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < std::size(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction)((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal for a comparison of
  /// the specified types on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom for a
  /// comparison of the specified types on this target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input
  /// type InputVT should be treated. Either it's legal, needs to be promoted
  /// to a larger size, needs to be expanded to some other code sequence, or
  /// the target has a custom expander for it.
  LegalizeAction getPartialReduceMLAAction(unsigned Opc, EVT AccVT,
                                           EVT InputVT) const {
    assert(Opc == ISD::PARTIAL_REDUCE_SMLA || Opc == ISD::PARTIAL_REDUCE_UMLA ||
           Opc == ISD::PARTIAL_REDUCE_SUMLA);
    PartialReduceActionTypes Key = {Opc, AccVT.getSimpleVT().SimpleTy,
                                    InputVT.getSimpleVT().SimpleTy};
    auto It = PartialReduceMLAActions.find(Key);
    return It != PartialReduceMLAActions.end() ? It->second : Expand;
  }

  /// Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is
  /// legal or custom for this target.
  bool isPartialReduceMLALegalOrCustom(unsigned Opc, EVT AccVT,
                                       EVT InputVT) const {
    LegalizeAction Action = getPartialReduceMLAAction(Opc, AccVT, InputVT);
    return Action == Legal || Action == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
        PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end())
      return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    uint64_t VTBits = VT.getScalarSizeInBits();
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy + 1);
      assert(NVT.isInteger() == VT.isInteger() &&
             NVT.isFloatingPoint() == VT.isFloatingPoint() &&
             "Didn't find type to promote to!");
    } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
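
  // Illustrative sketch (not part of the interface): if a hypothetical
  // target marks ISD::CTLZ on MVT::i8 as Promote without an explicit
  // PromoteToType entry, the loop above walks i16, i32, ... and returns the
  // first legal type whose CTLZ action is not itself Promote.
  //
  //   MVT PromotedVT = TLI.getTypeToPromoteTo(ISD::CTLZ, MVT::i8);
  //   // On a typical 32-bit target this would yield MVT::i32.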

  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                                     bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the EVT corresponding to this LLVM type. This mapping is fixed by
  /// the LLVM type system, except for the pointer size, which comes from the
  /// DataLayout. If AllowUnknown is true, this will return MVT::Other for
  /// types with no EVT counterpart (e.g. structs), otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }

  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Returns the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.
  virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple())
      return getRegisterType(VT.getSimpleVT());
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates,
                                   RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger())
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    llvm_unreachable("Unsupported extended type!");
  }
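
  // Illustrative sketch (not part of the interface): the same IR type can
  // map to different EVTs on different targets because of the pointer
  // exception above. Assuming `DL` describes a 64-bit target:
  //
  //   EVT VT = TLI.getValueType(DL, PointerType::get(Ctx, /*AS=*/0));
  //   // VT would typically be MVT::i64 here, and MVT::i32 for the same IR
  //   // on a 32-bit target.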

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may
  /// be more than one for types (like i64) that are split into pieces. For
  /// types like i140, which are first promoted then expanded, it is the number
  /// of registers needed to hold all the bits of the original type. For an
  /// i140 on a 32 bit machine this means 5 registers.
  ///
  /// RegisterVT may be passed as a way to override the default settings, for
  /// instance with i128 inline assembly operands on SystemZ.
  virtual unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT = std::nullopt) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             std::size(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Certain combinations of ABIs, Targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register
  /// set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
                                              const DataLayout &DL) const {
    return DL.getABITypeAlign(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// \p ByteOffset is only set if we know the pointer offset at compile time,
  /// otherwise we should assume that additional pointer math is required.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  /// Example: (i16 (trunc (srl (i32 (load x)), 16))) -> i16 load x+2
  virtual bool shouldReduceLoadWidth(
      SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
      std::optional<unsigned> ByteOffset = std::nullopt) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !SDValue(Load, 0).hasOneUse())
      return false;

    return true;
  }

  /// Return true (the default) if it is profitable to remove a sext_inreg(x)
  /// where the sext is redundant, and use x directly.
  virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }

  /// Indicates if any padding is guaranteed to go at the most significant bits
  /// when storing the type to memory and the type size isn't equal to the
  /// store size.
  bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const {
    return VT.isScalarInteger() && !VT.isByteSized();
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT & 7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }
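
  // Illustrative sketch (not part of the interface): when SelectionDAG
  // considers inlining `memset(p, 0, 32)`, it first asks the target for its
  // store budget. With 16-byte stores available, 32 bytes needs 2 stores, so
  // a limit of, say, 4 permits the expansion; otherwise a libcall is
  // emitted. `NumStoresNeeded` is a placeholder for the caller's estimate.
  //
  //   bool OptSize = MF.getFunction().hasOptSize();
  //   if (NumStoresNeeded <= TLI.getMaxStoresPerMemset(OptSize))
  //     ...; // expand inline as a sequence of stores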

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also
  /// returns a relative speed of the unaligned memory access in the last
  /// argument by reference. The higher the speed number the faster the
  /// operation compared to a number returned by another such call. This is
  /// used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations. Its use helps to ensure that
  /// such replacements don't generate code that causes an alignment error
  /// (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(
      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// LLT handling variant.
  virtual bool allowsMisalignedMemoryAccesses(
      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// This function returns true if the memory access is aligned or if the
  /// target allows this specific unaligned memory access. If the access is
  /// allowed, the optional final parameter returns a relative speed of the
  /// access (as defined by the target).
  bool allowsMemoryAccessForAlignment(
      LLVMContext &Context, const DataLayout &DL, EVT VT,
      unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const;

  /// Return true if the memory access of this type is aligned or if the target
  /// allows this specific unaligned access for the given MachineMemOperand.
  /// If the access is allowed, the optional final parameter returns a relative
  /// speed of the access (as defined by the target).
  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
                                      const DataLayout &DL, EVT VT,
                                      const MachineMemOperand &MMO,
                                      unsigned *Fast = nullptr) const;
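
  // Illustrative sketch (not part of the interface): memcpy lowering probes
  // whether an unaligned wide store would be both allowed and fast before
  // choosing a store type.
  //
  //   unsigned Fast = 0;
  //   if (TLI.allowsMisalignedMemoryAccesses(MVT::i64, /*AddrSpace=*/0,
  //                                          Align(1),
  //                                          MachineMemOperand::MONone,
  //                                          &Fast) &&
  //       Fast)
  //     ...; // use 8-byte stores even for a 1-byte-aligned destination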

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns the relative speed of the access (as defined by
  /// the target).
  virtual bool
  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                     unsigned AddrSpace = 0, Align Alignment = Align(1),
                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
                     unsigned *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns the relative access speed (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// LLT handling variant.
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT
  getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }

  /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(const MemOp &Op,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with
  /// fldl / fstpl which also does type conversion. Note the specified type
  /// doesn't have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Return lower limit for number of blocks in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const;

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  Register getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Return the minimum stack alignment of an argument.
  Align getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }

  /// Return the preferred function alignment.
  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }

  /// Return the preferred loop alignment.
  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;

  /// Return the maximum amount of bytes allowed to be emitted when padding
  /// for alignment.
  virtual unsigned
  getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;

  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const { return false; }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Function *getSSPStackGuardCheck(const Module &M) const;

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;

  /// Returns the name of the symbol used to emit stack probes or the empty
  /// string if not applicable.
  virtual bool hasStackProbeSymbol(const MachineFunction &MF) const {
    return false;
  }

  virtual bool hasInlineStackProbe(const MachineFunction &MF) const {
    return false;
  }

  virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit
  /// pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to
  /// the minimum size the object must be to be aligned and PrefAlign is set to
  /// the preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      Align & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Get the ISD node that corresponds to the Intrinsic ID. Returns
  /// ISD::DELETED_NODE by default for an unsupported Intrinsic ID.
  int IntrinsicIDToISD(Intrinsic::ID ID) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned getMaxDivRemBitWidthSupported() const {
    return MaxDivRemBitWidthSupported;
  }

  /// Returns the size in bits of the maximum fp to/from int conversion the
  /// backend supports. Larger operations will be expanded by ExpandFp.
  unsigned getMaxLargeFPConvertBitWidthSupported() const {
    return MaxLargeFPConvertBitWidthSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports. Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  // The memory ordering that AtomicExpandPass should assign to an atomic
  // instruction that it has lowered by adding fences. This can be used
  // to "fold" one of the fences into the atomic instruction.
  virtual AtomicOrdering
  atomicOperationOrderAfterFenceSplit(const Instruction *I) const {
    return AtomicOrdering::Monotonic;
  }

  /// Whether AtomicExpandPass should automatically insert a trailing fence
  /// without reducing the ordering for this atomic. Defaults to false.
  virtual bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations
  /// to truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw expansion using a target-specific way. This is
  /// expected to be called when masked atomicrmw and bit test atomicrmw don't
  /// work, and the target supports another way to lower atomicrmw.
  virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Generic atomicrmw expansion unimplemented on this target");
  }
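
  // Illustrative sketch (not part of the interface): given the two hooks
  // above, AtomicExpandPass rewrites `atomicrmw add ptr %p, i32 %v` into a
  // loop conceptually shaped like this; names such as P, V, Done, and Loop
  // are placeholders, and the IRBuilder plumbing is elided.
  //
  //   // loop:
  //   Value *Loaded = TLI.emitLoadLinked(Builder, Int32Ty, P, Ord);
  //   Value *NewVal = Builder.CreateAdd(Loaded, V);
  //   Value *Status = TLI.emitStoreConditional(Builder, NewVal, P, Ord);
  //   // Status != 0 means the reservation was lost; retry.
  //   Builder.CreateCondBr(Builder.CreateIsNull(Status), Done, Loop);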

  /// Perform a cmpxchg expansion using a target-specific method.
  virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const {
    llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
  }

  /// Perform a bit test atomicrmw using a target-specific intrinsic. This
  /// represents the combined bit test intrinsic which will be lowered at a
  /// late stage by the backend.
  virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Bit test atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw whose result is only used by a comparison, using a
  /// target-specific intrinsic. This represents the combined atomic and
  /// compare intrinsic which will be lowered at a late stage by the backend.
  virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Compare arith atomicrmw expansion unimplemented on this target");
  }

  /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }

  //===--------------------------------------------------------------------===//
  /// \name KCFI check lowering.
  /// @{

  virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                                      MachineBasicBlock::instr_iterator &MBBI,
                                      const TargetInstrInfo *TII) const {
    llvm_unreachable("KCFI is not supported on this target");
  }

  /// @}

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  /// if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return a nullptr, or a pointer to an IR-level
  /// Instruction*. Even complex fence sequences can be represented by a
  /// single Instruction* through an intrinsic to be lowered later.
  ///
  /// The default implementation emits an IR fence before any release (or
  /// stronger) operation that stores, and after any acquire (or stronger)
  /// operation. This is generally a correct implementation, but backends may
  /// override if they wish to use alternative schemes (e.g. the PowerPC
  /// standard ABI uses a fence before a seq_cst load instead of after a
  /// seq_cst store).
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
                                        Instruction *Inst,
                                        AtomicOrdering Ord) const;

  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const;
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute. This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor
  // would be unnecessarily held, except if clrex, inserted by this hook, is
  // executed.
  virtual void
  emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns true if arguments should be extended in lib calls.
  virtual bool shouldExtendTypeInLibCall(EVT Type) const { return true; }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) load should be cast by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
    if (LI->getType()->isFloatingPointTy())
      return AtomicExpansionKind::CastToInteger;
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) store should be expanded by the IR-level
  /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
  /// to use an atomicrmw xchg.
  virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) store should be cast by the IR-level
  /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
  /// will try to cast the operands to integer values.
  virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
    if (SI->getValueOperand()->getType()->isFloatingPointTy())
      return AtomicExpansionKind::CastToInteger;
    return AtomicExpansionKind::None;
  }

  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
    return RMW->isFloatingPointOperation() ? AtomicExpansionKind::CmpXChg
                                           : AtomicExpansionKind::None;
  }
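
  // Illustrative sketch (not part of the interface): with
  // shouldInsertFencesForAtomic returning true, AtomicExpandPass turns a
  // seq_cst store `SI` into, conceptually:
  //
  //   TLI.emitLeadingFence(Builder, SI, AtomicOrdering::SequentiallyConsistent);
  //   // ...the store itself, with its ordering reduced...
  //   TLI.emitTrailingFence(Builder, SI, AtomicOrdering::SequentiallyConsistent);
  //
  // Per the default scheme described above, the leading fence covers
  // release-or-stronger stores and the trailing fence covers
  // acquire-or-stronger operations.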

  /// Returns how the given atomicrmw should be cast by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
    if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
        (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
         RMWI->getValOperand()->getType()->isPointerTy()))
      return AtomicExpansionKind::CastToInteger;

    return AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// Returns how the platform's atomic compare and swap expects its comparison
  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
  /// separate from getExtendForAtomicOps, which is concerned with the
  /// sign-extension of the instruction's output, whereas here we are concerned
  /// with the sign-extension of the input. For targets with compare-and-swap
  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
  /// the input can be ANY_EXTEND, but the output will still have a specific
  /// extension.
  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
    return ISD::ANY_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false
  /// here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
           Action != TypeSplitVector;
  }
  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
           Action != TypeSplitVector;
  }

  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  /// Return true if it is profitable to transform an integer
  /// multiplication-by-constant into simpler operations like shifts and adds.
  /// This may be true if the target does not directly support the
  /// multiplication operation for the specified type, or if the sequence of
  /// simpler ops is faster than the multiply.
  virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }

  /// Return true if it may be profitable to transform
  /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
  /// This may not be true if c1 and c2 can be represented as immediates but
  /// c1*c2 cannot, for example.
  /// The target should check if c1, c2 and c1*c2 can be represented as
  /// immediates, or have to be materialized into registers. If it is not sure
  /// about some cases, a default true can be returned to let the DAGCombiner
  /// decide.
  /// AddNode is (add x, c1), and ConstNode is c2.
  virtual bool isMulAddWithConstProfitable(SDValue AddNode,
                                           SDValue ConstNode) const {
    return true;
  }

  /// Return true if it is more correct/profitable to use strict FP_TO_INT
  /// conversion operations - canonicalizing the FP source value instead of
  /// converting all cases and then selecting based on value.
  /// This may be true if the target throws exceptions for out of bounds
  /// conversions or has fast FP CMOV.
  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }

  /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
  /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
  /// considered beneficial.
  /// If optimizing for size, expansion is only considered beneficial for up to
  /// 5 multiplies and a divide (if the exponent is negative).
  bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
    if (Exponent < 0)
      Exponent = -Exponent;
    uint64_t E = static_cast<uint64_t>(Exponent);
    return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
  }
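  // Worked example of the size check above: for Exponent = -5 we get
  // E = 5 = 0b101, so llvm::popcount(E) + Log2_64(E) == 2 + 2 == 4 < 7.
  // Square-and-multiply expansion of powi(x, 5) needs Log2_64(5) == 2
  // squarings plus popcount(5) - 1 == 1 extra multiply, and the negative
  // exponent adds one divide, so the expansion is still considered
  // beneficial even when optimizing for size.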
  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type. See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate the minimum number of blocks to generate jump tables.
  void setMinimumJumpTableEntries(unsigned Val);

  /// Indicate the maximum number of entries in jump tables.
  /// Set to zero to generate unlimited jump tables.
  void setMaximumJumpTableSize(unsigned);

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(Register R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
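  // For instance, a target whose 64-bit divider is slow but whose 32-bit
  // divider is fast could request, in its TargetLowering constructor:
  //
  //   addBypassSlowDiv(64, 32);
  //
  // after which the BypassSlowDivision transform emits a run-time check that
  // dispatches a 64-bit div/rem to the 32-bit path whenever both operands
  // actually fit in 32 bits.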
  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
  void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
    assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
  void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
                          LegalizeAction Action) {
    for (auto Op : Ops)
      setOperationAction(Op, VT, Action);
  }
  void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
                          LegalizeAction Action) {
    for (auto VT : VTs)
      setOperationAction(Ops, VT, Action);
  }

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }
  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    for (auto ExtType : ExtTypes)
      setLoadExtAction(ExtType, ValVT, MemVT, Action);
  }
  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
                        ArrayRef<MVT> MemVTs, LegalizeAction Action) {
    for (auto MemVT : MemVTs)
      setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
  }
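  // Taken together, these hooks are normally invoked from the derived
  // TargetLowering constructor. A minimal sketch for a hypothetical target
  // (MyTargetLowering, GPR32RegClass, and Subtarget are illustrative names,
  // not an in-tree configuration):
  //
  //   MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
  //       : TargetLowering(TM) {
  //     addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  //     computeRegisterProperties(Subtarget.getRegisterInfo());
  //
  //     // No native division: legalization expands it (e.g. to a libcall).
  //     setOperationAction(ISD::SDIV, MVT::i32, Expand);
  //     // Custom-lower 32-bit shifts in LowerOperation().
  //     setOperationAction(ISD::SHL, MVT::i32, Custom);
  //     // No sign-extending i8 loads: legalize them away.
  //     setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
  //   }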
  /// Let the target indicate that an extending atomic load of the specified
  /// type is legal.
  void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                              LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
        ~((uint16_t)0xF << Shift);
    AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
        ((uint16_t)Action << Shift);
  }
  void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
                              LegalizeAction Action) {
    for (auto ExtType : ExtTypes)
      setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
  }
  void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
                              ArrayRef<MVT> MemVTs, LegalizeAction Action) {
    for (auto MemVT : MemVTs)
      setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }
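  // As a concrete flavor, many targets without narrowing f64 stores mark the
  // float truncation pair as expanded:
  //
  //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  //   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  //
  // so legalization emits an explicit FP_ROUND before such a store and an
  // explicit FP_EXTEND after such a load instead.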
  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
                            LegalizeAction Action) {
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
  }

  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
                            LegalizeAction Action) {
    for (auto VT : VTs)
      setIndexedLoadAction(IdxModes, VT, Action);
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
                             LegalizeAction Action) {
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
  }

  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
                             LegalizeAction Action) {
    for (auto VT : VTs)
      setIndexedStoreAction(IdxModes, VT, Action);
  }

  /// Indicate that the specified indexed masked load does or does not work with
  /// the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
                                  LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
  }

  /// Indicate that the specified indexed masked store does or does not work
  /// with the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
                                   LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
                         LegalizeAction Action) {
    for (auto CC : CCs) {
      assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
             "Table isn't big enough!");
      assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
      /// The lower 3 bits of the SimpleTy select the Nth 4-bit group in the
      /// 32-bit value, and the upper bits index into the second dimension of
      /// the array to select which 32-bit value to use.
      uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
      CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
      CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
    }
  }
  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
                         LegalizeAction Action) {
    for (auto VT : VTs)
      setCondCodeAction(CCs, VT, Action);
  }

  /// Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input
  /// type InputVT should be treated by the target. Either it's legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT,
                                 LegalizeAction Action) {
    assert(Opc == ISD::PARTIAL_REDUCE_SMLA || Opc == ISD::PARTIAL_REDUCE_UMLA ||
           Opc == ISD::PARTIAL_REDUCE_SUMLA);
    assert(AccVT.isValid() && InputVT.isValid() &&
           "setPartialReduceMLAAction types aren't valid");
    PartialReduceActionTypes Key = {Opc, AccVT.SimpleTy, InputVT.SimpleTy};
    PartialReduceMLAActions[Key] = Action;
  }
  void setPartialReduceMLAAction(ArrayRef<unsigned> Opcodes, MVT AccVT,
                                 MVT InputVT, LegalizeAction Action) {
    for (unsigned Opc : Opcodes)
      setPartialReduceMLAAction(Opc, AccVT, InputVT, Action);
  }
  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// Convenience method to set an operation to Promote and specify the type
  /// in a single call.
  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }
  void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
                                  MVT DestVT) {
    for (auto Op : Ops) {
      setOperationAction(Op, OrigVT, Promote);
      AddPromotedToType(Op, OrigVT, DestVT);
    }
  }
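  // For example, a target that only implements 32-bit bitwise operations can
  // have 16-bit ops rewritten in terms of them (a sketch, not a specific
  // in-tree target):
  //
  //   setOperationPromotedToType(ISD::AND, MVT::i16, MVT::i32);
  //
  // which is shorthand for setOperationAction(ISD::AND, MVT::i16, Promote)
  // followed by AddPromotedToType(ISD::AND, MVT::i16, MVT::i32).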
  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
    for (auto NT : NTs) {
      assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
      TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
    }
  }

  /// Set the target's minimum function alignment.
  void setMinFunctionAlignment(Align Alignment) {
    MinFunctionAlignment = Alignment;
  }

  /// Set the target's preferred function alignment. This should be set if
  /// there is a performance benefit to higher-than-minimum alignment.
  void setPrefFunctionAlignment(Align Alignment) {
    PrefFunctionAlignment = Alignment;
  }

  /// Set the target's preferred loop alignment. The default alignment is one,
  /// which means the target does not care about loop alignment. The target may
  /// also override getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
  void setMaxBytesForAlignment(unsigned MaxBytes) {
    MaxBytesForAlignment = MaxBytes;
  }

  /// Set the minimum stack alignment of an argument.
  void setMinStackArgumentAlignment(Align Alignment) {
    MinStackArgumentAlignment = Alignment;
  }

  /// Set the maximum atomic operation size supported by the
  /// backend. Atomic operations greater than this size (as well as
  /// ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
    MaxAtomicSizeInBitsSupported = SizeInBits;
  }

  /// Set the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
    MaxDivRemBitWidthSupported = SizeInBits;
  }

  /// Set the size in bits of the maximum fp to/from int conversion the backend
  /// supports. Larger operations will be expanded by ExpandFp.
  void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
    MaxLargeFPConvertBitWidthSupported = SizeInBits;
  }

  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
    MinCmpXchgSizeInBits = SizeInBits;
  }

  /// Sets whether unaligned atomic operations are supported.
  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
    SupportsUnalignedAtomics = UnalignedSupported;
  }

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool getAddrModeArguments(const IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value *> & /*Ops*/,
                                    Type *& /*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///   BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
  /// no scale.
  /// If ScalableOffset is zero, there is no scalable offset.
  struct AddrMode {
    GlobalValue *BaseGV = nullptr;
    int64_t BaseOffs = 0;
    bool HasBaseReg = false;
    int64_t Scale = 0;
    int64_t ScalableOffset = 0;
    AddrMode() = default;
  };

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type. TODO: Handle
  /// pre/postinc as well.
  ///
  /// If the address space cannot be determined, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace,
                                     Instruction *I = nullptr) const;

  /// Returns true if the target's addressing mode can target thread local
  /// storage (TLS).
  virtual bool addressingModeSupportsTLS(const GlobalValue &) const {
    return false;
  }

  /// Return the preferred common base offset.
  virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                                 int64_t MaxOffset) const {
    return 0;
  }
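  // A typical isLegalAddressingMode override rejects anything the target's
  // load/store forms cannot encode. Sketch for a hypothetical RISC-like
  // target with only [reg + simm12] addressing (MyTargetLowering is an
  // illustrative name):
  //
  //   bool MyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
  //                                                const AddrMode &AM,
  //                                                Type *Ty, unsigned AS,
  //                                                Instruction *I) const {
  //     if (AM.BaseGV || AM.Scale != 0 || AM.ScalableOffset != 0)
  //       return false;                  // No globals, scaling, or vscale.
  //     return isInt<12>(AM.BaseOffs);   // Offset must fit in 12 signed bits.
  //   }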
  /// Return true if the specified immediate is a legal icmp immediate, that is
  /// the target has icmp instructions which can compare a register against the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is a legal add immediate, that is
  /// the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if adding the specified scalable immediate is legal, that is
  /// the target has add instructions which can add a register with the
  /// immediate (multiplied by vscale) without having to materialize the
  /// immediate into a register.
  virtual bool isLegalAddScalableImmediate(int64_t) const { return false; }

  /// Return true if the specified immediate is legal for the value input of a
  /// store instruction.
  virtual bool isLegalStoreImmediate(int64_t Value) const {
    // Default implementation assumes that at least 0 works since it is likely
    // that a zero register exists or a zero immediate is allowed.
    return Value == 0;
  }
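  // These hooks usually mirror the instruction encodings directly. Sketch
  // for a hypothetical target whose compare and add instructions both take
  // 12-bit signed immediates:
  //
  //   bool MyTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  //     return isInt<12>(Imm);
  //   }
  //   bool MyTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  //     return isInt<12>(Imm);
  //   }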
  /// Given a shuffle vector SVI representing a vector splat, return a new
  /// scalar type of size equal to SVI's scalar type if the new type is more
  /// profitable. Returns nullptr otherwise. For example under MVE float splats
  /// are converted to integer to prevent the need to move from SPR to GPR
  /// registers.
  virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
    return nullptr;
  }

  /// Given a set of interconnected phis of type 'From' that are loaded/stored
  /// or bitcast to type 'To', return true if the set should be converted to
  /// 'To'.
  virtual bool shouldConvertPhiType(Type *From, Type *To) const {
    return (From->isIntegerTy() || From->isFloatingPointTy()) &&
           (To->isIntegerTy() || To->isFloatingPointTy());
  }

  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    case ISD::ADD:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::MUL:
    case ISD::MULHU:
    case ISD::MULHS:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    case ISD::FADD:
    case ISD::FMUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SADDO:
    case ISD::UADDO:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::FMINNUM_IEEE:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMINIMUM:
    case ISD::FMAXIMUM:
    case ISD::FMINIMUMNUM:
    case ISD::FMAXIMUMNUM:
    case ISD::AVGFLOORS:
    case ISD::AVGFLOORU:
    case ISD::AVGCEILS:
    case ISD::AVGCEILU:
    case ISD::ABDS:
    case ISD::ABDU:
      return true;
    default:
      return false;
    }
  }

  /// Return true if the node is a math/logic binary operator.
  virtual bool isBinOp(unsigned Opcode) const {
    // A commutative binop must be a binop.
    if (isCommutativeBinOp(Opcode))
      return true;
    // These are non-commutative binops.
    switch (Opcode) {
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    case ISD::SSUBSAT:
    case ISD::USUBSAT:
    case ISD::FSUB:
    case ISD::FDIV:
    case ISD::FREM:
      return true;
    default:
      return false;
    }
  }

  /// Return true if it's free to truncate a value of type FromTy to type
  /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  /// Targets must return false when FromTy <= ToTy.
  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call. Targets must return false when FromTy <= ToTy.
  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
  virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
    return isTruncateFree(getApproximateEVTForLLT(FromTy, Ctx),
                          getApproximateEVTForLLT(ToTy, Ctx));
  }

  /// Return true if truncating the specific node Val to type VT2 is free.
  virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
    // Fallback to type matching.
    return isTruncateFree(Val.getValueType(), VT2);
  }
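  // Sketch of a free-truncation override for a hypothetical 64-bit target
  // whose 32-bit operations simply read the low half of a 64-bit register:
  //
  //   bool MyTargetLowering::isTruncateFree(Type *FromTy, Type *ToTy) const {
  //     if (!FromTy->isIntegerTy() || !ToTy->isIntegerTy())
  //       return false;
  //     // i64 -> i32 is a sub-register read; no instruction is generated.
  //     return FromTy->getPrimitiveSizeInBits() == 64 &&
  //            ToTy->getPrimitiveSizeInBits() == 32;
  //   }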
  virtual bool isProfitableToHoist(Instruction *I) const { return true; }

  /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
  /// this method can use the context provided by \p I to decide
  /// whether or not \p I is free.
  /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
  /// returns true as well. The converse is not true.
  /// The target can perform the adequate checks by overriding isExtFreeImpl.
  /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    return isExtFreeImpl(I);
  }

  /// Return true if \p Load and \p Ext can form an ExtLoad.
  /// For example, in AArch64
  ///   %L = load i8, i8* %ptr
  ///   %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
                 const DataLayout &DL) const {
    EVT VT = getValueType(DL, Ext->getType());
    EVT LoadVT = getValueType(DL, Load->getType());

    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    unsigned LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    return isLoadExtLegal(LType, VT, LoadVT);
  }

  /// Return true if any actual instruction that defines a value of type FromTy
  /// implicitly zero-extends the value to ToTy in the result register.
  ///
  /// The function should return true when it is likely that the truncate can
  /// be freely folded with an instruction defining a value of FromTy. If
  /// the defining instruction is unknown (because you're looking at a
  /// function argument, PHI, etc.) then the target may require an
  /// explicit truncate, which is not necessarily free, but this function
  /// does not deal with those cases.
  /// Targets must return false when FromTy >= ToTy.
  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
  virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
    return isZExtFree(getApproximateEVTForLLT(FromTy, Ctx),
                      getApproximateEVTForLLT(ToTy, Ctx));
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }
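  // Sketch of an isZExtFree override for a hypothetical AArch64-like target
  // where every 32-bit instruction implicitly zeroes bits 63:32 of its
  // destination register, making i32 -> i64 zero-extension free:
  //
  //   bool MyTargetLowering::isZExtFree(Type *FromTy, Type *ToTy) const {
  //     return FromTy->isIntegerTy(32) && ToTy->isIntegerTy(64);
  //   }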
  /// Return true if sign-extension from FromTy to ToTy is cheaper than
  /// zero-extension.
  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
    return false;
  }

  /// Return true if this constant should be sign extended when promoting to
  /// a larger type.
  virtual bool signExtendConstant(const ConstantInt *C) const { return false; }

  /// Try to optimize extending or truncating conversion instructions (like
  /// zext, trunc, fptoui, uitofp) for the target.
  virtual bool
  optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
                                     const TargetTransformInfo &TTI) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  ///   a = load i64* addr
  ///   b = trunc i64 a to i32
  ///   c = lshr i64 a, 32
  ///   d = trunc i64 c to i32
  /// will be optimized into:
  ///   b = load i32* addr1
  ///   d = load i32* addr2
  /// where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             Align & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if the target has a vector blend instruction.
  virtual bool hasVectorBlend() const { return false; }

  /// Get the maximum supported factor for interleaved memory accesses.
  /// Default to be the minimum interleave factor: 2.
  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }

  /// Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p LI is the vector load instruction.
  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
  /// \p Indices is the corresponding indices for each shufflevector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedLoad(LoadInst *LI,
                                    ArrayRef<ShuffleVectorInst *> Shuffles,
                                    ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }

  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p SI is the vector store instruction.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                     unsigned Factor) const {
    return false;
  }

  /// Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p Load is a vp.load instruction.
  /// \p Mask is a mask value.
  /// \p DeinterleaveRes is a list of deinterleaved results.
  virtual bool lowerInterleavedVPLoad(VPIntrinsic *Load, Value *Mask,
                                      ArrayRef<Value *> DeinterleaveRes) const {
    return false;
  }

  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p Store is the vp.store instruction.
  /// \p Mask is a mask value.
  /// \p InterleaveOps is a list of values being interleaved.
  virtual bool lowerInterleavedVPStore(VPIntrinsic *Store, Value *Mask,
                                       ArrayRef<Value *> InterleaveOps) const {
    return false;
  }

  /// Lower a deinterleave intrinsic to a target specific load intrinsic.
  /// Return true on success. Currently only supports
  /// llvm.vector.deinterleave{2,3,5,7}
  ///
  /// \p LI is the accompanying load instruction.
  /// \p DeinterleaveValues contains the deinterleaved values.
  virtual bool
  lowerDeinterleaveIntrinsicToLoad(LoadInst *LI,
                                   ArrayRef<Value *> DeinterleaveValues) const {
    return false;
  }

  /// Lower an interleave intrinsic to a target specific store intrinsic.
  /// Return true on success. Currently only supports
  /// llvm.vector.interleave{2,3,5,7}
  ///
  /// \p SI is the accompanying store instruction.
  /// \p InterleaveValues contains the interleaved values.
  virtual bool
  lowerInterleaveIntrinsicToStore(StoreInst *SI,
                                  ArrayRef<Value *> InterleaveValues) const {
    return false;
  }

  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
           "invalid fpext types");
    return false;
  }
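  // Sketch of an isFPExtFree override for a hypothetical target whose
  // arithmetic unit computes in double precision, so widening f32 to f64
  // costs nothing:
  //
  //   bool MyTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  //     return SrcVT == MVT::f32 && DestVT == MVT::f64;
  //   }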
  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to float-precision) for an FMA instruction.
  virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
                               LLT DestTy, LLT SrcTy) const {
    return false;
  }

  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to float-precision) for an FMA instruction.
  virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                               EVT DestVT, EVT SrcVT) const {
    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
           "invalid fpext types");
    return isFPExtFree(DestVT, SrcVT);
  }

  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
  /// extend node) is profitable.
  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  ///
  /// Targets that care about soft float support should return false when soft
  /// float code is being generated (i.e. use-soft-float).
  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                          EVT) const {
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                          LLT) const {
    return false;
  }

  /// IR version.
  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
    return false;
  }
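  // Sketch of the usual pattern: gate FMA formation on a subtarget feature.
  // Subtarget and hasFMA() are illustrative names, not a specific in-tree
  // API:
  //
  //   bool MyTargetLowering::isFMAFasterThanFMulAndFAdd(
  //       const MachineFunction &MF, EVT VT) const {
  //     if (!Subtarget.hasFMA())
  //       return false;
  //     VT = VT.getScalarType();
  //     return VT == MVT::f32 || VT == MVT::f64;
  //   }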
  /// Returns true if \p MI can be combined with another instruction to
  /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
  /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
  /// distributed into an fadd/fsub.
  virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
    assert((MI.getOpcode() == TargetOpcode::G_FADD ||
            MI.getOpcode() == TargetOpcode::G_FSUB ||
            MI.getOpcode() == TargetOpcode::G_FMUL) &&
           "unexpected node in FMAD forming combine");
    switch (Ty.getScalarSizeInBits()) {
    case 16:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
    case 32:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
    case 64:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
    default:
      break;
    }

    return false;
  }

  /// Returns true if \p N can be combined with other nodes to form an
  /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
  /// will be distributed into an fadd/fsub.
  virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
    assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
            N->getOpcode() == ISD::FMUL) &&
           "unexpected node in FMAD forming combine");
    return isOperationLegal(ISD::FMAD, N->getValueType(0));
  }

  // Return true when the decision to generate FMAs (or FMS, FMLA etc) rather
  // than FMUL and ADD is delegated to the machine combiner.
  virtual bool generateFMAsInMachineCombiner(EVT VT,
                                             CodeGenOptLevel OptLevel) const {
    return false;
  }

  /// Return true if it's profitable to narrow operations of type SrcVT to
  /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const {
    return false;
  }

  /// Return true if pulling a binary operation into a select with an identity
  /// constant is profitable. This is the inverse of an IR transform.
  /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
  virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                                    unsigned SelectOpcode,
                                                    SDValue X,
                                                    SDValue Y) const {
    return false;
  }

  /// Return true if it is beneficial to convert a load of a constant to
  /// just the constant itself.
  /// On some targets it might be more efficient to use a combination of
  /// arithmetic instructions to materialize the constant instead of loading it
  /// from a constant pool.
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }

  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
  /// from this source type with this index. This is needed because
  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
  /// the first element, and only the target knows which lowering is cheap.
  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                       unsigned Index) const {
    return false;
  }

  /// Try to convert an extract element of a vector binary operation into an
  /// extract element followed by a scalar operation.
  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
    return false;
  }

  /// Return true if extraction of a scalar element from the given vector type
  /// at the given index is cheap. For example, if scalar operations occur on
  /// the same register file as vector operations, then an extract element may
  /// be a sub-register rename rather than an actual instruction.
  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
    return false;
  }

  /// Try to convert math with an overflow comparison into the corresponding DAG
  /// node operation. Targets may want to override this independently of whether
  /// the operation is legal/custom for the given type because it may obscure
  /// matching of other patterns.
  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                    bool MathUsed) const {
    // TODO: The default logic is inherited from code in CodeGenPrepare.
    // The opcode should not make a difference by default?
    if (Opcode != ISD::UADDO)
      return false;

    // Allow the transform as long as we have an integer type that is not
    // obviously illegal and unsupported and if the math result is used
    // besides the overflow check. On some targets (e.g. SPARC), it is
    // not profitable to form an overflow op if the math result has no
    // concrete users.
    if (VT.isVector())
      return false;
    return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
  }
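  // The transform this guards rewrites an add-plus-compare idiom into a
  // single overflow-checked operation. In IR terms, the pair
  //
  //   %add = add i32 %x, %y
  //   %ov  = icmp ult i32 %add, %x      ; unsigned-overflow check
  //
  // is rewritten by CodeGenPrepare into a call to
  // @llvm.uadd.with.overflow.i32, which selects to a single ISD::UADDO node
  // producing both the sum and the overflow flag.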
  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
  // even if the vector itself has multiple uses.
  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
    return false;
  }

  // Return true if CodeGenPrepare should consider splitting large offset of a
  // GEP to make the GEP fit into the addressing mode and can be sunk into the
  // same blocks of its users.
  virtual bool shouldConsiderGEPOffsetSplit() const { return false; }

  /// Return true if creating a shift of the type by the given
  /// amount is not profitable.
  virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
    return false;
  }

  // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
  // A) where y has a single bit set?
  virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
                                                 const APInt &AndMask) const {
    unsigned ShCt = AndMask.getBitWidth() - 1;
    return !shouldAvoidTransformToShift(VT, ShCt);
  }

  /// Does this target require the clearing of high-order bits in a register
  /// passed to the fp16 to fp conversion library function?
  virtual bool shouldKeepZExtForFP16Conv() const { return false; }

  /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
  /// from min(max(fptoi)) saturation patterns?
  virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
    return isOperationLegalOrCustom(Op, VT);
  }

  /// Should we expand [US]CMP nodes using two selects and two compares, or by
  /// doing arithmetic on boolean types?
  virtual bool shouldExpandCmpUsingSelects(EVT VT) const { return false; }

  /// True if the target has some particular form of dealing with pointer
  /// arithmetic semantics for pointers with the given value type. False if
  /// pointer arithmetic should not be preserved for passes such as instruction
  /// selection, and can fall back to regular arithmetic.
  /// This should be removed when PTRADD nodes are widely supported by backends.
  virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const {
    return false;
  }

  /// Does this target support complex deinterleaving?
  virtual bool isComplexDeinterleavingSupported() const { return false; }

  /// Does this target support complex deinterleaving with the given operation
  /// and type?
  virtual bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const {
    return false;
  }

  // Get the preferred opcode for FP_TO_XINT nodes.
  // By default, this checks if the provided operation is an illegal FP_TO_UINT
  // and if so, checks if FP_TO_SINT is legal or custom for use as a
  // replacement. If both UINT and SINT conversions are Custom, we choose SINT
  // by default because that's the right thing on PPC.
  virtual unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
                                             EVT ToVT) const {
    if (isOperationLegal(Op, ToVT))
      return Op;
    switch (Op) {
    case ISD::FP_TO_UINT:
      if (isOperationLegalOrCustom(ISD::FP_TO_SINT, ToVT))
        return ISD::FP_TO_SINT;
      break;
    case ISD::STRICT_FP_TO_UINT:
      if (isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, ToVT))
        return ISD::STRICT_FP_TO_SINT;
      break;
    case ISD::VP_FP_TO_UINT:
      if (isOperationLegalOrCustom(ISD::VP_FP_TO_SINT, ToVT))
        return ISD::VP_FP_TO_SINT;
      break;
    default:
      break;
    }
    return Op;
  }

  /// Create the IR node for the given complex deinterleaving operation.
  /// If one cannot be created using all the given inputs, nullptr should be
  /// returned.
  virtual Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const {
    return nullptr;
  }

  void setLibcallImpl(RTLIB::Libcall Call, RTLIB::LibcallImpl Impl) {
    Libcalls.setLibcallImpl(Call, Impl);
  }

  /// Get the libcall impl routine name for the specified libcall.
  RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const {
    return Libcalls.getLibcallImpl(Call);
  }

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return Libcalls.getLibcallName(Call);
  }

  const char *getMemcpyName() const { return Libcalls.getMemcpyName(); }

  /// Get the comparison predicate that's to be used to test the result of the
  /// comparison libcall against zero. This should only be used with
  /// floating-point compare libcalls.
  ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const;

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallImplCallingConv(RTLIB::LibcallImpl Call, CallingConv::ID CC) {
    Libcalls.setLibcallImplCallingConv(Call, CC);
  }

  /// Get the CallingConv that should be used for the specified libcall
  /// implementation.
  CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const {
    return Libcalls.getLibcallImplCallingConv(Call);
  }

  /// Get the CallingConv that should be used for the specified libcall.
  // FIXME: Remove this wrapper and directly use the used LibcallImpl
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return Libcalls.getLibcallCallingConv(Call);
  }

  /// Execute target specific actions to finalize target lowering.
  /// This is used to set extra flags in MachineFrameInformation and to freeze
  /// the set of reserved registers.
  /// The default implementation just freezes the set of reserved registers.
  virtual void finalizeLowering(MachineFunction &MF) const;

  /// Returns true if it's profitable to allow merging stores of loads when
  /// there are function calls between the load and the store.
  virtual bool shouldMergeStoreOfLoadsOverCall(EVT, EVT) const { return true; }

  //===----------------------------------------------------------------------===//
  // GlobalISel Hooks
  //===----------------------------------------------------------------------===//
  /// Check whether or not \p MI needs to be moved close to its uses.
  virtual bool shouldLocalize(const MachineInstr &MI,
                              const TargetTransformInfo *TTI) const;

private:
  const TargetMachine &TM;

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  bool HasMultipleConditionRegisters;

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  bool HasExtractBitsInsn;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// Information about the contents of the high-bits in boolean values held
  /// in a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean values held
  /// in a type wider than i1. See getBooleanContents.
  BooleanContent BooleanFloatContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1. See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or
  /// lowest register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The minimum alignment that any argument on the stack needs to have.
  Align MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  Align MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  Align PrefFunctionAlignment;

  /// The preferred loop alignment (in log2, not in bytes).
  Align PrefLoopAlignment;
  /// The maximum number of bytes permitted to be emitted for alignment.
  unsigned MaxBytesForAlignment;

  /// Size in bits of the maximum atomics size the backend supports.
  /// Accesses larger than this will be expanded by AtomicExpandPass.
  unsigned MaxAtomicSizeInBitsSupported;

  /// Size in bits of the maximum div/rem size the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned MaxDivRemBitWidthSupported;

  /// Size in bits of the maximum fp to/from int conversion size the
  /// backend supports. Larger operations will be expanded by ExpandFp.
  unsigned MaxLargeFPConvertBitWidthSupported;

  /// Size in bits of the minimum cmpxchg or ll/sc operation the
  /// backend supports.
  unsigned MinCmpXchgSizeInBits;

  /// This indicates if the target supports unaligned atomic operations.
  bool SupportsUnalignedAtomics;

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  Register StackPointerRegisterToSaveRestore;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];

  /// This indicates the "representative" register class to use for each
  /// ValueType the target supports natively. This information is used by the
  /// scheduler to track register pressure. By default, the representative
  /// register class is the largest legal super-reg register class of the
  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
  /// representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};

  /// This indicates the "cost" of the "representative" register class for
  /// each ValueType. The cost is used by the scheduler to approximate
  /// register pressure.
  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
  /// For any value types we are promoting or expanding, this contains the
  /// value type that we are changing to. For Expanded types, this contains
  /// one step of the expand (e.g. i64 -> i32), even if there are multiple
  /// steps required (e.g. i64 -> i16). For types natively supported by the
  /// system, this holds the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::VALUETYPE_SIZE];

  /// For each operation and each value type, keep a LegalizeAction that
  /// indicates how instruction selection should deal with the operation. Most
  /// operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described. Note that operations on
  /// non-legal value types are not described here.
  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];

  /// For each load extension type and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with a load of a
  /// specific value type and extension type. Uses 4 bits to store the action
  /// for each of the 4 load ext types.
  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
  /// (default) values are supported.
  uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each value type pair keep a LegalizeAction that indicates whether a
  /// truncating store of a specific value type and truncating type is legal.
  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each indexed mode and each value type, keep a quad of LegalizeAction
  /// that indicates how instruction selection should deal with the load /
  /// store / maskedload / maskedstore.
  ///
  /// The first dimension is the value_type for the reference. The second
  /// dimension represents the various modes for load store.
  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];

  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
  /// indicates how instruction selection should deal with the condition code.
  ///
  /// Because each CC action takes up 4 bits, we need to have the array size
  /// be large enough to fit all of the value types. This can be done by
  /// rounding up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];

  using PartialReduceActionTypes =
      std::tuple<unsigned, MVT::SimpleValueType, MVT::SimpleValueType>;
  /// For each partial reduce opcode, result type and input type combination,
  /// keep a LegalizeAction which indicates how instruction selection should
  /// deal with this operation.
  DenseMap<PartialReduceActionTypes, LegalizeAction> PartialReduceMLAActions;

  ValueTypeActionImpl ValueTypeActions;

private:
  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
      TargetDAGCombineArray[(ISD::BUILTIN_OP_END + CHAR_BIT - 1) / CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type. This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients
  /// access this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
      PromoteToType;
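  // Illustrative only: a target marks an operation Promote and records the
  // destination type with AddPromotedToType(). A minimal sketch, as it might
  // appear in a hypothetical target's constructor:
  //
  //   // i8 CTLZ is not natively supported; compute it in i32 instead.
  //   setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  //   AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);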
  /// The list of libcalls that the target will use.
  RTLIB::RuntimeLibcallsInfo Libcalls;

  /// The ISD::CondCode that should be used to test the result of each of the
  /// comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// The bits of IndexedModeActions used to store the legalisation actions.
  /// We store the data as | ML | MS | L | S |, with each field taking 4 bits.
  enum IndexedModeActionsBits {
    IMAB_Store = 0,
    IMAB_Load = 4,
    IMAB_MaskedStore = 8,
    IMAB_MaskedLoad = 12
  };

  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
  }

  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
                                      unsigned Shift) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
  }

protected:
  /// Return true if the extension represented by \p I is free.
  /// \pre \p I is a sign, zero, or fp extension and
  /// is[Z|FP]ExtFree of the related types is not true.
  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }

  /// Depth that GatherAllAliases should continue looking for chain
  /// dependencies when trying to find a more preferable chain. As an
  /// approximation, this should be more than the number of consecutive stores
  /// expected to be merged.
  unsigned GatherAllAliasesMaxDepth;

  /// \brief Specify maximum number of store instructions per memset call.
  ///
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store. This only applies to setting a constant array of a constant size.
  unsigned MaxStoresPerMemset;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemsetOptSize;
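  // Illustrative only: these thresholds are plain fields that targets assign
  // in their TargetLowering constructor. A minimal sketch with made-up
  // values:
  //
  //   MaxStoresPerMemset = 16;        // inline memset up to 16 stores
  //   MaxStoresPerMemsetOptSize = 4;  // be more conservative at -Os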
  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;
  /// \brief Specify max number of store instructions to glue in inlined
  /// memcpy.
  ///
  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum
  /// number of store instructions to keep together. This helps in pairing and
  /// vectorization later on.
  unsigned MaxGluedStoresPerMemcpy = 0;

  /// \brief Specify maximum number of load instructions per memcmp call.
  ///
  /// When lowering \@llvm.memcmp this field specifies the maximum number of
  /// pairs of load operations that may be substituted for a call to memcmp.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memcmp will be done using as many of the
  /// largest load operations first, followed by smaller ones, if necessary,
  /// per alignment restrictions. For example, loading 7 bytes on a 32-bit
  /// machine with 32-bit alignment would result in one 4-byte load, one
  /// 2-byte load and one 1-byte load. This only applies to comparing a
  /// constant array of constant size.
  unsigned MaxLoadsPerMemcmp;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxLoadsPerMemcmpOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memmove will be done using as many of the
  /// largest store operations first, followed by smaller ones, if necessary,
  /// per alignment restrictions. For example, moving 9 bytes on a 32-bit
  /// machine with 8-bit alignment would result in nine 1-byte stores. This
  /// only applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;

  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

  /// \see enableExtLdPromotion.
  bool EnableExtLdPromotion;

  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterInfo &TRI,
                 const TargetRegisterClass &RC) const;

  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const;

  bool IsStrictFPEnabled;
};

/// This class defines information used to lower LLVM code to legal
/// SelectionDAG operators that the target instruction selector can accept
/// natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class LLVM_ABI TargetLowering : public TargetLoweringBase {
public:
  struct DAGCombinerInfo;
  struct MakeLibCallOptions;

  TargetLowering(const TargetLowering &) = delete;
  TargetLowering &operator=(const TargetLowering &) = delete;

  explicit TargetLowering(const TargetMachine &TM);
  ~TargetLowering() override;

  bool isPositionIndependent() const;

  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
                                          FunctionLoweringInfo *FLI,
                                          UniformityInfo *UA) const {
    return false;
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
  // is y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the DAGCombiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                   SDValue N1) const {
    return N0.hasOneUse();
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
  // is y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the combiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                                   Register N1) const {
    return MRI.hasOneNonDBGUse(N0);
  }

  virtual bool isSDNodeAlwaysUniform(const SDNode *N) const { return false; }

  /// Returns true by value, base pointer and offset pointer and addressing
  /// mode by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing
  /// mode by reference if this node can be combined with a load / store to
  /// form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true if the specified base+offset is a legal indexed addressing
  /// mode for this target. \p MI is the load or store instruction that is
  /// being considered for transformation.
  virtual bool isIndexingLegal(MachineInstr &MI, Register Base,
                               Register Offset, bool IsPre,
                               MachineRegisterInfo &MRI) const {
    return false;
  }

  /// Return the entry encoding for a jump table in the current function. The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual MVT getJumpTableRegTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/,
                            unsigned /*uid*/, MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same
  /// as getPICJumpTableRelocBase, but as an MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                               MCContext &Ctx) const;

  /// Return true if folding a constant offset with the given GlobalAddress is
  /// legal. It is frequently not legal in PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// On x86, return true if the operand with index OpNo is a CALL or JUMP
  /// instruction, which can use either a memory constraint or an address
  /// constraint. -fasm-blocks "__asm call foo" lowers to
  /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
  ///
  /// This function is used by a hack to choose the address constraint,
  /// lowering to a direct call.
  virtual bool
  isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
                          unsigned OpNo) const {
    return false;
  }

  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                            SDValue &Chain) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS, SDValue &Chain,
                           bool IsSignaling = false) const;

  virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL,
                                  SDValue Chain, MachineMemOperand *MMO,
                                  SDValue &NewLoad, SDValue Ptr,
                                  SDValue PassThru, SDValue Mask) const {
    llvm_unreachable("Not Implemented");
  }

  virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Chain, MachineMemOperand *MMO,
                                   SDValue Ptr, SDValue Val,
                                   SDValue Mask) const {
    llvm_unreachable("Not Implemented");
  }

  /// Returns a pair of (return value, chain).
  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                          EVT RetVT, ArrayRef<SDValue> Ops,
                                          MakeLibCallOptions CallOptions,
                                          const SDLoc &dl,
                                          SDValue Chain = SDValue()) const;

  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function. This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
                            const uint32_t *CallerPreservedMask,
                            const SmallVectorImpl<CCValAssign> &ArgLocs,
                            const SmallVectorImpl<SDValue> &OutVals) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)
        : DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }
  };
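  // Illustrative only: a target hook that receives a TargetLoweringOpt
  // typically records a replacement via CombineTo() and returns true. A
  // minimal sketch, where simplifySomehow is a hypothetical helper that
  // builds a replacement node from TLO.DAG:
  //
  //   if (SDValue NewOp = simplifySomehow(Op, TLO.DAG)) // hypothetical
  //     return TLO.CombineTo(Op, NewOp);
  //   return false;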
  /// Determines the optimal series of memory ops to replace the memset /
  /// memcpy. Return true if the number of memory ops is below the threshold
  /// (Limit). Note that this is always the case when Limit is ~0.
  /// It returns the types of the sequence of memory ops to perform
  /// memset / memcpy by reference.
  virtual bool
  findOptimalMemOpLowering(LLVMContext &Context, std::vector<EVT> &MemOps,
                           unsigned Limit, const MemOp &Op, unsigned DstAS,
                           unsigned SrcAS,
                           const AttributeList &FuncAttributes) const;

  /// Check to see if the specified operand of the specified instruction is a
  /// constant integer. If so, check to see if there are any bits set in the
  /// constant that are not demanded. If so, shrink the constant and return
  /// true.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              const APInt &DemandedElts,
                              TargetLoweringOpt &TLO) const;

  /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              TargetLoweringOpt &TLO) const;

  // Target hook to do target-specific const optimization, which is called by
  // ShrinkDemandedConstant. This function should return true if the target
  // doesn't want ShrinkDemandedConstant to further optimize the constant.
  virtual bool targetShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
    return false;
  }

  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
  /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening
  /// cast, but it could be generalized for targets with other types of
  /// implicit widening casts.
  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                        const APInt &DemandedBits,
                        TargetLoweringOpt &TLO) const;

  /// Look at Op. At this point, we know that only the DemandedBits bits of
  /// the result of Op are ever used downstream. If we can use this
  /// information to simplify Op, create a new simplified DAG node and return
  /// true, returning the original and new nodes in Old and New. Otherwise,
  /// analyze the expression and return a mask of KnownOne and KnownZero bits
  /// for the expression (used to simplify the caller). The KnownZero/One bits
  /// may only be accurate for those bits in the Demanded masks.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old
  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
  /// has multiple uses.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts, KnownBits &Known,
                            TargetLoweringOpt &TLO, unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            KnownBits &Known, TargetLoweringOpt &TLO,
                            unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            DAGCombinerInfo &DCI) const;
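  // Illustrative only: a target DAG combine often asks for just the bits it
  // needs via the DAGCombinerInfo overload above. A minimal sketch (the
  // choice of operand and bit count is made up):
  //
  //   // Only the low 16 bits of operand 0 matter for this target node.
  //   SDValue Src = N->getOperand(0);
  //   APInt Demanded = APInt::getLowBitsSet(Src.getValueSizeInBits(), 16);
  //   if (SimplifyDemandedBits(Src, Demanded, DCI))
  //     return SDValue(N, 0); // The DAG was updated in place.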
  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts,
                            DAGCombinerInfo &DCI) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// elements.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// bits from only some vector elements.
  SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream. If we can use
  /// this information to simplify Op, create a new simplified DAG node and
  /// return true, storing the original and new nodes in TLO.
  /// Otherwise, analyze the expression and return a mask of KnownUndef and
  /// KnownZero elements for the expression (used to simplify the caller).
  /// The KnownUndef/Zero elements may only be accurate for those bits
  /// in the DemandedMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old
  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
  /// has multiple uses.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedVectorElts.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                  DAGCombinerInfo &DCI) const;

  /// Return true if the target supports simplifying demanded vector elements
  /// by converting them to undefs.
  virtual bool
  shouldSimplifyDemandedVectorElts(SDValue Op,
                                   const TargetLoweringOpt &TLO) const {
    return true;
  }

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets. The
  /// DemandedElts argument allows us to only collect the known bits that are
  /// shared by the requested vector elements.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;
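  // Illustrative only: a target that defines a custom node whose result is,
  // say, always zero in its upper half can report that fact here. A minimal
  // sketch, assuming a hypothetical MYTARGETISD::ZEXT_HALF node:
  //
  //   void MyTargetLowering::computeKnownBitsForTargetNode(
  //       const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
  //       const SelectionDAG &DAG, unsigned Depth) const {
  //     if (Op.getOpcode() == MYTARGETISD::ZEXT_HALF) {
  //       unsigned BitWidth = Known.getBitWidth();
  //       Known.Zero.setHighBits(BitWidth / 2);
  //     }
  //   }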
  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets. The
  /// DemandedElts argument allows us to only collect the known bits that are
  /// shared by the requested vector elements. This is for GISel.
  virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis,
                                              Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              const MachineRegisterInfo &MRI,
                                              unsigned Depth = 0) const;

  virtual void computeKnownFPClassForTargetInstr(
      GISelValueTracking &Analysis, Register R, KnownFPClass &Known,
      const APInt &DemandedElts, const MachineRegisterInfo &MRI,
      unsigned Depth = 0) const;

  /// Determine the known alignment for the pointer value \p R. This can
  /// typically be inferred from the number of low known 0 bits. However, for
  /// a pointer with a non-integral address space, the alignment value may be
  /// independent from the known low bits.
  virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
                                                Register R,
                                                const MachineRegisterInfo &MRI,
                                                unsigned Depth = 0) const;

  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
  /// Default implementation computes low bits based on alignment
  /// information. This should preserve known bits passed into it.
  virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known,
                                             const MachineFunction &MF) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to GlobalISel combiners. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned computeNumSignBitsForTargetInstr(
      GISelValueTracking &Analysis, Register R, const APInt &DemandedElts,
      const MachineRegisterInfo &MRI, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded vector
  /// elements, returning true on success. Otherwise, analyze the expression
  /// and return a mask of KnownUndef and KnownZero elements for the
  /// expression (used to simplify the caller). The KnownUndef/Zero elements
  /// may only be accurate for those bits in the DemandedMask.
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded bits/elts,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may
  /// only be accurate for those bits in the Demanded masks.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 const APInt &DemandedElts,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits. The
  /// DemandedElts argument limits the check to the requested vector elements.
  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const;

  /// Return true if Op can create undef or poison from non-undef &
  /// non-poison operands. The DemandedElts argument limits the check to the
  /// requested vector elements.
  virtual bool
  canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
                                      const SelectionDAG &DAG, bool PoisonOnly,
                                      bool ConsiderFlags, unsigned Depth) const;

  /// Tries to build a legal vector shuffle using the provided parameters
  /// or equivalent variations. The Mask argument may be modified as the
  /// function tries different variations.
  /// Returns an empty SDValue if the operation fails.
  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                  SDValue N1, MutableArrayRef<int> Mask,
                                  SelectionDAG &DAG) const;

  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;

  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns if \p Op is known to never be a
  /// signaling NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const APInt &DemandedElts,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;

  /// Return true if vector \p Op has the same value across all
  /// \p DemandedElts, indicating any elements which may be undef in the
  /// output \p UndefElts.
  virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
                                         APInt &UndefElts,
                                         const SelectionDAG &DAG,
                                         unsigned Depth = 0) const;

  /// Returns true if the given Op is considered a canonical constant for the
  /// target, which should not be transformed back into a BUILD_VECTOR.
  virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
    return Op.getOpcode() == ISD::SPLAT_VECTOR ||
           Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
  }

  /// Return true if the given select/vselect should be considered canonical
  /// and not be transformed. Currently only used for "vselect (not Cond), N1,
  /// N2 -> vselect Cond, N2, N1".
  virtual bool isTargetCanonicalSelect(SDNode *N) const { return false; }
  struct DAGCombinerInfo {
    void *DC; // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
        : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    LLVM_ABI void AddToWorklist(SDNode *N);
    LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To,
                               bool AddTo = true);
    LLVM_ABI SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    LLVM_ABI SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                               bool AddTo = true);

    LLVM_ABI bool recursivelyDeleteUnusedNodes(SDNode *N);

    LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Return true if \p N is a constant or constant vector equal to the true
  /// value from getBooleanContents().
  bool isConstTrueVal(SDValue N) const;

  /// Return true if \p N is a constant or constant vector equal to the false
  /// value from getBooleanContents().
  bool isConstFalseVal(SDValue N) const;

  /// Return if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it
  /// is unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap address, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
                              int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered with invoke it
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
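  // Illustrative only: a typical override dispatches on the node's opcode and
  // either returns a replacement value or an empty SDValue to signal "no
  // change". A minimal sketch, where performAddCombine is a hypothetical
  // helper:
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     case ISD::ADD:
  //       return performAddCombine(N, DCI); // hypothetical
  //     default:
  //       return SDValue(); // No change was made.
  //     }
  //   }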
  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    SDValue ShiftLHS = N->getOperand(0);
    if (!ShiftLHS->hasOneUse())
      return false;
    if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
        !ShiftLHS.getOperand(0)->hasOneUse())
      return false;
    return true;
  }

  /// GlobalISel - return true if it is profitable to move this shift by a
  /// constant amount through its operand, adjusting any immediate operands as
  /// necessary to preserve semantics. This transformation may not be
  /// desirable if it disrupts a particularly auspicious target-specific tree
  /// (e.g. bitfield extraction in AArch64). By default, it returns true.
  ///
  /// @param MI the shift instruction
  /// @param IsAfterLegal true if running after legalization.
  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
                                             bool IsAfterLegal) const {
    return true;
  }

  /// GlobalISel - return true if it's profitable to perform the combine:
  /// shl ([sza]ext x), y => zext (shl x, y)
  virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
    return true;
  }

  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it's desirable to try and
  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
  // writing this) is:
  // With C as a power of 2 and C != 0 and C != INT_MIN:
  //    AddAnd:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq and(add(A, C), ~(C + C)), 0)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne and(add(A, C), ~(C + C)), 0)
  //    ABS:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq Abs(A), C)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne Abs(A), C)
  //
  // @param LogicOp the logic op
  // @param SETCC0 the first of the SETCC nodes
  // @param SETCC1 the second of the SETCC nodes
  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
      const SDNode *LogicOp, const SDNode *SETCC0,
      const SDNode *SETCC1) const {
    return AndOrSETCCFoldKind::None;
  }

  /// Return true if it is profitable to combine an XOR of a logical shift
  /// to create a logical shift of NOT. This transformation may not be
  /// desirable if it disrupts a particularly auspicious target-specific tree
  /// (e.g. BIC on ARM/AArch64). By default, it returns true.
  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value
  /// type and it is 'desirable' to use the type for the given node type. e.g.
  /// On x86 i16 is legal, but undesirable since i16 instruction encodings are
  /// longer and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for the dag combiner to transform a
  /// floating point op of the specified opcode to an equivalent op of an
  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for the dag
  /// combiner to promote the specified node. If true, it should return the
  /// desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports the swifterror attribute. It
  /// optimizes loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Return true if the target supports kcfi operand bundles.
  virtual bool supportKCFIBundles() const { return false; }

  /// Return true if the target supports ptrauth operand bundles.
  virtual bool supportPtrAuthBundles() const { return false; }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the
  /// end of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return the newly negated expression if the cost is not expensive and
  /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
  /// do the negation.
  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                       bool LegalOps, bool OptForSize,
                                       NegatibleCost &Cost,
                                       unsigned Depth = 0) const;

  SDValue getCheaperOrNeutralNegatedExpression(
      SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
      const NegatibleCost CostThreshold = NegatibleCost::Neutral,
      unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    SDValue Neg =
        getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
    if (!Neg)
      return SDValue();

    if (Cost <= CostThreshold)
      return Neg;

    // Remove the newly created node to avoid the side effect to the DAG.
    if (Neg->use_empty())
      DAG.RemoveDeadNode(Neg.getNode());
    return SDValue();
  }

  /// This is the helper function to return the newly negated expression only
  /// when the cost is cheaper.
  SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                      bool LegalOps, bool OptForSize,
                                      unsigned Depth = 0) const {
    return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                                NegatibleCost::Cheaper, Depth);
  }

  /// This is the helper function to return the newly negated expression if
  /// the cost is not expensive.
  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                               bool OptForSize, unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
  }

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// Target-specific splitting of values into parts that fit a register
  /// storing a legal type.
  virtual bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
    return false;
  }

  /// Allows the target to handle physreg-carried dependency
  /// in a target-specific way. Used from the ScheduleDAGSDNodes to decide
  /// whether to add the edge to the dependency graph.
  /// Def     - input: Selection DAG node defining the physical register
  /// User    - input: Selection DAG node using the physical register
  /// Op      - input: Number of User operand
  /// PhysReg - inout: set to the physical register if the edge is
  ///           necessary, unchanged otherwise
  /// Cost    - inout: physical register copy cost.
  /// Returns 'true' if the edge is necessary, 'false' otherwise.
  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User,
                                         unsigned Op,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII,
                                         MCRegister &PhysReg,
                                         int &Cost) const {
    return false;
  }

  /// Target-specific combining of register parts into its original value.
  virtual SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             std::optional<CallingConv::ID> CC) const {
    return SDValue();
  }
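  // Illustrative only: implementations of the LowerFormalArguments hook
  // declared below usually run the arguments through CCState and then emit
  // CopyFromReg nodes or frame loads. A minimal sketch, where CC_MyTarget is
  // a hypothetical calling-convention assignment function:
  //
  //   SDValue MyTargetLowering::LowerFormalArguments(
  //       SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
  //       const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
  //       SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  //     SmallVector<CCValAssign, 16> ArgLocs;
  //     CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
  //                    *DAG.getContext());
  //     CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget); // hypothetical
  //     // ...emit CopyFromReg for register args, loads for stack args,
  //     // and push each resulting value onto InVals...
  //     return Chain;
  //   }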
  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  /// This structure contains the information necessary for lowering
  /// pointer-authenticating indirect calls. It is equivalent to the "ptrauth"
  /// operand bundle found on the call instruction, if any.
  struct PtrAuthInfo {
    uint64_t Key;
    SDValue Discriminator;
  };

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their
  /// LowerCall implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt : 1;
    bool RetZExt : 1;
    bool IsVarArg : 1;
    bool IsInReg : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent : 1;
    bool IsPatchPoint : 1;
    bool IsPreallocated : 1;
    bool NoMerge : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is call lowering done post SelectionDAG type legalization?
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    const CallBase *CB = nullptr;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;
    const ConstantInt *CFIType = nullptr;
    SDValue ConvergenceControlToken;

    std::optional<PtrAuthInfo> PAI;

    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
          DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }

    // setCallee with target/module-specific attributes.
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);

      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList,
                                AttributeSet ResultAttrs = {}) {
      RetTy = ResultType;
      IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
      RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
      RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
      NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);

      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                const CallBase &Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);
      NoMerge = Call.hasFnAttr(Attribute::NoMerge);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPreallocated(bool Value = true) {
      IsPreallocated = Value;
      return *this;
    }

    CallLoweringInfo &setPtrAuth(PtrAuthInfo Value) {
      PAI = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    CallLoweringInfo &setCFIType(const ConstantInt *Type) {
      CFIType = Type;
      return *this;
    }

    CallLoweringInfo &setConvergenceControlToken(SDValue Token) {
      ConvergenceControlToken = Token;
      return *this;
    }

    ArgListTy &getArgs() { return Args; }
  };
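  // Illustrative only: call sites build a CallLoweringInfo with the chained
  // setters above and hand it to LowerCallTo (declared further below). A
  // minimal sketch from inside a custom lowering function:
  //
  //   TargetLowering::ArgListTy Args;
  //   // ...populate Args...
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
  //       CallingConv::C, RetTy, Callee, std::move(Args));
  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);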
  /// This structure is used to pass arguments to the makeLibCall function.
  struct MakeLibCallOptions {
    // By passing the type list before softening to makeLibCall, the target
    // hook shouldExtendTypeInLibCall can get the original type before
    // softening.
    ArrayRef<EVT> OpsVTBeforeSoften;
    EVT RetVTBeforeSoften;
    ArrayRef<Type *> OpsTypeOverrides;

    bool IsSigned : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPostTypeLegalization : 1;
    bool IsSoften : 1;

    MakeLibCallOptions()
        : IsSigned(false), DoesNotReturn(false), IsReturnValueUsed(true),
          IsPostTypeLegalization(false), IsSoften(false) {}

    MakeLibCallOptions &setIsSigned(bool Value = true) {
      IsSigned = Value;
      return *this;
    }

    MakeLibCallOptions &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    MakeLibCallOptions &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
                                                bool Value = true) {
      OpsVTBeforeSoften = OpsVT;
      RetVTBeforeSoften = RetVT;
      IsSoften = Value;
      return *this;
    }

    /// Override the argument type for an operand. Leave the type as null to
    /// use the type from the operand's node.
    MakeLibCallOptions &setOpsTypeOverrides(ArrayRef<Type *> OpsTypes) {
      OpsTypeOverrides = OpsTypes;
      return *this;
    }
  };

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
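  // Illustrative only: legalization code frequently pairs MakeLibCallOptions
  // with makeLibCall (declared earlier) to emit a runtime call. A minimal
  // sketch for a signed 128-bit division:
  //
  //   MakeLibCallOptions CallOptions;
  //   CallOptions.setIsSigned(true);
  //   SDValue Ops[2] = {LHS, RHS};
  //   std::pair<SDValue, SDValue> Result = makeLibCall(
  //       DAG, RTLIB::SDIV_I128, MVT::i128, Ops, CallOptions, dl, Chain);
  //   // Result.first is the quotient, Result.second the output chain.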
  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
  LowerCall(CallLoweringInfo &/*CLI*/,
            SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, Align) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/,
                              const Type *RetTy) const {
    // Return true by default to get preexisting behavior.
    return true;
  }
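
  // A minimal sketch of the common way targets implement CanLowerReturn: run
  // the return calling convention and check that every value is assigned a
  // register. "MyTargetLowering" and the tblgen'd RetCC_MyTarget assignment
  // function are hypothetical names, not part of this header.
  //
  //   bool MyTargetLowering::CanLowerReturn(
  //       CallingConv::ID CC, MachineFunction &MF, bool IsVarArg,
  //       const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Ctx,
  //       const Type *RetTy) const {
  //     SmallVector<CCValAssign, 16> RVLocs;
  //     CCState CCInfo(CC, IsVarArg, MF, RVLocs, Ctx);
  //     return CCInfo.CheckReturn(Outs, RetCC_MyTarget);
  //   }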
  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if the result of the specified node is used by a return node
  /// only. This hook also computes and returns the input chain for the tail
  /// call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// a tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tail-call
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the register ID of the name passed in. Used by the named-register
  /// global-variables extension. There is no target-independent behavior, so
  /// the default action is to bail.
  virtual Register getRegisterByName(const char *RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value. FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the
  /// time, e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C
  /// calling conventions. The frontend should handle this and include all of
  /// the necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const DataLayout &DL) const {
    return false;
  }

  /// For most targets, an LLVM type must be broken down into multiple
  /// smaller types. Usually the halves are ordered according to the
  /// endianness, but for some platforms that would break. So this method will
  /// default to matching the endianness but can be overridden.
  virtual bool
  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
    return DL.isLittleEndian();
  }

  /// Returns a 0-terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }
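
  // Illustrative sketch of a getScratchRegisters override; the register name
  // is hypothetical. The contract is simply a 0-terminated array.
  //
  //   const MCPhysReg *
  //   MyTargetLowering::getScratchRegisters(CallingConv::ID) const {
  //     static const MCPhysReg ScratchRegs[] = { MyTarget::R12, 0 };
  //     return ScratchRegs;
  //   }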
  /// Returns a 0-terminated array of rounding control registers that can be
  /// attached to a strict FP call.
  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
    return ArrayRef<MCPhysReg>();
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU. The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types. It replaces the
  /// LowerOperation callback in the type legalizer. The reason we cannot do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their
  /// number and types must exactly match those of the original return values
  /// of the node), or leaves Results empty, which indicates that the node is
  /// not to be custom lowered after all. The default implementation calls
  /// LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal. If the target has no operations that require custom
  /// lowering, it need not implement this. The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
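
  // A minimal sketch of a LowerOperation override; the dispatch-by-opcode
  // switch is the common shape in in-tree targets. lowerGlobalAddress is a
  // hypothetical target helper, not part of this header.
  //
  //   SDValue MyTargetLowering::LowerOperation(SDValue Op,
  //                                            SelectionDAG &DAG) const {
  //     switch (Op.getOpcode()) {
  //     case ISD::GlobalAddress:
  //       return lowerGlobalAddress(Op, DAG);
  //     default:
  //       llvm_unreachable("unexpected node reached custom lowering");
  //     }
  //   }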
  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for
  /// that result type. The target places new result values for the node in
  /// Results (their number and types must exactly match those of the original
  /// return values of the node), or leaves Results empty, which indicates that
  /// the node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to. This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,      // Constraint represents specific register(s).
    C_RegisterClass, // Constraint represents any of register(s) in class.
    C_Memory,        // Memory constraint.
    C_Address,       // Address constraint.
    C_Immediate,     // Requires an immediate.
    C_Other,         // Something else.
    C_Unknown        // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid = -1, // No match.
    CW_Okay = 0,     // Acceptable.
    CW_Good = 1,     // Good weight.
    CW_Better = 2,   // Better weight.
    CW_Best = 3,     // Best weight.

    // Well-known weights.
    CW_SpecificReg = CW_Okay, // Specific register operands.
    CW_Register = CW_Good,    // Register operands.
    CW_Memory = CW_Better,    // Memory operands.
    CW_Constant = CW_Best,    // Constant operand.
    CW_Default = CW_Okay      // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m". TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst. This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    LLVM_ABI bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    LLVM_ABI unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values. If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                const CallBase &Call) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the
  /// specific AsmOperandInfo, setting OpInfo.ConstraintCode and
  /// OpInfo.ConstraintType. If the actual operand being passed in is
  /// available, it can be passed in as Op, otherwise an empty SDValue can be
  /// passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;
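
  // A minimal sketch of a getConstraintType override that claims one
  // target-specific single-letter code and defers everything else to the
  // generic implementation; the 'Z' code is a hypothetical example.
  //
  //   TargetLowering::ConstraintType
  //   MyTargetLowering::getConstraintType(StringRef Constraint) const {
  //     if (Constraint.size() == 1 && Constraint[0] == 'Z')
  //       return C_Memory;
  //     return TargetLowering::getConstraintType(Constraint);
  //   }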
  using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>;
  using ConstraintGroup = SmallVector<ConstraintPair>;
  /// Given an OpInfo with a list of constraint codes as strings, return a
  /// sorted vector of pairs of constraint codes and their types, in priority
  /// of what we'd prefer to lower them as. This may contain immediates that
  /// cannot be lowered, but it is meant to be a machine-agnostic order of
  /// preferences.
  ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register number of 0 and the register
  /// class pointer.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;
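
  // A minimal sketch of a getRegForInlineAsmConstraint override mapping the
  // generic 'r' code onto a hypothetical GPR register class before deferring
  // to the base implementation for everything else.
  //
  //   std::pair<unsigned, const TargetRegisterClass *>
  //   MyTargetLowering::getRegForInlineAsmConstraint(
  //       const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  //     if (Constraint == "r")
  //       return std::make_pair(0U, &MyTarget::GPRRegClass);
  //     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint,
  //                                                         VT);
  //   }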
  virtual InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "m")
      return InlineAsm::ConstraintCode::m;
    if (ConstraintCode == "o")
      return InlineAsm::ConstraintCode::o;
    if (ConstraintCode == "X")
      return InlineAsm::ConstraintCode::X;
    if (ConstraintCode == "p")
      return InlineAsm::ConstraintCode::p;
    return InlineAsm::ConstraintCode::Unknown;
  }

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  // Lower custom output constraints. If invalid, return SDValue().
  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                              const SDLoc &DL,
                                              const AsmOperandInfo &OpInfo,
                                              SelectionDAG &DAG) const;

  // Targets may override this function to collect operands from the CallInst
  // and, for example, lower them into the SelectionDAG operands.
  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
                                              SmallVectorImpl<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    bool IsAfterLegalTypes,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    bool IsAfterLegalTypes,
                    SmallVectorImpl<SDNode *> &Created) const;

  // Build sdiv by power-of-2 with conditional move instructions.
  SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SREM lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SREM is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the
  /// input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified'
  /// or 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }
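
  // A minimal sketch of a getSqrtEstimate override for a target with a
  // hardware reciprocal-square-root estimate; MyISD::FRSQRTE is a
  // hypothetical target node and the two refinement steps are an assumed
  // tuning choice.
  //
  //   SDValue MyTargetLowering::getSqrtEstimate(SDValue Operand,
  //                                             SelectionDAG &DAG,
  //                                             int Enabled,
  //                                             int &RefinementSteps,
  //                                             bool &UseOneConstNR,
  //                                             bool Reciprocal) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32)   // a real override would also consult Enabled
  //       return SDValue();
  //     RefinementSteps = 2;  // ask the caller for two NR iterations
  //     UseOneConstNR = true;
  //     return DAG.getNode(MyISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  //   }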
  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
  /// required for correctness since InstCombine might have canonicalized a
  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall
  /// through to the default expansion/soften to libcall, we might introduce a
  /// link-time dependency on libm into a file that originally did not have
  /// one.
  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node,
                                         SelectionDAG &DAG) const;

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified'
  /// or 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

  /// Return a target-dependent comparison result if the input operand is
  /// suitable for use with a square root estimate calculation. For example,
  /// the comparison may check if the operand is NAN, INF, zero, normal, etc.
  /// The result should be used as the condition operand for a select or
  /// branch.
  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                                   const DenormalMode &Mode) const;

  /// Return a target-dependent result if the input operand is not suitable for
  /// use with a square root estimate calculation.
  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
                                              SelectionDAG &DAG) const {
    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result,
                      EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes. One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
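
  // For reference, these expansions are built on the schoolbook decomposition
  // of an n-bit multiply into n/2-bit halves (h = n/2):
  //
  //   (LH*2^h + LL) * (RH*2^h + RL)
  //     = LL*RL + (LL*RH + LH*RL)*2^h + LH*RH*2^(2h)
  //
  // so the low part comes from LL*RL and the high part accumulates the cross
  // products plus carries, using MULH[SU] or [US]MUL_LOHI depending on the
  // MulExpansionKind.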
  /// Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit
  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
  /// will be expanded by DAGCombiner. This is not possible for all constant
  /// divisors.
  /// \param N Node to expand
  /// \param Result A vector that will be filled with the lo and high parts of
  ///        the results. For *DIVREM, this will be the quotient parts followed
  ///        by the remainder parts.
  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
  ///        half of VT.
  /// \param LL Low bits of the LHS of the operation. You can use this
  ///        parameter if you want to control how low bits are extracted from
  ///        the LHS.
  /// \param LH High bits of the LHS of the operation. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
                              EVT HiLoVT, SelectionDAG &DAG,
                              SDValue LL = SDValue(),
                              SDValue LH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;
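
  // For reference, a funnel shift over a power-of-two bit width BW can be
  // expanded without an out-of-range (BW - Z) shift amount by pre-shifting
  // one operand by a single bit, e.g. for FSHL:
  //
  //   fshl(X, Y, Z) == (X << (Z & (BW - 1))) | ((Y >> 1) >> (~Z & (BW - 1)))
  //
  // which stays well-defined even when Z % BW == 0.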
  /// Expand rotations.
  /// \param N Node to expand
  /// \param AllowVectorOps expand vector rotate, this should only be performed
  ///        if the legalization is happening outside of LegalizeVectorOps
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;

  /// Expand shift-by-parts.
  /// \param N Node to expand
  /// \param Lo lower-output-part after conversion
  /// \param Hi upper-output-part after conversion
  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
                        SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted
  /// inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand fminimum/fmaximum into multiple comparisons with selects.
  SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand fminimumnum/fmaximumnum into multiple comparisons with selects.
  SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;

  /// Truncate Op to ResultVT. If the result is exact, leave it alone. If it is
  /// not exact, force the result to be odd.
  /// \param ResultVT The type of the result.
  /// \param Op The value to round.
  /// \returns The expansion result
  SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL,
                                  SelectionDAG &DAG) const;

  /// Expand round(fp) to fp conversion.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand check for floating point class.
  /// \param ResultVT The type of intrinsic call result.
  /// \param Op The tested value.
  /// \param Test The test to perform.
  /// \param Flags The optimization flags.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
                           SDNodeFlags Flags, const SDLoc &DL,
                           SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes; vector nodes can
  /// only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;
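
  // For reference, the classic branch-free popcount that this style of
  // expansion is built around, shown for an i32 value:
  //
  //   v = v - ((v >> 1) & 0x55555555);                // sums of 2-bit groups
  //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // sums of 4-bit groups
  //   v = (v + (v >> 4)) & 0x0F0F0F0F;                // sums of 8-bit groups
  //   return (v * 0x01010101) >> 24;                  // accumulate the bytes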
  /// Expand VP_CTPOP nodes.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTTZ via Table Lookup.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
                          EVT VT, SDValue Op, unsigned NumBitsPerElt) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VECTOR_FIND_LAST_ACTIVE nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const;

  /// Expand ABS nodes. Expands vector/scalar ABS nodes; vector nodes can only
  /// succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
  /// \param N Node to expand
  /// \param IsNegative indicate negated abs
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
                    bool IsNegative = false) const;

  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;

  /// Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
  /// scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
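
  // For reference, a shift/mask expansion of BSWAP on an i32 value has the
  // shape:
  //
  //   bswap(x) = (x << 24)
  //            | ((x & 0x0000FF00) << 8)
  //            | ((x >> 8) & 0x0000FF00)
  //            | (x >> 24);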
  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with i16/i32/i64 scalar
  /// types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
  /// i8/i16/i32/i64 scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Turn a load of a vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number
  /// of the stored elements. This number is equal to the number of '1' bits
  /// in the \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out
  /// of bounds the returned pointer is unspecified, but will be within the
  /// vector bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
  /// in memory for a vector of type \p VecVT starting at a base address of
  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
  /// returned pointer is unspecified, but the value returned will be such that
  /// the entire subvector would be within the vector bounds.
  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                 EVT SubVecVT, SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
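
  // For reference, the usual branch-free expansion of ISD::UADDSAT clamps to
  // all-ones on unsigned overflow:
  //
  //   Sum = ADD(LHS, RHS)
  //   Ovf = SETCC(Sum, LHS, SETULT)    // wrapped iff Sum < LHS
  //   Res = SELECT(Ovf, ALL_ONES, Sum)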
  /// Method for building the DAG expansion of ISD::[US]CMP. This
  /// method accepts integers as its arguments.
  SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Calculate the product twice the width of LHS and RHS. If HiLHS/HiRHS are
  /// non-null they will be included in the multiplication. The expansion works
  /// by splitting the 2 inputs into 4 pieces that we can multiply and add
  /// together without needing MULH or MUL_LOHI.
  void forceExpandMultiply(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
                           SDValue &Lo, SDValue &Hi, SDValue LHS, SDValue RHS,
                           SDValue HiLHS = SDValue(),
                           SDValue HiRHS = SDValue()) const;

  /// Calculate the full product of LHS and RHS, either via a libcall or
  /// through brute-force expansion of the multiplication. The expansion works
  /// by splitting the 2 inputs into 4 pieces that we can multiply and add
  /// together without needing MULH or MUL_LOHI.
  void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
                          const SDValue LHS, const SDValue RHS, SDValue &Lo,
                          SDValue &Hi) const;

  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
  /// only the first Count elements of the vector are used.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
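
  // For reference, an unordered VECREDUCE_ADD of <8 x i32> can be expanded by
  // repeatedly splitting the vector and combining the halves, then extracting
  // lane 0:
  //
  //   [a0 a1 a2 a3 a4 a5 a6 a7]
  //     -> [a0+a4 a1+a5 a2+a6 a3+a7]
  //     -> [a0+a4+a2+a6  a1+a5+a3+a7]
  //     -> a0+a4+a2+a6+a1+a5+a3+a7    (EXTRACT_VECTOR_ELT, lane 0)
  //
  // The SEQ variants instead chain the operation in element order, which
  // matters for floating-point reductions without reassociation.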
  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
  /// method accepts vectors as its arguments.
  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECTOR_COMPRESS into a sequence that extracts each element,
  /// stores it temporarily while advancing the store position, and then
  /// re-loads the final vector.
  SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const;

  /// Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations,
  /// consisting of zext/sext, extract_subvector, mul and add operations.
  SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const;

  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code
  /// CC on the current target. A VP_SETCC will additionally be given a Mask
  /// and/or EVL not equal to SDValue().
  ///
  /// If the SETCC has been legalized using AND / OR, then the legalized node
  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
  /// will be set to false. This will also hold if the VP_SETCC has been
  /// legalized using VP_AND / VP_OR.
  ///
  /// If the SETCC / VP_SETCC has been legalized by using
  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
  /// swapped, CC will be set to the new condition, and NeedInvert will be set
  /// to false.
  ///
  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
  /// then LHS and RHS will be unchanged, CC will be set to the inverted
  /// condcode, and NeedInvert will be set to true. The caller must invert the
  /// result of the SETCC with SelectionDAG::getLogicalNOT() or take equivalent
  /// action to swap the effect of a true/false result.
  ///
  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
  /// hasn't.
  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
                             SDValue &RHS, SDValue &CC, SDValue Mask,
                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
                             SDValue &Chain, bool IsSignaling = false) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified
  /// MachineInstr is created but not inserted into any basic blocks, and this
  /// method is called to expand it into a sequence of instructions,
  /// potentially also creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode(const Module &M) const { return false; }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target independent emulated TLS
  /// model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr, int JTI,
                                         SelectionDAG &DAG) const;

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
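
  // Worked example of the identity above for i32: ctlz(0) == 32, while
  // ctlz(x) <= 31 for any x != 0, so bit 5 of the ctlz result is set exactly
  // when x == 0. Hence:
  //
  //   seteq(x, 0)  ==  ctlz(x) >> 5    // log2(32) == 5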
  // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`.
  virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const {
    return true;
  }

  // Expand a vector operation by dividing it into smaller-length operations
  // and joining their results. SDValue() is returned when expansion did not
  // happen.
  SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const;

  /// Replace an extraction of a load with a narrowed load.
  ///
  /// \param ResultVT type of the result extraction.
  /// \param InVecVT type of the input vector, with bitcasts resolved.
  /// \param EltNo index of the vector element to load.
  /// \param OriginalLoad vector load to be replaced.
  /// \returns A load of \p ResultVT on success, SDValue() on failure.
  SDValue scalarizeExtractedVectorLoad(EVT ResultVT, const SDLoc &DL,
                                       EVT InVecVT, SDValue EltNo,
                                       LoadSDNode *OriginalLoad,
                                       SelectionDAG &DAG) const;

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithOr(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
                             ISD::CondCode Cond, const SDLoc &DL,
                             DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
                          SDValue CompTargetNode, ISD::CondCode Cond,
                          DAGCombinerInfo &DCI, const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
                          SDValue CompTargetNode, ISD::CondCode Cond,
                          DAGCombinerInfo &DCI, const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                            AttributeList attr,
                            SmallVectorImpl<ISD::OutputArg> &Outs,
                            const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H