//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>  // std::unique_ptr, std::make_unique (used below)
#include <numeric> // std::accumulate (used in RegsForValue)
#include <utility>
#include <vector>

namespace llvm {

class AllocaInst;
class AtomicCmpXchgInst;
class AtomicRMWInst;
class BasicBlock;
class BranchInst;
class CallInst;
class CallBrInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupPadInst;
class CleanupReturnInst;
class Constant;
class ConstantInt;
class ConstrainedFPIntrinsic;
class DbgValueInst;
class DataLayout;
class DIExpression;
class DILocalVariable;
class DILocation;
class FenceInst;
class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
class LLVMContext;
class LoadInst;
class MachineBasicBlock;
class PHINode;
class ResumeInst;
class ReturnInst;
class SDDbgValue;
class StoreInst;
class SwiftErrorValueTracking;
class SwitchInst;
class TargetLibraryInfo;
class TargetMachine;
class Type;
class VAArgInst;
class UnreachableInst;
class Use;
class User;
class Value;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value*, SDValue> NodeMap;

  /// Maps argument values for unused arguments. This is used
  /// to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI = nullptr;
    DebugLoc dl;
    unsigned SDNodeOrder = 0;

  public:
    DanglingDebugInfo() = default;
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}

    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// Keeps track of dbg_values for which we have not yet seen the referent.
  /// We defer handling these until we do see it.
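  /// For example (roughly): a dbg.value's operand is metadata, so it is not
  /// subject to SSA dominance and can be visited before any SDNode for the
  /// referent exists:
  ///
  ///   call void @llvm.dbg.value(metadata i32 %x, ...) ; no node for %x yet
  ///   %x = add i32 %a, %b                             ; node created here
  ///
  /// Such records are parked in this map and emitted later by
  /// resolveDanglingDebugInfo.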
  MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;

public:
  /// Loads are not emitted to the program immediately. We bunch them up and
  /// then emit token factor nodes when possible. This allows us to get simple
  /// disambiguation between loads without worrying about alias analysis.
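  /// For example (a sketch of the resulting DAG), two loads visited back to
  /// back can both take the current root as their chain input; a later root
  /// update then joins their output chains with a single node:
  ///
  ///   t1: i32,ch = load<...> Root, Ptr0
  ///   t2: i32,ch = load<...> Root, Ptr1
  ///   NewRoot: ch = TokenFactor t1:1, t2:1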
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.h/cpp for details.
  StatepointLoweringState StatepointLowering;

private:
  /// CopyToReg nodes that copy values to virtual registers for export to
  /// other blocks need to be emitted before any terminator instruction, but
  /// they have no other ordering requirements. We bunch them up and then emit
  /// a single TokenFactor for them just before terminator instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Similar to loads, nodes corresponding to constrained FP intrinsics are
  /// bunched up and emitted when necessary. These can be moved across each
  /// other and any (normal) memory operation (load or store), but not across
  /// calls or instructions having unspecified side effects. As a special
  /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
  /// even if otherwise unused, so they need to be chained before any
  /// terminator instruction (like PendingExports). We track the latter
  /// set of nodes in a separate list.
  SmallVector<SDValue, 8> PendingConstrainedFP;
  SmallVector<SDValue, 8> PendingConstrainedFPStrict;

  /// Update root to include all chains from the Pending list.
  SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);

  /// A unique monotonically increasing number used to order the SDNodes we
  /// create.
  unsigned SDNodeOrder;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  unsigned caseClusterRank(const SwitchCG::CaseCluster &CC,
                           SwitchCG::CaseClusterIt First,
                           SwitchCG::CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *
  peelDominantCaseCluster(const SwitchInst &SI,
                          SwitchCG::CaseClusterVector &Clusters,
                          BranchProbability &PeeledCaseProb);
  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check, and signals to isel via its state being
  /// initialized that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG codegen LLVM IR pass "Stack Protector". This
  /// necessitated splitting basic blocks at the IR level to create the
  /// success/failure basic blocks in the tail of the basic block in question.
  /// As a result of this, calls that would have qualified for the sibling
  /// call optimization were no longer eligible for optimization since said
  /// calls were no longer right in the "tail position" (i.e. the immediate
  /// predecessor of a ReturnInst instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we would get both the tail call optimization and the
  /// stack protector check!
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector
  ///      generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms
  ///      like OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one can not
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that can not be known at the IR
  ///      level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether
  ///      to perform a tail call is done in LowerCallTo in SelectionDAG,
  ///      which occurs after the Stack Protector Pass. As a result, one would
  ///      need to put the relevant callinst into the stack protector check
  ///      success basic block (where the return inst is placed) and then move
  ///      it back later at SelectionDAG/MI time before the stack protector
  ///      check if the tail call optimization failed. The MI level option was
  ///      nixed immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time,
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one can not handle multiple IR level basic blocks at the
  ///      SelectionDAG level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which
  ///      we wish to insert the stack protector check, we get the correct
  ///      behavior by always inserting the stack protector check right before
  ///      the return statement. This is a "magical transformation" since no
  ///      matter where the stack protector check intrinsic is, we always
  ///      insert the stack protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was
  /// devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector
  ///        check in by reusing the logic already therein. If we wish to
  ///        generate a stack protector check in a basic block, we place a
  ///        special IR intrinsic called llvm.stackprotectorcheck right before
  ///        the BB's returninst, or if there is a callinst that could
  ///        potentially be sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the
  ///        BB normally via SelectBasicBlock. In said process, when we visit
  ///        the stack protector check, we do not actually emit anything into
  ///        the BB. Instead, we just initialize the stack protector
  ///        descriptor class (which involves stashing information/creating
  ///        the success mbb and the failure mbb if we have not created one
  ///        for this function yet) and export the guard variable that we are
  ///        going to compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder
  ///        is initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block before
  ///        the terminator and then splice the terminator of said basic block
  ///        into the success basic block. Then we code-gen a new tail for the
  ///        parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
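  ///
  /// Schematically, the inlined instrumentation of technique 2) rewrites
  ///
  ///   ParentMBB:  ...; <terminator>
  ///
  /// into
  ///
  ///   ParentMBB:  ...; load guard values; compare; conditional branch
  ///   SuccessMBB: <terminator spliced out of ParentMBB>
  ///   FailureMBB: call __stack_chk_fail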
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor() = default;

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized, implying that we should/are ready to emit a stack
    /// protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    /// stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    /// parent mbb after we create the stack protector check (SuccessMBB).
    /// This BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    /// protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    /// is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace them with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// was violated.
    MachineBasicBlock *ParentMBB = nullptr;

    /// A basic block visited on stack protector check success that contains
    /// the terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB = nullptr;

    /// The basic block visited on stack protector check failure; it will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB = nullptr;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB is null), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is
    /// true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL = nullptr;
  AliasAnalysis *AA = nullptr;
  const TargetLibraryInfo *LibInfo;

  class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
        : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      SDB->addSuccessorWithProb(Src, Dst, Prob);
    }

  private:
    SelectionDAGBuilder *SDB;
  };

  std::unique_ptr<SDAGSwitchLowering> SL;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// Information about the swifterror values used throughout the function.
  SwiftErrorValueTracking &SwiftError;

  /// Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// This is set to true if a call in the current block has been translated
  /// as a tail call. In this case, no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      SwiftErrorValueTracking &swifterror, CodeGenOpt::Level ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)),
        FuncInfo(funcinfo), SwiftError(swifterror) {}

  void init(GCFunctionInfo *gfi, AliasAnalysis *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to
  /// complete switch lowering or PHI node updating; that information is
  /// cleared out as it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated
  /// from the clear so that debug information that is dangling in a basic
  /// block can be properly resolved in a different basic block. This allows
  /// the SelectionDAG to resolve dangling debug information attached to PHI
  /// nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any
  /// other memory node that may need to be ordered after any prior load
  /// instructions.
  SDValue getMemoryRoot();

  /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
  /// items. This must be done before emitting any call or any other node that
  /// may need to be ordered after FP instructions due to other side effects.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
  /// It is necessary to do this before emitting a terminator instruction.
  SDValue getControlRoot();
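  /// As an illustration of how these roots are used, lowering a store
  /// roughly follows this pattern (a sketch only, where Val, Ptr and PtrInfo
  /// stand for the store's operands; see visitStore for the real thing):
  ///
  ///   SDValue Chain = getMemoryRoot();  // flushes PendingLoads
  ///   SDValue St = DAG.getStore(Chain, getCurSDLoc(), Val, Ptr, PtrInfo);
  ///   DAG.setRoot(St);                  // the store becomes the new root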
  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// If there was a virtual register allocated for the value V, emit a
  /// CopyFromReg of the specified type Ty. Return an empty SDValue()
  /// otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of the variable considering the \p Expr, then this
  /// method will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  /// If we saw an earlier dbg_value referring to V, generate the debug data
  /// structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  /// For the given dangling debuginfo record, perform last-ditch efforts to
  /// resolve the debuginfo to something that is represented in this DAG. If
  /// this cannot be done, produce an Undef debug value record.
  void salvageUnresolvedDbgValue(DanglingDebugInfo &DDI);

  /// For a given Value, attempt to create and record an SDDbgValue in the
  /// SelectionDAG.
  bool handleDebugValue(const Value *V, DILocalVariable *Var,
                        DIExpression *Expr, DebugLoc CurDL,
                        DebugLoc InstDL, unsigned Order);

  /// Evict any dangling debug information, attempting to salvage it first.
  void resolveOrClearDbgInfo();

  SDValue getValue(const Value *V);
  bool findValue(const Value *V) const;
  /// Return the SDNode for the specified IR value if it exists.
  SDNode *getNodeForIRValue(const Value *V) {
    // Use a single map lookup rather than find() followed by operator[].
    auto It = NodeMap.find(V);
    if (It == NodeMap.end())
      return nullptr;
    return It->second.getNode();
  }

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }
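  /// Most visit methods compute a node for the instruction's result and
  /// record it via setValue, along the lines of (a sketch of what
  /// visitBinary does for visitAdd below):
  ///
  ///   setValue(&I, DAG.getNode(ISD::ADD, getCurSDLoc(), VT,
  ///                            getValue(I.getOperand(0)),
  ///                            getValue(I.getOperand(1))));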
  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from [0, N) to an assert zext to an integer of the
  // nearest floor power-of-two width.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                const CallBase *Call, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(ImmutableStatepoint ISP,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(SwitchCG::BitTestBlock &B,
                          MachineBasicBlock *SwitchBB);
  void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                        BranchProbability BranchProbToNext, unsigned Reg,
                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
  void visitJumpTable(SwitchCG::JumpTable &JT);
  void visitJumpTableHeader(SwitchCG::JumpTable &JT,
                            SwitchCG::JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitCallBr(const CallBrInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);
  void visitFreeze(const FreezeInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(ImmutableCallSite CS,
                       const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message);

  /// If V is a function argument then create a corresponding DBG_VALUE
  /// machine instruction for it now. At the end of instruction selection,
  /// they will be inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);

  /// Lowers CallInst to an external symbol.
  void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName);
};

/// This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information
/// about the values. The most common situation is to represent one value at a
/// time, but struct or array values are handled element-wise as multiple
/// values. The splitting of aggregates is performed recursively, so that we
/// never have aggregate-typed registers. The values at this point do not
/// necessarily have legal types, so each value may require one or more
/// registers of some legal type.
///
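/// For example, on a typical 32-bit target an IR value of type i64 is
/// assigned two 32-bit registers: ValueVTs = {i64}, RegVTs = {i32},
/// RegCount = {2}, and Regs holds the two register numbers.
///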
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and
  /// it records, for each value, the type of the assigned register or
  /// registers. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function; however, with physical registers it is
  /// necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI dependent manner,
  /// different from normal type legalization.
  Optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               Optional<CallingConv::ID> CC = None);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               Optional<CallingConv::ID> CC);

  bool isABIMangled() const {
    return CallConv.hasValue();
  }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copy from this value and return
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr,
  /// no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copy the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input
  /// and updates them for the output Chain/Flag. If the Flag pointer is
  /// nullptr, no flag is used. If V is not nullptr, then it is used in
  /// printing better diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds
  /// the code marker, matching input operand index (if applicable), and
  /// includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, unsigned>, 4> getRegsAndSizes() const;
};

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H