//===-- CodeGenCommonISel.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common utilities that are shared between SelectionDAG and
// GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"

#define DEBUG_TYPE "codegen-common"

using namespace llvm;

/// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
/// if SuccMBB is null.
MachineBasicBlock *
StackProtectorDescriptor::addSuccessorMBB(
    const BasicBlock *BB, MachineBasicBlock *ParentMBB, bool IsLikely,
    MachineBasicBlock *SuccMBB) {
  // If SuccBB has not been created yet, create it.
  if (!SuccMBB) {
    MachineFunction *MF = ParentMBB->getParent();
    MachineFunction::iterator BBI(ParentMBB);
    SuccMBB = MF->CreateMachineBasicBlock(BB);
    MF->insert(++BBI, SuccMBB);
  }
  // Add it as a successor of ParentMBB.
  ParentMBB->addSuccessor(
      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  return SuccMBB;
}

/// Given that the input MI is before a partial terminator sequence TSeq,
/// return true if MI + TSeq is also a partial terminator sequence.
///
/// A terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
  // If we do not have a copy or an implicit def, we return true if and only if
  // MI is a debug value.
  if (!MI.isCopy() && !MI.isImplicitDef()) {
    // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to
    // the physical registers if there is debug info associated with the
    // terminator of our mbb. We want to include said debug info in our
    // terminator sequence, so we return true in that case.
    if (MI.isDebugInstr())
      return true;

    // For GlobalISel, we may have extension instructions for arguments within
    // copy sequences. Allow these.
    switch (MI.getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_UNMERGE_VALUES:
    case TargetOpcode::G_CONCAT_VECTORS:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_EXTRACT:
      return true;
    default:
      return false;
    }
  }

  // We have left the terminator sequence if we are not doing one of the
  // following:
  //
  // 1. Copying a vreg into a physical register.
  // 2. Copying a vreg into a vreg.
  // 3. Defining a register via an implicit def.

  // OPI should always be a register definition...
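  // Both COPY and IMPLICIT_DEF define their result in the first operand, so
  // inspecting the first operand is sufficient here.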
  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
  if (!OPI->isReg() || !OPI->isDef())
    return false;

  // Defining any register via an implicit def is always ok.
  if (MI.isImplicitDef())
    return true;

  // Grab the copy source...
  MachineInstr::const_mop_iterator OPI2 = OPI;
  ++OPI2;
  assert(OPI2 != MI.operands_end() &&
         "Should have a copy implying we should have 2 arguments.");

  // Make sure that the copy dest is not a vreg when the copy source is a
  // physical register.
  if (!OPI2->isReg() ||
      (!OPI->getReg().isPhysical() && OPI2->getReg().isPhysical()))
    return false;

  return true;
}

/// Find the split point at which to splice the end of BB into its successor
/// stack protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before
/// register allocation, use physical registers. This creates an issue for us
/// since physical registers at this point cannot travel across basic blocks.
/// Luckily, SelectionDAG always moves physical registers into vregs when they
/// enter functions and moves them through a sequence of copies back into the
/// physical registers right before the terminator, creating a
/// ``Terminator Sequence''. This function searches for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
MachineBasicBlock::iterator
llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII) {
  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
  if (SplitPoint == BB->begin())
    return SplitPoint;

  MachineBasicBlock::iterator Start = BB->begin();
  MachineBasicBlock::iterator Previous = SplitPoint;
  do {
    --Previous;
  } while (Previous != Start && Previous->isDebugInstr());

  if (TII.isTailCall(*SplitPoint) &&
      Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
    // Call frames cannot be nested, so if this frame is describing the tail
    // call itself, then we must insert before the sequence even starts. For
    // example:
    //     <split point>
    //     ADJCALLSTACKDOWN ...
    //     <Moves>
    //     ADJCALLSTACKUP ...
    //     TAILJMP somewhere
    // On the other hand, it could be an unrelated call in which case this tail
    // call has no register moves of its own and should be the split point.
    // For example:
    //     ADJCALLSTACKDOWN
    //     CALL something_else
    //     ADJCALLSTACKUP
    //     <split point>
    //     TAILJMP somewhere
    do {
      --Previous;
      if (Previous->isCall())
        return SplitPoint;
    } while (Previous->getOpcode() != TII.getCallFrameSetupOpcode());

    return Previous;
  }

  while (MIIsInTerminatorSequence(*Previous)) {
    SplitPoint = Previous;
    if (Previous == Start)
      break;
    --Previous;
  }

  return SplitPoint;
}

unsigned llvm::getInvertedFPClassTest(unsigned Test) {
  unsigned InvertedTest = ~Test & fcAllFlags;
  switch (InvertedTest) {
  default:
    break;
  case fcNan:
  case fcSNan:
  case fcQNan:
  case fcInf:
  case fcPosInf:
  case fcNegInf:
  case fcNormal:
  case fcPosNormal:
  case fcNegNormal:
  case fcSubnormal:
  case fcPosSubnormal:
  case fcNegSubnormal:
  case fcZero:
  case fcPosZero:
  case fcNegZero:
  case fcFinite:
  case fcPosFinite:
  case fcNegFinite:
    return InvertedTest;
  }
  return 0;
}

static MachineOperand *getSalvageOpsForCopy(const MachineRegisterInfo &MRI,
                                            MachineInstr &Copy) {
  assert(Copy.getOpcode() == TargetOpcode::COPY && "Must be a COPY");

  return &Copy.getOperand(1);
}

static MachineOperand *getSalvageOpsForTrunc(const MachineRegisterInfo &MRI,
                                             MachineInstr &Trunc,
                                             SmallVectorImpl<uint64_t> &Ops) {
  assert(Trunc.getOpcode() == TargetOpcode::G_TRUNC && "Must be a G_TRUNC");

  const auto FromLLT = MRI.getType(Trunc.getOperand(1).getReg());
  const auto ToLLT = MRI.getType(Trunc.defs().begin()->getReg());

  // TODO: Support non-scalar types.
  if (!FromLLT.isScalar()) {
    return nullptr;
  }

  auto ExtOps = DIExpression::getExtOps(FromLLT.getSizeInBits(),
                                        ToLLT.getSizeInBits(), false);
  Ops.append(ExtOps.begin(), ExtOps.end());
  return &Trunc.getOperand(1);
}

static MachineOperand *salvageDebugInfoImpl(const MachineRegisterInfo &MRI,
                                            MachineInstr &MI,
                                            SmallVectorImpl<uint64_t> &Ops) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_TRUNC:
    return getSalvageOpsForTrunc(MRI, MI, Ops);
  case TargetOpcode::COPY:
    return getSalvageOpsForCopy(MRI, MI);
  default:
    return nullptr;
  }
}

void llvm::salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                       MachineInstr &MI,
                                       ArrayRef<MachineOperand *> DbgUsers) {
  // This is an arbitrarily chosen limit on the maximum size of a debug
  // expression we can salvage up to, used for performance reasons.
  const unsigned MaxExpressionSize = 128;

  for (auto *DefMO : DbgUsers) {
    MachineInstr *DbgMI = DefMO->getParent();
    if (DbgMI->isIndirectDebugValue()) {
      continue;
    }

    int UseMOIdx = DbgMI->findRegisterUseOperandIdx(DefMO->getReg());
    assert(UseMOIdx != -1 && DbgMI->hasDebugOperandForReg(DefMO->getReg()) &&
           "Must use salvaged instruction as its location");

    // TODO: Support DBG_VALUE_LIST.
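    // Only single-location DBG_VALUE users are salvaged for now;
    // DBG_VALUE_LIST users are skipped below.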
    if (DbgMI->getOpcode() != TargetOpcode::DBG_VALUE) {
      assert(DbgMI->getOpcode() == TargetOpcode::DBG_VALUE_LIST &&
             "Must be either DBG_VALUE or DBG_VALUE_LIST");
      continue;
    }

    const DIExpression *SalvagedExpr = DbgMI->getDebugExpression();

    SmallVector<uint64_t, 16> Ops;
    auto Op0 = salvageDebugInfoImpl(MRI, MI, Ops);
    if (!Op0)
      continue;
    SalvagedExpr = DIExpression::appendOpsToArg(SalvagedExpr, Ops, 0, true);

    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (IsValidSalvageExpr) {
      auto &UseMO = DbgMI->getOperand(UseMOIdx);
      UseMO.setReg(Op0->getReg());
      UseMO.setSubReg(Op0->getSubReg());
      DbgMI->getDebugExpressionOp().setMetadata(SalvagedExpr);

      LLVM_DEBUG(dbgs() << "SALVAGE: " << *DbgMI << '\n');
    }
  }
}