//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
"llvm/IR/IntrinsicInst.h" 87 #include "llvm/IR/Intrinsics.h" 88 #include "llvm/IR/IntrinsicsAArch64.h" 89 #include "llvm/IR/IntrinsicsWebAssembly.h" 90 #include "llvm/IR/LLVMContext.h" 91 #include "llvm/IR/Metadata.h" 92 #include "llvm/IR/Module.h" 93 #include "llvm/IR/Operator.h" 94 #include "llvm/IR/PatternMatch.h" 95 #include "llvm/IR/Statepoint.h" 96 #include "llvm/IR/Type.h" 97 #include "llvm/IR/User.h" 98 #include "llvm/IR/Value.h" 99 #include "llvm/MC/MCContext.h" 100 #include "llvm/MC/MCSymbol.h" 101 #include "llvm/Support/AtomicOrdering.h" 102 #include "llvm/Support/BranchProbability.h" 103 #include "llvm/Support/Casting.h" 104 #include "llvm/Support/CodeGen.h" 105 #include "llvm/Support/CommandLine.h" 106 #include "llvm/Support/Compiler.h" 107 #include "llvm/Support/Debug.h" 108 #include "llvm/Support/ErrorHandling.h" 109 #include "llvm/Support/MachineValueType.h" 110 #include "llvm/Support/MathExtras.h" 111 #include "llvm/Support/raw_ostream.h" 112 #include "llvm/Target/TargetIntrinsicInfo.h" 113 #include "llvm/Target/TargetMachine.h" 114 #include "llvm/Target/TargetOptions.h" 115 #include "llvm/Transforms/Utils/Local.h" 116 #include <algorithm> 117 #include <cassert> 118 #include <cstddef> 119 #include <cstdint> 120 #include <cstring> 121 #include <iterator> 122 #include <limits> 123 #include <numeric> 124 #include <tuple> 125 #include <utility> 126 #include <vector> 127 128 using namespace llvm; 129 using namespace PatternMatch; 130 using namespace SwitchCG; 131 132 #define DEBUG_TYPE "isel" 133 134 /// LimitFloatPrecision - Generate low-precision inline sequences for 135 /// some float libcalls (6, 8 or 12 bits). 136 static unsigned LimitFloatPrecision; 137 138 static cl::opt<bool> 139 InsertAssertAlign("insert-assert-align", cl::init(true), 140 cl::desc("Insert the experimental `assertalign` node."), 141 cl::ReallyHidden); 142 143 static cl::opt<unsigned, true> 144 LimitFPPrecision("limit-float-precision", 145 cl::desc("Generate low-precision inline sequences " 146 "for some float libcalls"), 147 cl::location(LimitFloatPrecision), cl::Hidden, 148 cl::init(0)); 149 150 static cl::opt<unsigned> SwitchPeelThreshold( 151 "switch-peel-threshold", cl::Hidden, cl::init(66), 152 cl::desc("Set the case probability threshold for peeling the case from a " 153 "switch statement. A value greater than 100 will void this " 154 "optimization")); 155 156 // Limit the width of DAG chains. This is important in general to prevent 157 // DAG-based analysis from blowing up. For example, alias analysis and 158 // load clustering may not complete in reasonable time. It is difficult to 159 // recognize and avoid this situation within each individual analysis, and 160 // future analyses are likely to have the same behavior. Limiting DAG width is 161 // the safe approach and will be especially important with global DAGs. 162 // 163 // MaxParallelChains default is arbitrarily high to avoid affecting 164 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st 165 // sequence over this should have been converted to llvm.memcpy by the 166 // frontend. It is easy to induce this behavior with .ll code such as: 167 // %buffer = alloca [4096 x i8] 168 // %data = load [4096 x i8]* %argPtr 169 // store [4096 x i8] %data, [4096 x i8]* %buffer 170 static const unsigned MaxParallelChains = 64; 171 172 // Return the calling convention if the Value passed requires ABI mangling as it 173 // is a parameter to a function or a return value from a function which is not 174 // an intrinsic. 
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or an
    // indirect function call in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power-of-2 part.
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
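        // The high piece is any-extended to the combined width and shifted up
        // by the size of the low piece; the low piece is zero-extended, and
        // the two are OR'd together.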
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert((PartEVT.getVectorElementCount().Min >
              ValueVT.getVectorElementCount().Min) &&
             (PartEVT.getVectorElementCount().Scalable ==
              ValueVT.getVectorElementCount().Scalable) &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getVectorIdxConstant(0, DL));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorElementCount() == ValueVT.getVectorElementCount() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);

  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. If vectors
    // are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getVectorIdxConstant(0, DL));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    else
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isFixedLengthVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
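/// With a single part this widens, bitcasts, or extracts the value as needed;
/// with multiple parts it first splits the vector into intermediate operands.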
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  ElementCount DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount(NumIntermediates, false);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt);
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
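      // (AssertZext when the upper bits are known zero, AssertSext when the
      // value is known to be sign-extended.)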
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the values' legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // ConstrainedFPIntrinsics handle their own FMF.
    if (!isa<ConstrainedFPIntrinsic>(&I)) {
      // Propagate the fast-math-flags of this IR instruction to the DAG node
      // that maps to this instruction.
      // TODO: We could handle all flags (nsw, etc) here.
      // TODO: If an IR instruction maps to >1 node, only the final node will
      // have flags set.
      if (SDNode *Node = getNodeForIRValue(&I)) {
        SDNodeFlags IncomingFlags;
        IncomingFlags.copyFMF(*FPMO);
        if (!Node->getFlags().isDefined())
          Node->setFlags(IncomingFlags);
        else
          Node->intersectFlagsWith(IncomingFlags);
      }
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that proves this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << "in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
                        << DDI.getDI() << "\nBy stripping back to:\n  " << V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << DDI.getDI()
                    << "\n");
  LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
                    << "\n");
}

bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

  // Special rules apply for the first dbg.values of parameter variables in a
  // function: identify them by the fact that they reference Argument Values,
  // that they're parameters, and that they are parameters of the current
  // function. We need to let them dangle until they get an SDNode.
  bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
                       !InstDL.getInlinedAt();
  if (!IsParamOfFunc) {
    // The value is not used in this block yet (or it would have an SDNode).
    // We still want the value to appear for the user if possible -- if it has
    // an associated VReg, we can refer to that instead.
    auto VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      unsigned Reg = VMI->second;
      // If this is a PHI node, it may be split up into several MI PHI nodes
      // (in FunctionLoweringInfo::set).
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                       V->getType(), None);
      if (RFV.occupiesMultipleRegs()) {
        unsigned Offset = 0;
        unsigned BitsToDescribe = 0;
        if (auto VarSize = Var->getSizeInBits())
          BitsToDescribe = *VarSize;
        if (auto Fragment = Expr->getFragmentInfo())
          BitsToDescribe = Fragment->SizeInBits;
        for (auto RegAndSize : RFV.getRegsAndSizes()) {
          unsigned RegisterSize = RegAndSize.second;
          // Bail out if all bits are described already.
          if (Offset >= BitsToDescribe)
            break;
          unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
                                      ? BitsToDescribe - Offset
                                      : RegisterSize;
          auto FragmentExpr = DIExpression::createFragmentExpression(
              Expr, Offset, FragmentSize);
          if (!FragmentExpr)
            continue;
          SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
                                    false, dl, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          Offset += RegisterSize;
        }
      } else {
        SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, nullptr, false);
      }
      return true;
    }
  }

  return false;
}

void SelectionDAGBuilder::resolveOrClearDbgInfo() {
  // Try to fixup any remaining dangling debug info -- and drop it if we can't.
  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
      salvageUnresolvedDbgValue(DDI);
  clearDanglingDebugInfo();
}

/// getCopyFromRegs - If there was a virtual register allocated for the value V,
/// emit CopyFromReg of the specified type Ty. Return an empty SDValue()
/// otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    Register InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be
      // used in a location which may differ from the original debug location.
      // This is relevant to Constant and ConstantFP nodes because they can
      // appear as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
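/// Constants of all kinds are materialized here; static allocas become frame
/// indices, and instructions deferred by fast-isel are selected on demand.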
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (match(C, m_VScale(DAG.getDataLayout())))
      return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
            dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the element to the Ops list
        // to form a flattened list of all the values.
1545 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) 1546 Ops.push_back(SDValue(Val, i)); 1547 } 1548 1549 if (isa<ArrayType>(CDS->getType())) 1550 return DAG.getMergeValues(Ops, getCurSDLoc()); 1551 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops); 1552 } 1553 1554 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { 1555 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && 1556 "Unknown struct or array constant!"); 1557 1558 SmallVector<EVT, 4> ValueVTs; 1559 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs); 1560 unsigned NumElts = ValueVTs.size(); 1561 if (NumElts == 0) 1562 return SDValue(); // empty struct 1563 SmallVector<SDValue, 4> Constants(NumElts); 1564 for (unsigned i = 0; i != NumElts; ++i) { 1565 EVT EltVT = ValueVTs[i]; 1566 if (isa<UndefValue>(C)) 1567 Constants[i] = DAG.getUNDEF(EltVT); 1568 else if (EltVT.isFloatingPoint()) 1569 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT); 1570 else 1571 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT); 1572 } 1573 1574 return DAG.getMergeValues(Constants, getCurSDLoc()); 1575 } 1576 1577 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) 1578 return DAG.getBlockAddress(BA, VT); 1579 1580 VectorType *VecTy = cast<VectorType>(V->getType()); 1581 1582 // Now that we know the number and type of the elements, get that number of 1583 // elements into the Ops array based on what kind of constant it is. 1584 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) { 1585 SmallVector<SDValue, 16> Ops; 1586 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements(); 1587 for (unsigned i = 0; i != NumElements; ++i) 1588 Ops.push_back(getValue(CV->getOperand(i))); 1589 1590 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops); 1591 } else if (isa<ConstantAggregateZero>(C)) { 1592 EVT EltVT = 1593 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType()); 1594 1595 SDValue Op; 1596 if (EltVT.isFloatingPoint()) 1597 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT); 1598 else 1599 Op = DAG.getConstant(0, getCurSDLoc(), EltVT); 1600 1601 if (isa<ScalableVectorType>(VecTy)) 1602 return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op); 1603 else { 1604 SmallVector<SDValue, 16> Ops; 1605 Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op); 1606 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops); 1607 } 1608 } 1609 llvm_unreachable("Unknown vector constant"); 1610 } 1611 1612 // If this is a static alloca, generate it as the frameindex instead of 1613 // computation. 1614 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1615 DenseMap<const AllocaInst*, int>::iterator SI = 1616 FuncInfo.StaticAllocaMap.find(AI); 1617 if (SI != FuncInfo.StaticAllocaMap.end()) 1618 return DAG.getFrameIndex(SI->second, 1619 TLI.getFrameIndexTy(DAG.getDataLayout())); 1620 } 1621 1622 // If this is an instruction which fast-isel has deferred, select it now. 
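  // For example (illustrative scenario, not from the source): if fast-isel
  // emitted this block but could not handle an instruction such as
  //   %x = fadd float %a, %b
  // it leaves %x to SelectionDAG. Creating the vreg here and reading it back
  // with CopyFromReg lets uses of %x be selected without the def in hand.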
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);

    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType(), getABIRegCopyCC(V));
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
    return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
  }
  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  if (!IsSEH)
    CatchPadMBB->setIsEHScopeEntry();
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BBs.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of an EH scope/funclet.
  FuncInfo.MBB->setIsEHScopeEntry();
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  if (Pers != EHPersonality::Wasm_CXX) {
    FuncInfo.MBB->setIsEHFuncletEntry();
    FuncInfo.MBB->setIsCleanupFuncletEntry();
  }
}

// For wasm, there's always a single catch pad attached to a catchswitch, and
// the control flow always stops at the single catch pad, as it does for a
// cleanup pad. If the caught exception is not one of the types the catch pad
// catches, it will be rethrown by a rethrow.
static void findWasmUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations. We don't
      // continue to the unwind destination of the catchswitch for wasm.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        UnwindDests.back().first->setIsEHScopeEntry();
      }
      break;
    } else {
      continue;
    }
  }
}

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
      classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");
    return;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all
      // known personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1776 if (IsMSVCCXX || IsCoreCLR) 1777 UnwindDests.back().first->setIsEHFuncletEntry(); 1778 if (!IsSEH) 1779 UnwindDests.back().first->setIsEHScopeEntry(); 1780 } 1781 NewEHPadBB = CatchSwitch->getUnwindDest(); 1782 } else { 1783 continue; 1784 } 1785 1786 BranchProbabilityInfo *BPI = FuncInfo.BPI; 1787 if (BPI && NewEHPadBB) 1788 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB); 1789 EHPadBB = NewEHPadBB; 1790 } 1791 } 1792 1793 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) { 1794 // Update successor info. 1795 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 1796 auto UnwindDest = I.getUnwindDest(); 1797 BranchProbabilityInfo *BPI = FuncInfo.BPI; 1798 BranchProbability UnwindDestProb = 1799 (BPI && UnwindDest) 1800 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest) 1801 : BranchProbability::getZero(); 1802 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests); 1803 for (auto &UnwindDest : UnwindDests) { 1804 UnwindDest.first->setIsEHPad(); 1805 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second); 1806 } 1807 FuncInfo.MBB->normalizeSuccProbs(); 1808 1809 // Create the terminator node. 1810 SDValue Ret = 1811 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot()); 1812 DAG.setRoot(Ret); 1813 } 1814 1815 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) { 1816 report_fatal_error("visitCatchSwitch not yet implemented!"); 1817 } 1818 1819 void SelectionDAGBuilder::visitRet(const ReturnInst &I) { 1820 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 1821 auto &DL = DAG.getDataLayout(); 1822 SDValue Chain = getControlRoot(); 1823 SmallVector<ISD::OutputArg, 8> Outs; 1824 SmallVector<SDValue, 8> OutVals; 1825 1826 // Calls to @llvm.experimental.deoptimize don't generate a return value, so 1827 // lower 1828 // 1829 // %val = call <ty> @llvm.experimental.deoptimize() 1830 // ret <ty> %val 1831 // 1832 // differently. 1833 if (I.getParent()->getTerminatingDeoptimizeCall()) { 1834 LowerDeoptimizingReturn(); 1835 return; 1836 } 1837 1838 if (!FuncInfo.CanLowerReturn) { 1839 unsigned DemoteReg = FuncInfo.DemoteRegister; 1840 const Function *F = I.getParent()->getParent(); 1841 1842 // Emit a store of the return value through the virtual register. 1843 // Leave Outs empty so that LowerReturn won't try to load return 1844 // registers the usual way. 1845 SmallVector<EVT, 1> PtrValueVTs; 1846 ComputeValueVTs(TLI, DL, 1847 F->getReturnType()->getPointerTo( 1848 DAG.getDataLayout().getAllocaAddrSpace()), 1849 PtrValueVTs); 1850 1851 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), 1852 DemoteReg, PtrValueVTs[0]); 1853 SDValue RetOp = getValue(I.getOperand(0)); 1854 1855 SmallVector<EVT, 4> ValueVTs, MemVTs; 1856 SmallVector<uint64_t, 4> Offsets; 1857 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs, 1858 &Offsets); 1859 unsigned NumValues = ValueVTs.size(); 1860 1861 SmallVector<SDValue, 4> Chains(NumValues); 1862 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType()); 1863 for (unsigned i = 0; i != NumValues; ++i) { 1864 // An aggregate return value cannot wrap around the address space, so 1865 // offsets to its parts don't wrap either. 
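      // For example (illustrative): demoting "ret { i64, i64 } %v" on a
      // typical 64-bit target emits two stores through the sret pointer, at
      // byte offsets 0 and 8, instead of returning the members in registers.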
1866 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]); 1867 1868 SDValue Val = RetOp.getValue(RetOp.getResNo() + i); 1869 if (MemVTs[i] != ValueVTs[i]) 1870 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]); 1871 Chains[i] = DAG.getStore( 1872 Chain, getCurSDLoc(), Val, 1873 // FIXME: better loc info would be nice. 1874 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), 1875 commonAlignment(BaseAlign, Offsets[i])); 1876 } 1877 1878 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 1879 MVT::Other, Chains); 1880 } else if (I.getNumOperands() != 0) { 1881 SmallVector<EVT, 4> ValueVTs; 1882 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs); 1883 unsigned NumValues = ValueVTs.size(); 1884 if (NumValues) { 1885 SDValue RetOp = getValue(I.getOperand(0)); 1886 1887 const Function *F = I.getParent()->getParent(); 1888 1889 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( 1890 I.getOperand(0)->getType(), F->getCallingConv(), 1891 /*IsVarArg*/ false); 1892 1893 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 1894 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex, 1895 Attribute::SExt)) 1896 ExtendKind = ISD::SIGN_EXTEND; 1897 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex, 1898 Attribute::ZExt)) 1899 ExtendKind = ISD::ZERO_EXTEND; 1900 1901 LLVMContext &Context = F->getContext(); 1902 bool RetInReg = F->getAttributes().hasAttribute( 1903 AttributeList::ReturnIndex, Attribute::InReg); 1904 1905 for (unsigned j = 0; j != NumValues; ++j) { 1906 EVT VT = ValueVTs[j]; 1907 1908 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) 1909 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind); 1910 1911 CallingConv::ID CC = F->getCallingConv(); 1912 1913 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT); 1914 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT); 1915 SmallVector<SDValue, 4> Parts(NumParts); 1916 getCopyToParts(DAG, getCurSDLoc(), 1917 SDValue(RetOp.getNode(), RetOp.getResNo() + j), 1918 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind); 1919 1920 // 'inreg' on function refers to return value 1921 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1922 if (RetInReg) 1923 Flags.setInReg(); 1924 1925 if (I.getOperand(0)->getType()->isPointerTy()) { 1926 Flags.setPointer(); 1927 Flags.setPointerAddrSpace( 1928 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace()); 1929 } 1930 1931 if (NeedsRegBlock) { 1932 Flags.setInConsecutiveRegs(); 1933 if (j == NumValues - 1) 1934 Flags.setInConsecutiveRegsLast(); 1935 } 1936 1937 // Propagate extension type if any 1938 if (ExtendKind == ISD::SIGN_EXTEND) 1939 Flags.setSExt(); 1940 else if (ExtendKind == ISD::ZERO_EXTEND) 1941 Flags.setZExt(); 1942 1943 for (unsigned i = 0; i < NumParts; ++i) { 1944 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(), 1945 VT, /*isfixed=*/true, 0, 0)); 1946 OutVals.push_back(Parts[i]); 1947 } 1948 } 1949 } 1950 } 1951 1952 // Push in swifterror virtual register as the last element of Outs. This makes 1953 // sure swifterror virtual register will be returned in the swifterror 1954 // physical register. 
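  // (Illustrative: on targets with swifterror support, a dedicated register
  // is reserved for the error value -- e.g. X21 on AArch64, R12 on X86-64 --
  // and the extra OutputArg appended below is what routes the error vreg
  // into that physical register.)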
1955 const Function *F = I.getParent()->getParent(); 1956 if (TLI.supportSwiftError() && 1957 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) { 1958 assert(SwiftError.getFunctionArg() && "Need a swift error argument"); 1959 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1960 Flags.setSwiftError(); 1961 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/, 1962 EVT(TLI.getPointerTy(DL)) /*argvt*/, 1963 true /*isfixed*/, 1 /*origidx*/, 1964 0 /*partOffs*/)); 1965 // Create SDNode for the swifterror virtual register. 1966 OutVals.push_back( 1967 DAG.getRegister(SwiftError.getOrCreateVRegUseAt( 1968 &I, FuncInfo.MBB, SwiftError.getFunctionArg()), 1969 EVT(TLI.getPointerTy(DL)))); 1970 } 1971 1972 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg(); 1973 CallingConv::ID CallConv = 1974 DAG.getMachineFunction().getFunction().getCallingConv(); 1975 Chain = DAG.getTargetLoweringInfo().LowerReturn( 1976 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG); 1977 1978 // Verify that the target's LowerReturn behaved as expected. 1979 assert(Chain.getNode() && Chain.getValueType() == MVT::Other && 1980 "LowerReturn didn't return a valid chain!"); 1981 1982 // Update the DAG with the new chain value resulting from return lowering. 1983 DAG.setRoot(Chain); 1984 } 1985 1986 /// CopyToExportRegsIfNeeded - If the given value has virtual registers 1987 /// created for it, emit nodes to copy the value into the virtual 1988 /// registers. 1989 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { 1990 // Skip empty types 1991 if (V->getType()->isEmptyTy()) 1992 return; 1993 1994 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V); 1995 if (VMI != FuncInfo.ValueMap.end()) { 1996 assert(!V->use_empty() && "Unused value assigned virtual registers!"); 1997 CopyValueToVirtualRegister(V, VMI->second); 1998 } 1999 } 2000 2001 /// ExportFromCurrentBlock - If this condition isn't known to be exported from 2002 /// the current basic block, add it to ValueMap now so that we'll get a 2003 /// CopyTo/FromReg. 2004 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { 2005 // No need to export constants. 2006 if (!isa<Instruction>(V) && !isa<Argument>(V)) return; 2007 2008 // Already exported? 2009 if (FuncInfo.isExportedInst(V)) return; 2010 2011 unsigned Reg = FuncInfo.InitializeRegForValue(V); 2012 CopyValueToVirtualRegister(V, Reg); 2013 } 2014 2015 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, 2016 const BasicBlock *FromBB) { 2017 // The operands of the setcc have to be in this block. We don't know 2018 // how to export them from some other block. 2019 if (const Instruction *VI = dyn_cast<Instruction>(V)) { 2020 // Can export from current BB. 2021 if (VI->getParent() == FromBB) 2022 return true; 2023 2024 // Is already exported, noop. 2025 return FuncInfo.isExportedInst(V); 2026 } 2027 2028 // If this is an argument, we can export it if the BB is the entry block or 2029 // if it is already exported. 2030 if (isa<Argument>(V)) { 2031 if (FromBB == &FromBB->getParent()->getEntryBlock()) 2032 return true; 2033 2034 // Otherwise, can only export this if it is already exported. 2035 return FuncInfo.isExportedInst(V); 2036 } 2037 2038 // Otherwise, constants can always be exported. 2039 return true; 2040 } 2041 2042 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks. 
2043 BranchProbability 2044 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src, 2045 const MachineBasicBlock *Dst) const { 2046 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2047 const BasicBlock *SrcBB = Src->getBasicBlock(); 2048 const BasicBlock *DstBB = Dst->getBasicBlock(); 2049 if (!BPI) { 2050 // If BPI is not available, set the default probability as 1 / N, where N is 2051 // the number of successors. 2052 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 2053 return BranchProbability(1, SuccSize); 2054 } 2055 return BPI->getEdgeProbability(SrcBB, DstBB); 2056 } 2057 2058 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src, 2059 MachineBasicBlock *Dst, 2060 BranchProbability Prob) { 2061 if (!FuncInfo.BPI) 2062 Src->addSuccessorWithoutProb(Dst); 2063 else { 2064 if (Prob.isUnknown()) 2065 Prob = getEdgeProbability(Src, Dst); 2066 Src->addSuccessor(Dst, Prob); 2067 } 2068 } 2069 2070 static bool InBlock(const Value *V, const BasicBlock *BB) { 2071 if (const Instruction *I = dyn_cast<Instruction>(V)) 2072 return I->getParent() == BB; 2073 return true; 2074 } 2075 2076 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions. 2077 /// This function emits a branch and is used at the leaves of an OR or an 2078 /// AND operator tree. 2079 void 2080 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond, 2081 MachineBasicBlock *TBB, 2082 MachineBasicBlock *FBB, 2083 MachineBasicBlock *CurBB, 2084 MachineBasicBlock *SwitchBB, 2085 BranchProbability TProb, 2086 BranchProbability FProb, 2087 bool InvertCond) { 2088 const BasicBlock *BB = CurBB->getBasicBlock(); 2089 2090 // If the leaf of the tree is a comparison, merge the condition into 2091 // the caseblock. 2092 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) { 2093 // The operands of the cmp have to be in this block. We don't know 2094 // how to export them from some other block. If this is the first block 2095 // of the sequence, no exporting is needed. 2096 if (CurBB == SwitchBB || 2097 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) && 2098 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) { 2099 ISD::CondCode Condition; 2100 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 2101 ICmpInst::Predicate Pred = 2102 InvertCond ? IC->getInversePredicate() : IC->getPredicate(); 2103 Condition = getICmpCondCode(Pred); 2104 } else { 2105 const FCmpInst *FC = cast<FCmpInst>(Cond); 2106 FCmpInst::Predicate Pred = 2107 InvertCond ? FC->getInversePredicate() : FC->getPredicate(); 2108 Condition = getFCmpCondCode(Pred); 2109 if (TM.Options.NoNaNsFPMath) 2110 Condition = getFCmpCodeWithoutNaN(Condition); 2111 } 2112 2113 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr, 2114 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb); 2115 SL->SwitchCases.push_back(CB); 2116 return; 2117 } 2118 } 2119 2120 // Create a CaseBlock record representing this branch. 2121 ISD::CondCode Opc = InvertCond ? 
                                         ISD::SETNE : ISD::SETEQ;
  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               Instruction::BinaryOps Opc,
                                               BranchProbability TProb,
                                               BranchProbability FProb,
                                               bool InvertCond) {
  // Skip over a 'not' that is not itself part of the tree, and remember to
  // invert the opcode and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      InBlock(NotCond, CurBB->getBasicBlock())) {
    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  unsigned BOpc = 0;
  if (BOp) {
    BOpc = BOp->getOpcode();
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TProb, FProb, InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
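    // (Illustrative check of the scheme above: with TProb = 1/2 and
    // FProb = 1/2, BB1 gets 1/4 and 3/4, and TmpBB gets 1/3 and 2/3 after
    // normalization; then 1/4 + 3/4 * 1/3 == 1/2 == TProb, as required.)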
2211 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 2212 Probs[0], Probs[1], InvertCond); 2213 } else { 2214 assert(Opc == Instruction::And && "Unknown merge op!"); 2215 // Codegen X & Y as: 2216 // BB1: 2217 // jmp_if_X TmpBB 2218 // jmp FBB 2219 // TmpBB: 2220 // jmp_if_Y TBB 2221 // jmp FBB 2222 // 2223 // This requires creation of TmpBB after CurBB. 2224 2225 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 2226 // The requirement is that 2227 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 2228 // = FalseProb for original BB. 2229 // Assuming the original probabilities are A and B, one choice is to set 2230 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to 2231 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 == 2232 // TrueProb for BB1 * FalseProb for TmpBB. 2233 2234 auto NewTrueProb = TProb + FProb / 2; 2235 auto NewFalseProb = FProb / 2; 2236 // Emit the LHS condition. 2237 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc, 2238 NewTrueProb, NewFalseProb, InvertCond); 2239 2240 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A). 2241 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2}; 2242 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 2243 // Emit the RHS condition into TmpBB. 2244 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 2245 Probs[0], Probs[1], InvertCond); 2246 } 2247 } 2248 2249 /// If the set of cases should be emitted as a series of branches, return true. 2250 /// If we should emit this as a bunch of and/or'd together conditions, return 2251 /// false. 2252 bool 2253 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) { 2254 if (Cases.size() != 2) return true; 2255 2256 // If this is two comparisons of the same values or'd or and'd together, they 2257 // will get folded into a single comparison, so don't emit two blocks. 2258 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 2259 Cases[0].CmpRHS == Cases[1].CmpRHS) || 2260 (Cases[0].CmpRHS == Cases[1].CmpLHS && 2261 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 2262 return false; 2263 } 2264 2265 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 2266 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 2267 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 2268 Cases[0].CC == Cases[1].CC && 2269 isa<Constant>(Cases[0].CmpRHS) && 2270 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 2271 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB) 2272 return false; 2273 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB) 2274 return false; 2275 } 2276 2277 return true; 2278 } 2279 2280 void SelectionDAGBuilder::visitBr(const BranchInst &I) { 2281 MachineBasicBlock *BrMBB = FuncInfo.MBB; 2282 2283 // Update machine-CFG edges. 2284 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 2285 2286 if (I.isUnconditional()) { 2287 // Update machine-CFG edges. 2288 BrMBB->addSuccessor(Succ0MBB); 2289 2290 // If this is not a fall-through branch or optimizations are switched off, 2291 // emit the branch. 2292 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None) 2293 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 2294 MVT::Other, getControlRoot(), 2295 DAG.getBasicBlock(Succ0MBB))); 2296 2297 return; 2298 } 2299 2300 // If this condition is one of the special cases we handle, do special stuff 2301 // now. 
2302 const Value *CondVal = I.getCondition(); 2303 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 2304 2305 // If this is a series of conditions that are or'd or and'd together, emit 2306 // this as a sequence of branches instead of setcc's with and/or operations. 2307 // As long as jumps are not expensive (exceptions for multi-use logic ops, 2308 // unpredictable branches, and vector extracts because those jumps are likely 2309 // expensive for any target), this should improve performance. 2310 // For example, instead of something like: 2311 // cmp A, B 2312 // C = seteq 2313 // cmp D, E 2314 // F = setle 2315 // or C, F 2316 // jnz foo 2317 // Emit: 2318 // cmp A, B 2319 // je foo 2320 // cmp D, E 2321 // jle foo 2322 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { 2323 Instruction::BinaryOps Opcode = BOp->getOpcode(); 2324 Value *Vec, *BOp0 = BOp->getOperand(0), *BOp1 = BOp->getOperand(1); 2325 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() && 2326 !I.hasMetadata(LLVMContext::MD_unpredictable) && 2327 (Opcode == Instruction::And || Opcode == Instruction::Or) && 2328 !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) && 2329 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) { 2330 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, 2331 Opcode, 2332 getEdgeProbability(BrMBB, Succ0MBB), 2333 getEdgeProbability(BrMBB, Succ1MBB), 2334 /*InvertCond=*/false); 2335 // If the compares in later blocks need to use values not currently 2336 // exported from this block, export them now. This block should always 2337 // be the first entry. 2338 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); 2339 2340 // Allow some cases to be rejected. 2341 if (ShouldEmitAsBranches(SL->SwitchCases)) { 2342 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) { 2343 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS); 2344 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS); 2345 } 2346 2347 // Emit the branch for this block. 2348 visitSwitchCase(SL->SwitchCases[0], BrMBB); 2349 SL->SwitchCases.erase(SL->SwitchCases.begin()); 2350 return; 2351 } 2352 2353 // Okay, we decided not to do this, remove any inserted MBB's and clear 2354 // SwitchCases. 2355 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) 2356 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB); 2357 2358 SL->SwitchCases.clear(); 2359 } 2360 } 2361 2362 // Create a CaseBlock record representing this branch. 2363 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), 2364 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc()); 2365 2366 // Use visitSwitchCase to actually insert the fast branch sequence for this 2367 // cond branch. 2368 visitSwitchCase(CB, BrMBB); 2369 } 2370 2371 /// visitSwitchCase - Emits the necessary code to represent a single node in 2372 /// the binary search tree resulting from lowering a switch instruction. 2373 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, 2374 MachineBasicBlock *SwitchBB) { 2375 SDValue Cond; 2376 SDValue CondLHS = getValue(CB.CmpLHS); 2377 SDLoc dl = CB.DL; 2378 2379 if (CB.CC == ISD::SETTRUE) { 2380 // Branch or fall through to TrueBB. 
2381 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb); 2382 SwitchBB->normalizeSuccProbs(); 2383 if (CB.TrueBB != NextBlock(SwitchBB)) { 2384 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(), 2385 DAG.getBasicBlock(CB.TrueBB))); 2386 } 2387 return; 2388 } 2389 2390 auto &TLI = DAG.getTargetLoweringInfo(); 2391 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType()); 2392 2393 // Build the setcc now. 2394 if (!CB.CmpMHS) { 2395 // Fold "(X == true)" to X and "(X == false)" to !X to 2396 // handle common cases produced by branch lowering. 2397 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) && 2398 CB.CC == ISD::SETEQ) 2399 Cond = CondLHS; 2400 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) && 2401 CB.CC == ISD::SETEQ) { 2402 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType()); 2403 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True); 2404 } else { 2405 SDValue CondRHS = getValue(CB.CmpRHS); 2406 2407 // If a pointer's DAG type is larger than its memory type then the DAG 2408 // values are zero-extended. This breaks signed comparisons so truncate 2409 // back to the underlying type before doing the compare. 2410 if (CondLHS.getValueType() != MemVT) { 2411 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT); 2412 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT); 2413 } 2414 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC); 2415 } 2416 } else { 2417 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now"); 2418 2419 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 2420 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 2421 2422 SDValue CmpOp = getValue(CB.CmpMHS); 2423 EVT VT = CmpOp.getValueType(); 2424 2425 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 2426 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT), 2427 ISD::SETLE); 2428 } else { 2429 SDValue SUB = DAG.getNode(ISD::SUB, dl, 2430 VT, CmpOp, DAG.getConstant(Low, dl, VT)); 2431 Cond = DAG.getSetCC(dl, MVT::i1, SUB, 2432 DAG.getConstant(High-Low, dl, VT), ISD::SETULE); 2433 } 2434 } 2435 2436 // Update successor info 2437 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb); 2438 // TrueBB and FalseBB are always different unless the incoming IR is 2439 // degenerate. This only happens when running llc on weird IR. 2440 if (CB.TrueBB != CB.FalseBB) 2441 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb); 2442 SwitchBB->normalizeSuccProbs(); 2443 2444 // If the lhs block is the next block, invert the condition so that we can 2445 // fall through to the lhs instead of the rhs block. 2446 if (CB.TrueBB == NextBlock(SwitchBB)) { 2447 std::swap(CB.TrueBB, CB.FalseBB); 2448 SDValue True = DAG.getConstant(1, dl, Cond.getValueType()); 2449 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True); 2450 } 2451 2452 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 2453 MVT::Other, getControlRoot(), Cond, 2454 DAG.getBasicBlock(CB.TrueBB)); 2455 2456 // Insert the false branch. Do this even if it's a fall through branch, 2457 // this makes it easier to do DAG optimizations which require inverting 2458 // the branch condition. 
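  // (The emitted pattern is thus always "BRCOND Cond, TrueBB" followed by
  // "BR FalseBB"; when FalseBB is the physical fallthrough, later branch
  // folding is expected to delete the redundant unconditional branch.)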
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}

/// visitJumpTable - Emit JumpTable node in the current MBB.
void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
  // Emit the code for the jump table.
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
                                    MVT::Other, Index.getValue(1),
                                    Table, Index);
  DAG.setRoot(BrJumpTable);
}

/// visitJumpTableHeader - This function emits the necessary code to produce
/// an index into the JumpTable from the value being switched on.
void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));

  unsigned JumpTableReg =
      FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  if (!JTH.OmitRangeCheck) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}

/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific
/// global variable, if one exists.
2537 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, 2538 SDValue &Chain) { 2539 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2540 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 2541 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout()); 2542 MachineFunction &MF = DAG.getMachineFunction(); 2543 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent()); 2544 MachineSDNode *Node = 2545 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain); 2546 if (Global) { 2547 MachinePointerInfo MPInfo(Global); 2548 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 2549 MachineMemOperand::MODereferenceable; 2550 MachineMemOperand *MemRef = MF.getMachineMemOperand( 2551 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy)); 2552 DAG.setNodeMemRefs(Node, {MemRef}); 2553 } 2554 if (PtrTy != PtrMemTy) 2555 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy); 2556 return SDValue(Node, 0); 2557 } 2558 2559 /// Codegen a new tail for a stack protector check ParentMBB which has had its 2560 /// tail spliced into a stack protector check success bb. 2561 /// 2562 /// For a high level explanation of how this fits into the stack protector 2563 /// generation see the comment on the declaration of class 2564 /// StackProtectorDescriptor. 2565 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, 2566 MachineBasicBlock *ParentBB) { 2567 2568 // First create the loads to the guard/stack slot for the comparison. 2569 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2570 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 2571 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout()); 2572 2573 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo(); 2574 int FI = MFI.getStackProtectorIndex(); 2575 2576 SDValue Guard; 2577 SDLoc dl = getCurSDLoc(); 2578 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); 2579 const Module &M = *ParentBB->getParent()->getFunction().getParent(); 2580 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext())); 2581 2582 // Generate code to load the content of the guard slot. 2583 SDValue GuardVal = DAG.getLoad( 2584 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr, 2585 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align, 2586 MachineMemOperand::MOVolatile); 2587 2588 if (TLI.useStackGuardXorFP()) 2589 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl); 2590 2591 // Retrieve guard check function, nullptr if instrumentation is inlined. 2592 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) { 2593 // The target provides a guard check function to validate the guard value. 2594 // Generate a call to that function with the content of the guard slot as 2595 // argument. 
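    // (For example, with MSVC-style stack protectors this is typically
    // __security_check_cookie, whose single cookie parameter may be marked
    // inreg -- hence the attribute handling below.)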
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode()) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    SDValue GuardPtr = getValue(IRGuard);

    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                        MachinePointerInfo(IRGuard, 0), Align,
                        MachineMemOperand::MOVolatile);
  }

  // Perform the comparison via getSetCC.
  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
                                                        *DAG.getContext(),
                                                        Guard.getValueType()),
                             Guard, GuardVal, ISD::SETNE);

  // If the guard value and the stack slot contents are not equal, branch to
  // the failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, GuardVal.getOperand(0),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to the success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}

/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setDiscardResult(true);
  SDValue Chain =
      TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                      None, CallOptions, getCurSDLoc()).second;
  // On PS4, the "return address" must still be within the calling function,
  // even if it's at the very end, so emit an explicit TRAP here.
  // Passing 'true' for doesNotReturn above won't generate the trap for us.
  if (TM.getTargetTriple().isPS4CPU())
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
  // WebAssembly needs an unreachable instruction after a non-returning call,
  // because the function return type can be different from __stack_chk_fail's
  // return type (void).
  if (TM.getTargetTriple().isWasm())
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}

/// visitBitTestHeader - This function emits the necessary code to produce a
/// value suitable for "bit tests".
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
                                             MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue RangeSub =
      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));

  // Determine the type of the test operands.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  SDValue Sub = RangeSub;
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue Root = CopyTo;
  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    SDValue RangeCmp = DAG.getSetCC(dl,
        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                               RangeSub.getValueType()),
        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
        ISD::SETUGT);

    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
                       DAG.getBasicBlock(B.Default));
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));

  DAG.setRoot(Root);
}

/// visitBitTestCase - This function produces one "bit test".
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
                                           MachineBasicBlock* NextMBB,
                                           BranchProbability BranchProbToNext,
                                           unsigned Reg,
                                           BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
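    // (Illustrative: for Mask = 0b1101 with Range = 4, the lone zero bit is
    // at index countTrailingOnes(0b1101) == 1, so the test below reduces to
    // "ShiftOp != 1".)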
2765 Cmp = DAG.getSetCC( 2766 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 2767 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT), 2768 ISD::SETNE); 2769 } else { 2770 // Make desired shift 2771 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT, 2772 DAG.getConstant(1, dl, VT), ShiftOp); 2773 2774 // Emit bit tests and jumps 2775 SDValue AndOp = DAG.getNode(ISD::AND, dl, 2776 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT)); 2777 Cmp = DAG.getSetCC( 2778 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 2779 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE); 2780 } 2781 2782 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb. 2783 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb); 2784 // The branch probability from SwitchBB to NextMBB is BranchProbToNext. 2785 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext); 2786 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is 2787 // one as they are relative probabilities (and thus work more like weights), 2788 // and hence we need to normalize them to let the sum of them become one. 2789 SwitchBB->normalizeSuccProbs(); 2790 2791 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl, 2792 MVT::Other, getControlRoot(), 2793 Cmp, DAG.getBasicBlock(B.TargetBB)); 2794 2795 // Avoid emitting unnecessary branches to the next block. 2796 if (NextMBB != NextBlock(SwitchBB)) 2797 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd, 2798 DAG.getBasicBlock(NextMBB)); 2799 2800 DAG.setRoot(BrAnd); 2801 } 2802 2803 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { 2804 MachineBasicBlock *InvokeMBB = FuncInfo.MBB; 2805 2806 // Retrieve successors. Look through artificial IR level blocks like 2807 // catchswitch for successors. 2808 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 2809 const BasicBlock *EHPadBB = I.getSuccessor(1); 2810 2811 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't 2812 // have to do anything here to lower funclet bundles. 2813 assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt, 2814 LLVMContext::OB_gc_transition, 2815 LLVMContext::OB_gc_live, 2816 LLVMContext::OB_funclet, 2817 LLVMContext::OB_cfguardtarget}) && 2818 "Cannot lower invokes with arbitrary operand bundles yet!"); 2819 2820 const Value *Callee(I.getCalledOperand()); 2821 const Function *Fn = dyn_cast<Function>(Callee); 2822 if (isa<InlineAsm>(Callee)) 2823 visitInlineAsm(I); 2824 else if (Fn && Fn->isIntrinsic()) { 2825 switch (Fn->getIntrinsicID()) { 2826 default: 2827 llvm_unreachable("Cannot invoke this intrinsic"); 2828 case Intrinsic::donothing: 2829 // Ignore invokes to @llvm.donothing: jump directly to the next BB. 2830 break; 2831 case Intrinsic::experimental_patchpoint_void: 2832 case Intrinsic::experimental_patchpoint_i64: 2833 visitPatchpoint(I, EHPadBB); 2834 break; 2835 case Intrinsic::experimental_gc_statepoint: 2836 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB); 2837 break; 2838 case Intrinsic::wasm_rethrow_in_catch: { 2839 // This is usually done in visitTargetIntrinsic, but this intrinsic is 2840 // special because it can be invoked, so we manually lower it to a DAG 2841 // node here. 
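      // A minimal sketch of what is built below: an INTRINSIC_VOID node that
      // takes the current root as its chain operand plus the intrinsic ID as
      // a target constant, and produces only an output chain -- the same
      // shape visitTargetIntrinsic gives a void intrinsic with no arguments.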
2842 SmallVector<SDValue, 8> Ops; 2843 Ops.push_back(getRoot()); // inchain 2844 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2845 Ops.push_back( 2846 DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(), 2847 TLI.getPointerTy(DAG.getDataLayout()))); 2848 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain 2849 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops)); 2850 break; 2851 } 2852 } 2853 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) { 2854 // Currently we do not lower any intrinsic calls with deopt operand bundles. 2855 // Eventually we will support lowering the @llvm.experimental.deoptimize 2856 // intrinsic, and right now there are no plans to support other intrinsics 2857 // with deopt state. 2858 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB); 2859 } else { 2860 LowerCallTo(I, getValue(Callee), false, EHPadBB); 2861 } 2862 2863 // If the value of the invoke is used outside of its defining block, make it 2864 // available as a virtual register. 2865 // We already took care of the exported value for the statepoint instruction 2866 // during call to the LowerStatepoint. 2867 if (!isa<GCStatepointInst>(I)) { 2868 CopyToExportRegsIfNeeded(&I); 2869 } 2870 2871 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 2872 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2873 BranchProbability EHPadBBProb = 2874 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) 2875 : BranchProbability::getZero(); 2876 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests); 2877 2878 // Update successor info. 2879 addSuccessorWithProb(InvokeMBB, Return); 2880 for (auto &UnwindDest : UnwindDests) { 2881 UnwindDest.first->setIsEHPad(); 2882 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second); 2883 } 2884 InvokeMBB->normalizeSuccProbs(); 2885 2886 // Drop into normal successor. 2887 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(), 2888 DAG.getBasicBlock(Return))); 2889 } 2890 2891 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) { 2892 MachineBasicBlock *CallBrMBB = FuncInfo.MBB; 2893 2894 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't 2895 // have to do anything here to lower funclet bundles. 2896 assert(!I.hasOperandBundlesOtherThan( 2897 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && 2898 "Cannot lower callbrs with arbitrary operand bundles yet!"); 2899 2900 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr"); 2901 visitInlineAsm(I); 2902 CopyToExportRegsIfNeeded(&I); 2903 2904 // Retrieve successors. 2905 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()]; 2906 2907 // Update successor info. 2908 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne()); 2909 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) { 2910 MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)]; 2911 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero()); 2912 Target->setIsInlineAsmBrIndirectTarget(); 2913 } 2914 CallBrMBB->normalizeSuccProbs(); 2915 2916 // Drop into default successor. 
2917 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 2918 MVT::Other, getControlRoot(), 2919 DAG.getBasicBlock(Return))); 2920 } 2921 2922 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) { 2923 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!"); 2924 } 2925 2926 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) { 2927 assert(FuncInfo.MBB->isEHPad() && 2928 "Call to landingpad not in landing pad!"); 2929 2930 // If there aren't registers to copy the values into (e.g., during SjLj 2931 // exceptions), then don't bother to create these DAG nodes. 2932 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2933 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn(); 2934 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 2935 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 2936 return; 2937 2938 // If landingpad's return type is token type, we don't create DAG nodes 2939 // for its exception pointer and selector value. The extraction of exception 2940 // pointer or selector value from token type landingpads is not currently 2941 // supported. 2942 if (LP.getType()->isTokenTy()) 2943 return; 2944 2945 SmallVector<EVT, 2> ValueVTs; 2946 SDLoc dl = getCurSDLoc(); 2947 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs); 2948 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); 2949 2950 // Get the two live-in registers as SDValues. The physregs have already been 2951 // copied into virtual registers. 2952 SDValue Ops[2]; 2953 if (FuncInfo.ExceptionPointerVirtReg) { 2954 Ops[0] = DAG.getZExtOrTrunc( 2955 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2956 FuncInfo.ExceptionPointerVirtReg, 2957 TLI.getPointerTy(DAG.getDataLayout())), 2958 dl, ValueVTs[0]); 2959 } else { 2960 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())); 2961 } 2962 Ops[1] = DAG.getZExtOrTrunc( 2963 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2964 FuncInfo.ExceptionSelectorVirtReg, 2965 TLI.getPointerTy(DAG.getDataLayout())), 2966 dl, ValueVTs[1]); 2967 2968 // Merge into one. 2969 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, 2970 DAG.getVTList(ValueVTs), Ops); 2971 setValue(&LP, Res); 2972 } 2973 2974 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, 2975 MachineBasicBlock *Last) { 2976 // Update JTCases. 2977 for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i) 2978 if (SL->JTCases[i].first.HeaderBB == First) 2979 SL->JTCases[i].first.HeaderBB = Last; 2980 2981 // Update BitTestCases. 2982 for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i) 2983 if (SL->BitTestCases[i].Parent == First) 2984 SL->BitTestCases[i].Parent = Last; 2985 } 2986 2987 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { 2988 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; 2989 2990 // Update machine-CFG edges with unique successors. 
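  // Note that an indirectbr may list the same block more than once, e.g.
  //   indirectbr i8* %addr, [label %bb1, label %bb2, label %bb1]
  // so each block must be added as a machine-CFG successor only once.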
2991 SmallSet<BasicBlock*, 32> Done; 2992 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { 2993 BasicBlock *BB = I.getSuccessor(i); 2994 bool Inserted = Done.insert(BB).second; 2995 if (!Inserted) 2996 continue; 2997 2998 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB]; 2999 addSuccessorWithProb(IndirectBrMBB, Succ); 3000 } 3001 IndirectBrMBB->normalizeSuccProbs(); 3002 3003 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(), 3004 MVT::Other, getControlRoot(), 3005 getValue(I.getAddress()))); 3006 } 3007 3008 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) { 3009 if (!DAG.getTarget().Options.TrapUnreachable) 3010 return; 3011 3012 // We may be able to ignore unreachable behind a noreturn call. 3013 if (DAG.getTarget().Options.NoTrapAfterNoreturn) { 3014 const BasicBlock &BB = *I.getParent(); 3015 if (&I != &BB.front()) { 3016 BasicBlock::const_iterator PredI = 3017 std::prev(BasicBlock::const_iterator(&I)); 3018 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) { 3019 if (Call->doesNotReturn()) 3020 return; 3021 } 3022 } 3023 } 3024 3025 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot())); 3026 } 3027 3028 void SelectionDAGBuilder::visitFSub(const User &I) { 3029 // -0.0 - X --> fneg 3030 Type *Ty = I.getType(); 3031 if (isa<Constant>(I.getOperand(0)) && 3032 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) { 3033 SDValue Op2 = getValue(I.getOperand(1)); 3034 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(), 3035 Op2.getValueType(), Op2)); 3036 return; 3037 } 3038 3039 visitBinary(I, ISD::FSUB); 3040 } 3041 3042 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) { 3043 SDNodeFlags Flags; 3044 3045 SDValue Op = getValue(I.getOperand(0)); 3046 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(), 3047 Op, Flags); 3048 setValue(&I, UnNodeValue); 3049 } 3050 3051 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) { 3052 SDNodeFlags Flags; 3053 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) { 3054 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap()); 3055 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap()); 3056 } 3057 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) { 3058 Flags.setExact(ExactOp->isExact()); 3059 } 3060 3061 SDValue Op1 = getValue(I.getOperand(0)); 3062 SDValue Op2 = getValue(I.getOperand(1)); 3063 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), 3064 Op1, Op2, Flags); 3065 setValue(&I, BinNodeValue); 3066 } 3067 3068 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { 3069 SDValue Op1 = getValue(I.getOperand(0)); 3070 SDValue Op2 = getValue(I.getOperand(1)); 3071 3072 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( 3073 Op1.getValueType(), DAG.getDataLayout()); 3074 3075 // Coerce the shift amount to the right type if we can. 3076 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { 3077 unsigned ShiftSize = ShiftTy.getSizeInBits(); 3078 unsigned Op2Size = Op2.getValueSizeInBits(); 3079 SDLoc DL = getCurSDLoc(); 3080 3081 // If the operand is smaller than the shift count type, promote it. 3082 if (ShiftSize > Op2Size) 3083 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2); 3084 3085 // If the operand is larger than the shift count type but the shift 3086 // count type has enough bits to represent any shift value, truncate 3087 // it now. This is a common case and it exposes the truncate to 3088 // optimization early. 
3089 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits())) 3090 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2); 3091 // Otherwise we'll need to temporarily settle for some other convenient 3092 // type. Type legalization will make adjustments once the shiftee is split. 3093 else 3094 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32); 3095 } 3096 3097 bool nuw = false; 3098 bool nsw = false; 3099 bool exact = false; 3100 3101 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) { 3102 3103 if (const OverflowingBinaryOperator *OFBinOp = 3104 dyn_cast<const OverflowingBinaryOperator>(&I)) { 3105 nuw = OFBinOp->hasNoUnsignedWrap(); 3106 nsw = OFBinOp->hasNoSignedWrap(); 3107 } 3108 if (const PossiblyExactOperator *ExactOp = 3109 dyn_cast<const PossiblyExactOperator>(&I)) 3110 exact = ExactOp->isExact(); 3111 } 3112 SDNodeFlags Flags; 3113 Flags.setExact(exact); 3114 Flags.setNoSignedWrap(nsw); 3115 Flags.setNoUnsignedWrap(nuw); 3116 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2, 3117 Flags); 3118 setValue(&I, Res); 3119 } 3120 3121 void SelectionDAGBuilder::visitSDiv(const User &I) { 3122 SDValue Op1 = getValue(I.getOperand(0)); 3123 SDValue Op2 = getValue(I.getOperand(1)); 3124 3125 SDNodeFlags Flags; 3126 Flags.setExact(isa<PossiblyExactOperator>(&I) && 3127 cast<PossiblyExactOperator>(&I)->isExact()); 3128 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1, 3129 Op2, Flags)); 3130 } 3131 3132 void SelectionDAGBuilder::visitICmp(const User &I) { 3133 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE; 3134 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I)) 3135 predicate = IC->getPredicate(); 3136 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) 3137 predicate = ICmpInst::Predicate(IC->getPredicate()); 3138 SDValue Op1 = getValue(I.getOperand(0)); 3139 SDValue Op2 = getValue(I.getOperand(1)); 3140 ISD::CondCode Opcode = getICmpCondCode(predicate); 3141 3142 auto &TLI = DAG.getTargetLoweringInfo(); 3143 EVT MemVT = 3144 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 3145 3146 // If a pointer's DAG type is larger than its memory type then the DAG values 3147 // are zero-extended. This breaks signed comparisons so truncate back to the 3148 // underlying type before doing the compare. 
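  // For example, on a target where pointers occupy 32 bits in memory but are
  // zero-extended to 64-bit DAG values, a pointer whose bit 31 is set would
  // compare as a large positive number under a 64-bit signed compare;
  // truncating back to 32 bits first restores the intended ordering.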
3149 if (Op1.getValueType() != MemVT) { 3150 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT); 3151 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT); 3152 } 3153 3154 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3155 I.getType()); 3156 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode)); 3157 } 3158 3159 void SelectionDAGBuilder::visitFCmp(const User &I) { 3160 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; 3161 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I)) 3162 predicate = FC->getPredicate(); 3163 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 3164 predicate = FCmpInst::Predicate(FC->getPredicate()); 3165 SDValue Op1 = getValue(I.getOperand(0)); 3166 SDValue Op2 = getValue(I.getOperand(1)); 3167 3168 ISD::CondCode Condition = getFCmpCondCode(predicate); 3169 auto *FPMO = dyn_cast<FPMathOperator>(&I); 3170 if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath) 3171 Condition = getFCmpCodeWithoutNaN(Condition); 3172 3173 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3174 I.getType()); 3175 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition)); 3176 } 3177 3178 // Check that every user of the select's condition is a select, i.e. the 3179 // condition has no other use that would keep the underlying compare alive. 3180 static bool hasOnlySelectUsers(const Value *Cond) { 3181 return llvm::all_of(Cond->users(), [](const Value *V) { 3182 return isa<SelectInst>(V); 3183 }); 3184 } 3185 3186 void SelectionDAGBuilder::visitSelect(const User &I) { 3187 SmallVector<EVT, 4> ValueVTs; 3188 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(), 3189 ValueVTs); 3190 unsigned NumValues = ValueVTs.size(); 3191 if (NumValues == 0) return; 3192 3193 SmallVector<SDValue, 4> Values(NumValues); 3194 SDValue Cond = getValue(I.getOperand(0)); 3195 SDValue LHSVal = getValue(I.getOperand(1)); 3196 SDValue RHSVal = getValue(I.getOperand(2)); 3197 SmallVector<SDValue, 1> BaseOps(1, Cond); 3198 ISD::NodeType OpCode = 3199 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT; 3200 3201 bool IsUnaryAbs = false; 3202 3203 // Min/max matching is only viable if all output VTs are the same. 3204 if (is_splat(ValueVTs)) { 3205 EVT VT = ValueVTs[0]; 3206 LLVMContext &Ctx = *DAG.getContext(); 3207 auto &TLI = DAG.getTargetLoweringInfo(); 3208 3209 // We care about the legality of the operation after it has been type 3210 // legalized. 3211 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal) 3212 VT = TLI.getTypeToTransformTo(Ctx, VT); 3213 3214 // If the vselect is legal, assume we want to leave this as a vector setcc + 3215 // vselect. Otherwise, if this is going to be scalarized, we want to see if 3216 // min/max is legal on the scalar type.
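    // For example, '%s = select i1 %c, i32 %a, i32 %b' with
    // '%c = icmp slt i32 %a, %b' can be matched below to a single ISD::SMIN
    // node, provided smin is legal or custom for the chosen VT.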
3217 bool UseScalarMinMax = VT.isVector() && 3218 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT); 3219 3220 Value *LHS, *RHS; 3221 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS); 3222 ISD::NodeType Opc = ISD::DELETED_NODE; 3223 switch (SPR.Flavor) { 3224 case SPF_UMAX: Opc = ISD::UMAX; break; 3225 case SPF_UMIN: Opc = ISD::UMIN; break; 3226 case SPF_SMAX: Opc = ISD::SMAX; break; 3227 case SPF_SMIN: Opc = ISD::SMIN; break; 3228 case SPF_FMINNUM: 3229 switch (SPR.NaNBehavior) { 3230 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); 3231 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break; 3232 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break; 3233 case SPNB_RETURNS_ANY: { 3234 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT)) 3235 Opc = ISD::FMINNUM; 3236 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT)) 3237 Opc = ISD::FMINIMUM; 3238 else if (UseScalarMinMax) 3239 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ? 3240 ISD::FMINNUM : ISD::FMINIMUM; 3241 break; 3242 } 3243 } 3244 break; 3245 case SPF_FMAXNUM: 3246 switch (SPR.NaNBehavior) { 3247 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); 3248 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break; 3249 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break; 3250 case SPNB_RETURNS_ANY: 3251 3252 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT)) 3253 Opc = ISD::FMAXNUM; 3254 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT)) 3255 Opc = ISD::FMAXIMUM; 3256 else if (UseScalarMinMax) 3257 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ? 3258 ISD::FMAXNUM : ISD::FMAXIMUM; 3259 break; 3260 } 3261 break; 3262 case SPF_ABS: 3263 IsUnaryAbs = true; 3264 Opc = ISD::ABS; 3265 break; 3266 case SPF_NABS: 3267 // TODO: we need to produce sub(0, abs(X)). 3268 default: break; 3269 } 3270 3271 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE && 3272 (TLI.isOperationLegalOrCustom(Opc, VT) || 3273 (UseScalarMinMax && 3274 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) && 3275 // If the underlying comparison instruction is used by any other 3276 // instruction, the consumed instructions won't be destroyed, so it is 3277 // not profitable to convert to a min/max. 3278 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) { 3279 OpCode = Opc; 3280 LHSVal = getValue(LHS); 3281 RHSVal = getValue(RHS); 3282 BaseOps.clear(); 3283 } 3284 3285 if (IsUnaryAbs) { 3286 OpCode = Opc; 3287 LHSVal = getValue(LHS); 3288 BaseOps.clear(); 3289 } 3290 } 3291 3292 if (IsUnaryAbs) { 3293 for (unsigned i = 0; i != NumValues; ++i) { 3294 Values[i] = 3295 DAG.getNode(OpCode, getCurSDLoc(), 3296 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), 3297 SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); 3298 } 3299 } else { 3300 for (unsigned i = 0; i != NumValues; ++i) { 3301 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); 3302 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); 3303 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); 3304 Values[i] = DAG.getNode( 3305 OpCode, getCurSDLoc(), 3306 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops); 3307 } 3308 } 3309 3310 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3311 DAG.getVTList(ValueVTs), Values)); 3312 } 3313 3314 void SelectionDAGBuilder::visitTrunc(const User &I) { 3315 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 
3316 SDValue N = getValue(I.getOperand(0)); 3317 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3318 I.getType()); 3319 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); 3320 } 3321 3322 void SelectionDAGBuilder::visitZExt(const User &I) { 3323 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 3324 // ZExt also can't be a cast to bool for the same reason. So, nothing much to do 3325 SDValue N = getValue(I.getOperand(0)); 3326 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3327 I.getType()); 3328 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N)); 3329 } 3330 3331 void SelectionDAGBuilder::visitSExt(const User &I) { 3332 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 3333 // SExt also can't be a cast to bool for the same reason. So, nothing much to do 3334 SDValue N = getValue(I.getOperand(0)); 3335 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3336 I.getType()); 3337 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 3338 } 3339 3340 void SelectionDAGBuilder::visitFPTrunc(const User &I) { 3341 // FPTrunc is never a no-op cast, no need to check 3342 SDValue N = getValue(I.getOperand(0)); 3343 SDLoc dl = getCurSDLoc(); 3344 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3345 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3346 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N, 3347 DAG.getTargetConstant( 3348 0, dl, TLI.getPointerTy(DAG.getDataLayout())))); 3349 } 3350 3351 void SelectionDAGBuilder::visitFPExt(const User &I) { 3352 // FPExt is never a no-op cast, no need to check 3353 SDValue N = getValue(I.getOperand(0)); 3354 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3355 I.getType()); 3356 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); 3357 } 3358 3359 void SelectionDAGBuilder::visitFPToUI(const User &I) { 3360 // FPToUI is never a no-op cast, no need to check 3361 SDValue N = getValue(I.getOperand(0)); 3362 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3363 I.getType()); 3364 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); 3365 } 3366 3367 void SelectionDAGBuilder::visitFPToSI(const User &I) { 3368 // FPToSI is never a no-op cast, no need to check 3369 SDValue N = getValue(I.getOperand(0)); 3370 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3371 I.getType()); 3372 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); 3373 } 3374 3375 void SelectionDAGBuilder::visitUIToFP(const User &I) { 3376 // UIToFP is never a no-op cast, no need to check 3377 SDValue N = getValue(I.getOperand(0)); 3378 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3379 I.getType()); 3380 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N)); 3381 } 3382 3383 void SelectionDAGBuilder::visitSIToFP(const User &I) { 3384 // SIToFP is never a no-op cast, no need to check 3385 SDValue N = getValue(I.getOperand(0)); 3386 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3387 I.getType()); 3388 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); 3389 } 3390 3391 void SelectionDAGBuilder::visitPtrToInt(const User &I) { 3392 // What to do depends on the size of the integer and the size of the pointer. 3393 // We can either truncate, zero extend, or no-op, accordingly.
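  // For example, assuming 64-bit pointers:
  //   %i = ptrtoint i8* %p to i32    ; truncate
  //   %i = ptrtoint i8* %p to i128   ; zero extend
  //   %i = ptrtoint i8* %p to i64    ; no-op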
3394 SDValue N = getValue(I.getOperand(0)); 3395 auto &TLI = DAG.getTargetLoweringInfo(); 3396 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3397 I.getType()); 3398 EVT PtrMemVT = 3399 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 3400 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT); 3401 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT); 3402 setValue(&I, N); 3403 } 3404 3405 void SelectionDAGBuilder::visitIntToPtr(const User &I) { 3406 // What to do depends on the size of the integer and the size of the pointer. 3407 // We can either truncate, zero extend, or no-op, accordingly. 3408 SDValue N = getValue(I.getOperand(0)); 3409 auto &TLI = DAG.getTargetLoweringInfo(); 3410 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3411 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType()); 3412 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT); 3413 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT); 3414 setValue(&I, N); 3415 } 3416 3417 void SelectionDAGBuilder::visitBitCast(const User &I) { 3418 SDValue N = getValue(I.getOperand(0)); 3419 SDLoc dl = getCurSDLoc(); 3420 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3421 I.getType()); 3422 3423 // BitCast assures us that source and destination are the same size so this is 3424 // either a BITCAST or a no-op. 3425 if (DestVT != N.getValueType()) 3426 setValue(&I, DAG.getNode(ISD::BITCAST, dl, 3427 DestVT, N)); // convert types. 3428 // Check if the original LLVM IR Operand was a ConstantInt, because getValue() 3429 // might fold any kind of constant expression to an integer constant and that 3430 // is not what we are looking for. Only recognize a bitcast of a genuine 3431 // constant integer as an opaque constant. 3432 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0))) 3433 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false, 3434 /*isOpaque*/true)); 3435 else 3436 setValue(&I, N); // noop cast. 
3437 } 3438 3439 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) { 3440 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3441 const Value *SV = I.getOperand(0); 3442 SDValue N = getValue(SV); 3443 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3444 3445 unsigned SrcAS = SV->getType()->getPointerAddressSpace(); 3446 unsigned DestAS = I.getType()->getPointerAddressSpace(); 3447 3448 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 3449 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS); 3450 3451 setValue(&I, N); 3452 } 3453 3454 void SelectionDAGBuilder::visitInsertElement(const User &I) { 3455 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3456 SDValue InVec = getValue(I.getOperand(0)); 3457 SDValue InVal = getValue(I.getOperand(1)); 3458 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(), 3459 TLI.getVectorIdxTy(DAG.getDataLayout())); 3460 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(), 3461 TLI.getValueType(DAG.getDataLayout(), I.getType()), 3462 InVec, InVal, InIdx)); 3463 } 3464 3465 void SelectionDAGBuilder::visitExtractElement(const User &I) { 3466 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3467 SDValue InVec = getValue(I.getOperand(0)); 3468 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(), 3469 TLI.getVectorIdxTy(DAG.getDataLayout())); 3470 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(), 3471 TLI.getValueType(DAG.getDataLayout(), I.getType()), 3472 InVec, InIdx)); 3473 } 3474 3475 void SelectionDAGBuilder::visitShuffleVector(const User &I) { 3476 SDValue Src1 = getValue(I.getOperand(0)); 3477 SDValue Src2 = getValue(I.getOperand(1)); 3478 ArrayRef<int> Mask; 3479 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I)) 3480 Mask = SVI->getShuffleMask(); 3481 else 3482 Mask = cast<ConstantExpr>(I).getShuffleMask(); 3483 SDLoc DL = getCurSDLoc(); 3484 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3485 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3486 EVT SrcVT = Src1.getValueType(); 3487 3488 if (all_of(Mask, [](int Elem) { return Elem == 0; }) && 3489 VT.isScalableVector()) { 3490 // Canonical splat form of first element of first input vector. 3491 SDValue FirstElt = 3492 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1, 3493 DAG.getVectorIdxConstant(0, DL)); 3494 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt)); 3495 return; 3496 } 3497 3498 // For now, we only handle splats for scalable vectors. 3499 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation 3500 // for targets that support a SPLAT_VECTOR for non-scalable vector types. 3501 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle"); 3502 3503 unsigned SrcNumElts = SrcVT.getVectorNumElements(); 3504 unsigned MaskNumElts = Mask.size(); 3505 3506 if (SrcNumElts == MaskNumElts) { 3507 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask)); 3508 return; 3509 } 3510 3511 // Normalize the shuffle vector since mask and vector length don't match. 3512 if (SrcNumElts < MaskNumElts) { 3513 // Mask is longer than the source vectors. We can use concatenate vector to 3514 // make the mask and vectors lengths match. 3515 3516 if (MaskNumElts % SrcNumElts == 0) { 3517 // Mask length is a multiple of the source vector length. 3518 // Check if the shuffle is some kind of concatenation of the input 3519 // vectors. 
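      // For example, shuffling two <2 x i32> sources into a <4 x i32> result
      // with mask <0, 1, 2, 3> is exactly concat_vectors(Src1, Src2).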
3520 unsigned NumConcat = MaskNumElts / SrcNumElts; 3521 bool IsConcat = true; 3522 SmallVector<int, 8> ConcatSrcs(NumConcat, -1); 3523 for (unsigned i = 0; i != MaskNumElts; ++i) { 3524 int Idx = Mask[i]; 3525 if (Idx < 0) 3526 continue; 3527 // Ensure the indices in each SrcVT sized piece are sequential and that 3528 // the same source is used for the whole piece. 3529 if ((Idx % SrcNumElts != (i % SrcNumElts)) || 3530 (ConcatSrcs[i / SrcNumElts] >= 0 && 3531 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) { 3532 IsConcat = false; 3533 break; 3534 } 3535 // Remember which source this index came from. 3536 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; 3537 } 3538 3539 // The shuffle is concatenating multiple vectors together. Just emit 3540 // a CONCAT_VECTORS operation. 3541 if (IsConcat) { 3542 SmallVector<SDValue, 8> ConcatOps; 3543 for (auto Src : ConcatSrcs) { 3544 if (Src < 0) 3545 ConcatOps.push_back(DAG.getUNDEF(SrcVT)); 3546 else if (Src == 0) 3547 ConcatOps.push_back(Src1); 3548 else 3549 ConcatOps.push_back(Src2); 3550 } 3551 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps)); 3552 return; 3553 } 3554 } 3555 3556 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts); 3557 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts; 3558 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), 3559 PaddedMaskNumElts); 3560 3561 // Pad both vectors with undefs to make them the same length as the mask. 3562 SDValue UndefVal = DAG.getUNDEF(SrcVT); 3563 3564 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); 3565 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); 3566 MOps1[0] = Src1; 3567 MOps2[0] = Src2; 3568 3569 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1); 3570 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2); 3571 3572 // Readjust mask for new input vector length. 3573 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1); 3574 for (unsigned i = 0; i != MaskNumElts; ++i) { 3575 int Idx = Mask[i]; 3576 if (Idx >= (int)SrcNumElts) 3577 Idx -= SrcNumElts - PaddedMaskNumElts; 3578 MappedOps[i] = Idx; 3579 } 3580 3581 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps); 3582 3583 // If the concatenated vector was padded, extract a subvector with the 3584 // correct number of elements. 3585 if (MaskNumElts != PaddedMaskNumElts) 3586 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result, 3587 DAG.getVectorIdxConstant(0, DL)); 3588 3589 setValue(&I, Result); 3590 return; 3591 } 3592 3593 if (SrcNumElts > MaskNumElts) { 3594 // Analyze the access pattern of the vector to see if we can extract 3595 // two subvectors and do the shuffle. 3596 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from 3597 bool CanExtract = true; 3598 for (int Idx : Mask) { 3599 unsigned Input = 0; 3600 if (Idx < 0) 3601 continue; 3602 3603 if (Idx >= (int)SrcNumElts) { 3604 Input = 1; 3605 Idx -= SrcNumElts; 3606 } 3607 3608 // If all the indices come from the same MaskNumElts sized portion of 3609 // the sources we can use extract. Also make sure the extract wouldn't 3610 // extract past the end of the source. 3611 int NewStartIdx = alignDown(Idx, MaskNumElts); 3612 if (NewStartIdx + MaskNumElts > SrcNumElts || 3613 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx)) 3614 CanExtract = false; 3615 // Make sure we always update StartIdx as we use it to track if all 3616 // elements are undef. 
3617 StartIdx[Input] = NewStartIdx; 3618 } 3619 3620 if (StartIdx[0] < 0 && StartIdx[1] < 0) { 3621 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used. 3622 return; 3623 } 3624 if (CanExtract) { 3625 // Extract appropriate subvector and generate a vector shuffle 3626 for (unsigned Input = 0; Input < 2; ++Input) { 3627 SDValue &Src = Input == 0 ? Src1 : Src2; 3628 if (StartIdx[Input] < 0) 3629 Src = DAG.getUNDEF(VT); 3630 else { 3631 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src, 3632 DAG.getVectorIdxConstant(StartIdx[Input], DL)); 3633 } 3634 } 3635 3636 // Calculate new mask. 3637 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end()); 3638 for (int &Idx : MappedOps) { 3639 if (Idx >= (int)SrcNumElts) 3640 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts; 3641 else if (Idx >= 0) 3642 Idx -= StartIdx[0]; 3643 } 3644 3645 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps)); 3646 return; 3647 } 3648 } 3649 3650 // We can't use either concat vectors or extract subvectors so fall back to 3651 // replacing the shuffle with a sequence of extract and build vector 3652 // operations, one element at a time. 3653 EVT EltVT = VT.getVectorElementType(); 3654 SmallVector<SDValue,8> Ops; 3655 for (int Idx : Mask) { 3656 SDValue Res; 3657 3658 if (Idx < 0) { 3659 Res = DAG.getUNDEF(EltVT); 3660 } else { 3661 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2; 3662 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts; 3663 3664 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src, 3665 DAG.getVectorIdxConstant(Idx, DL)); 3666 } 3667 3668 Ops.push_back(Res); 3669 } 3670 3671 setValue(&I, DAG.getBuildVector(VT, DL, Ops)); 3672 } 3673 3674 void SelectionDAGBuilder::visitInsertValue(const User &I) { 3675 ArrayRef<unsigned> Indices; 3676 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I)) 3677 Indices = IV->getIndices(); 3678 else 3679 Indices = cast<ConstantExpr>(&I)->getIndices(); 3680 3681 const Value *Op0 = I.getOperand(0); 3682 const Value *Op1 = I.getOperand(1); 3683 Type *AggTy = I.getType(); 3684 Type *ValTy = Op1->getType(); 3685 bool IntoUndef = isa<UndefValue>(Op0); 3686 bool FromUndef = isa<UndefValue>(Op1); 3687 3688 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices); 3689 3690 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3691 SmallVector<EVT, 4> AggValueVTs; 3692 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs); 3693 SmallVector<EVT, 4> ValValueVTs; 3694 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs); 3695 3696 unsigned NumAggValues = AggValueVTs.size(); 3697 unsigned NumValValues = ValValueVTs.size(); 3698 SmallVector<SDValue, 4> Values(NumAggValues); 3699 3700 // Ignore an insertvalue that produces an empty object 3701 if (!NumAggValues) { 3702 setValue(&I, DAG.getUNDEF(MVT(MVT::Other))); 3703 return; 3704 } 3705 3706 SDValue Agg = getValue(Op0); 3707 unsigned i = 0; 3708 // Copy the beginning value(s) from the original aggregate. 3709 for (; i != LinearIndex; ++i) 3710 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) : 3711 SDValue(Agg.getNode(), Agg.getResNo() + i); 3712 // Copy values from the inserted value(s). 3713 if (NumValValues) { 3714 SDValue Val = getValue(Op1); 3715 for (; i != LinearIndex + NumValValues; ++i) 3716 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) : 3717 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex); 3718 } 3719 // Copy remaining value(s) from the original aggregate. 3720 for (; i != NumAggValues; ++i) 3721 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) : 3722 SDValue(Agg.getNode(), Agg.getResNo() + i); 3723 3724 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3725 DAG.getVTList(AggValueVTs), Values)); 3726 } 3727 3728 void SelectionDAGBuilder::visitExtractValue(const User &I) { 3729 ArrayRef<unsigned> Indices; 3730 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I)) 3731 Indices = EV->getIndices(); 3732 else 3733 Indices = cast<ConstantExpr>(&I)->getIndices(); 3734 3735 const Value *Op0 = I.getOperand(0); 3736 Type *AggTy = Op0->getType(); 3737 Type *ValTy = I.getType(); 3738 bool OutOfUndef = isa<UndefValue>(Op0); 3739 3740 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices); 3741 3742 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3743 SmallVector<EVT, 4> ValValueVTs; 3744 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs); 3745 3746 unsigned NumValValues = ValValueVTs.size(); 3747 3748 // Ignore an extractvalue that produces an empty object 3749 if (!NumValValues) { 3750 setValue(&I, DAG.getUNDEF(MVT(MVT::Other))); 3751 return; 3752 } 3753 3754 SmallVector<SDValue, 4> Values(NumValValues); 3755 3756 SDValue Agg = getValue(Op0); 3757 // Copy out the selected value(s). 3758 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i) 3759 Values[i - LinearIndex] = 3760 OutOfUndef ? 3761 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) : 3762 SDValue(Agg.getNode(), Agg.getResNo() + i); 3763 3764 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3765 DAG.getVTList(ValValueVTs), Values)); 3766 } 3767 3768 void SelectionDAGBuilder::visitGetElementPtr(const User &I) { 3769 Value *Op0 = I.getOperand(0); 3770 // Note that the pointer operand may be a vector of pointers. Take the scalar 3771 // element which holds a pointer. 3772 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace(); 3773 SDValue N = getValue(Op0); 3774 SDLoc dl = getCurSDLoc(); 3775 auto &TLI = DAG.getTargetLoweringInfo(); 3776 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS); 3777 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS); 3778 3779 // Normalize Vector GEP - all scalar operands should be converted to the 3780 // splat vector. 3781 bool IsVectorGEP = I.getType()->isVectorTy(); 3782 ElementCount VectorElementCount = 3783 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount() 3784 : ElementCount(0, false); 3785 3786 if (IsVectorGEP && !N.getValueType().isVector()) { 3787 LLVMContext &Context = *DAG.getContext(); 3788 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount); 3789 if (VectorElementCount.Scalable) 3790 N = DAG.getSplatVector(VT, dl, N); 3791 else 3792 N = DAG.getSplatBuildVector(VT, dl, N); 3793 } 3794 3795 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I); 3796 GTI != E; ++GTI) { 3797 const Value *Idx = GTI.getOperand(); 3798 if (StructType *StTy = GTI.getStructTypeOrNull()) { 3799 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 3800 if (Field) { 3801 // N = N + Offset 3802 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field); 3803 3804 // In an inbounds GEP with an offset that is nonnegative even when 3805 // interpreted as signed, assume there is no unsigned overflow.
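        // For example, indexing field 2 of '{ i8, i8, i32 }' (offset 4 under
        // natural alignment) in an inbounds GEP lets us tag the ADD below
        // with the nuw flag.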
3806 SDNodeFlags Flags; 3807 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds()) 3808 Flags.setNoUnsignedWrap(true); 3809 3810 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, 3811 DAG.getConstant(Offset, dl, N.getValueType()), Flags); 3812 } 3813 } else { 3814 // IdxSize is the width of the arithmetic according to IR semantics. 3815 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth 3816 // (and fix up the result later). 3817 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS); 3818 MVT IdxTy = MVT::getIntegerVT(IdxSize); 3819 TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 3820 // We intentionally mask away the high bits here; ElementSize may not 3821 // fit in IdxTy. 3822 APInt ElementMul(IdxSize, ElementSize.getKnownMinSize()); 3823 bool ElementScalable = ElementSize.isScalable(); 3824 3825 // If this is a scalar constant or a splat vector of constants, 3826 // handle it quickly. 3827 const auto *C = dyn_cast<Constant>(Idx); 3828 if (C && isa<VectorType>(C->getType())) 3829 C = C->getSplatValue(); 3830 3831 const auto *CI = dyn_cast_or_null<ConstantInt>(C); 3832 if (CI && CI->isZero()) 3833 continue; 3834 if (CI && !ElementScalable) { 3835 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize); 3836 LLVMContext &Context = *DAG.getContext(); 3837 SDValue OffsVal; 3838 if (IsVectorGEP) 3839 OffsVal = DAG.getConstant( 3840 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount)); 3841 else 3842 OffsVal = DAG.getConstant(Offs, dl, IdxTy); 3843 3844 // In an inbounds GEP with an offset that is nonnegative even when 3845 // interpreted as signed, assume there is no unsigned overflow. 3846 SDNodeFlags Flags; 3847 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds()) 3848 Flags.setNoUnsignedWrap(true); 3849 3850 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType()); 3851 3852 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags); 3853 continue; 3854 } 3855 3856 // N = N + Idx * ElementMul; 3857 SDValue IdxN = getValue(Idx); 3858 3859 if (!IdxN.getValueType().isVector() && IsVectorGEP) { 3860 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), 3861 VectorElementCount); 3862 if (VectorElementCount.Scalable) 3863 IdxN = DAG.getSplatVector(VT, dl, IdxN); 3864 else 3865 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN); 3866 } 3867 3868 // If the index is smaller or larger than intptr_t, truncate or extend 3869 // it. 3870 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType()); 3871 3872 if (ElementScalable) { 3873 EVT VScaleTy = N.getValueType().getScalarType(); 3874 SDValue VScale = DAG.getNode( 3875 ISD::VSCALE, dl, VScaleTy, 3876 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy)); 3877 if (IsVectorGEP) 3878 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale); 3879 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale); 3880 } else { 3881 // If this is a multiply by a power of two, turn it into a shl 3882 // immediately. This is a very common case. 
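        // For example, for 'getelementptr i32, i32* %p, i64 %i' the element
        // size is 4, so the scaled index is emitted as (shl %i, 2) rather
        // than (mul %i, 4).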
3883 if (ElementMul != 1) { 3884 if (ElementMul.isPowerOf2()) { 3885 unsigned Amt = ElementMul.logBase2(); 3886 IdxN = DAG.getNode(ISD::SHL, dl, 3887 N.getValueType(), IdxN, 3888 DAG.getConstant(Amt, dl, IdxN.getValueType())); 3889 } else { 3890 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl, 3891 IdxN.getValueType()); 3892 IdxN = DAG.getNode(ISD::MUL, dl, 3893 N.getValueType(), IdxN, Scale); 3894 } 3895 } 3896 } 3897 3898 N = DAG.getNode(ISD::ADD, dl, 3899 N.getValueType(), N, IdxN); 3900 } 3901 } 3902 3903 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds()) 3904 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy); 3905 3906 setValue(&I, N); 3907 } 3908 3909 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { 3910 // If this is a fixed sized alloca in the entry block of the function, 3911 // allocate it statically on the stack. 3912 if (FuncInfo.StaticAllocaMap.count(&I)) 3913 return; // getValue will auto-populate this. 3914 3915 SDLoc dl = getCurSDLoc(); 3916 Type *Ty = I.getAllocatedType(); 3917 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3918 auto &DL = DAG.getDataLayout(); 3919 uint64_t TySize = DL.getTypeAllocSize(Ty); 3920 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign()); 3921 3922 SDValue AllocSize = getValue(I.getArraySize()); 3923 3924 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace()); 3925 if (AllocSize.getValueType() != IntPtr) 3926 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr); 3927 3928 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, 3929 AllocSize, 3930 DAG.getConstant(TySize, dl, IntPtr)); 3931 3932 // Handle alignment. If the requested alignment is less than or equal to 3933 // the stack alignment, ignore it. Otherwise, record the requested alignment 3934 // in the DYNAMIC_STACKALLOC node. 3935 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign(); 3936 if (*Alignment <= StackAlign) 3937 Alignment = None; 3938 3939 const uint64_t StackAlignMask = StackAlign.value() - 1U; 3940 // Round the size of the allocation up to the stack alignment size 3941 // by adding SA-1 to the size. This doesn't overflow because we're computing 3942 // an address inside an alloca. 3943 SDNodeFlags Flags; 3944 Flags.setNoUnsignedWrap(true); 3945 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize, 3946 DAG.getConstant(StackAlignMask, dl, IntPtr), Flags); 3947 3948 // Mask out the low bits for alignment purposes. 3949 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize, 3950 DAG.getConstant(~StackAlignMask, dl, IntPtr)); 3951 3952 SDValue Ops[] = { 3953 getRoot(), AllocSize, 3954 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)}; 3955 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other); 3956 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops); 3957 setValue(&I, DSA); 3958 DAG.setRoot(DSA.getValue(1)); 3959 3960 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects()); 3961 } 3962 3963 void SelectionDAGBuilder::visitLoad(const LoadInst &I) { 3964 if (I.isAtomic()) 3965 return visitAtomicLoad(I); 3966 3967 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3968 const Value *SV = I.getOperand(0); 3969 if (TLI.supportSwiftError()) { 3970 // Swifterror values can come from either a function parameter with 3971 // swifterror attribute or an alloca with swifterror attribute.
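    // Illustrative IR for the two cases (the pointee type is arbitrary):
    //   define void @f(i8** swifterror %err) { ... }
    //   %err = alloca swifterror i8*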
3972 if (const Argument *Arg = dyn_cast<Argument>(SV)) { 3973 if (Arg->hasSwiftErrorAttr()) 3974 return visitLoadFromSwiftError(I); 3975 } 3976 3977 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) { 3978 if (Alloca->isSwiftError()) 3979 return visitLoadFromSwiftError(I); 3980 } 3981 } 3982 3983 SDValue Ptr = getValue(SV); 3984 3985 Type *Ty = I.getType(); 3986 Align Alignment = I.getAlign(); 3987 3988 AAMDNodes AAInfo; 3989 I.getAAMetadata(AAInfo); 3990 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 3991 3992 SmallVector<EVT, 4> ValueVTs, MemVTs; 3993 SmallVector<uint64_t, 4> Offsets; 3994 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets); 3995 unsigned NumValues = ValueVTs.size(); 3996 if (NumValues == 0) 3997 return; 3998 3999 bool isVolatile = I.isVolatile(); 4000 4001 SDValue Root; 4002 bool ConstantMemory = false; 4003 if (isVolatile) 4004 // Serialize volatile loads with other side effects. 4005 Root = getRoot(); 4006 else if (NumValues > MaxParallelChains) 4007 Root = getMemoryRoot(); 4008 else if (AA && 4009 AA->pointsToConstantMemory(MemoryLocation( 4010 SV, 4011 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), 4012 AAInfo))) { 4013 // Do not serialize (non-volatile) loads of constant memory with anything. 4014 Root = DAG.getEntryNode(); 4015 ConstantMemory = true; 4016 } else { 4017 // Do not serialize non-volatile loads against each other. 4018 Root = DAG.getRoot(); 4019 } 4020 4021 SDLoc dl = getCurSDLoc(); 4022 4023 if (isVolatile) 4024 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG); 4025 4026 // An aggregate load cannot wrap around the address space, so offsets to its 4027 // parts don't wrap either. 4028 SDNodeFlags Flags; 4029 Flags.setNoUnsignedWrap(true); 4030 4031 SmallVector<SDValue, 4> Values(NumValues); 4032 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues)); 4033 EVT PtrVT = Ptr.getValueType(); 4034 4035 MachineMemOperand::Flags MMOFlags 4036 = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout()); 4037 4038 unsigned ChainI = 0; 4039 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 4040 // Serializing loads here may result in excessive register pressure, and 4041 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling 4042 // could recover a bit by hoisting nodes upward in the chain by recognizing 4043 // they are side-effect free or do not alias. The optimizer should really 4044 // avoid this case by converting large object/array copies to llvm.memcpy 4045 // (MaxParallelChains should always remain as failsafe). 
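    // For example, a load of [128 x i64] expands to 128 chained loads; the
    // code below merges each batch of MaxParallelChains (64) chains into a
    // TokenFactor and restarts the chain from it.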
4046 if (ChainI == MaxParallelChains) { 4047 assert(PendingLoads.empty() && "PendingLoads must be serialized first"); 4048 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4049 makeArrayRef(Chains.data(), ChainI)); 4050 Root = Chain; 4051 ChainI = 0; 4052 } 4053 SDValue A = DAG.getNode(ISD::ADD, dl, 4054 PtrVT, Ptr, 4055 DAG.getConstant(Offsets[i], dl, PtrVT), 4056 Flags); 4057 4058 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, 4059 MachinePointerInfo(SV, Offsets[i]), Alignment, 4060 MMOFlags, AAInfo, Ranges); 4061 Chains[ChainI] = L.getValue(1); 4062 4063 if (MemVTs[i] != ValueVTs[i]) 4064 L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]); 4065 4066 Values[i] = L; 4067 } 4068 4069 if (!ConstantMemory) { 4070 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4071 makeArrayRef(Chains.data(), ChainI)); 4072 if (isVolatile) 4073 DAG.setRoot(Chain); 4074 else 4075 PendingLoads.push_back(Chain); 4076 } 4077 4078 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl, 4079 DAG.getVTList(ValueVTs), Values)); 4080 } 4081 4082 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) { 4083 assert(DAG.getTargetLoweringInfo().supportSwiftError() && 4084 "call visitStoreToSwiftError when backend supports swifterror"); 4085 4086 SmallVector<EVT, 4> ValueVTs; 4087 SmallVector<uint64_t, 4> Offsets; 4088 const Value *SrcV = I.getOperand(0); 4089 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), 4090 SrcV->getType(), ValueVTs, &Offsets); 4091 assert(ValueVTs.size() == 1 && Offsets[0] == 0 && 4092 "expect a single EVT for swifterror"); 4093 4094 SDValue Src = getValue(SrcV); 4095 // Create a virtual register, then update the virtual register. 4096 Register VReg = 4097 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand()); 4098 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue 4099 // Chain can be getRoot or getControlRoot. 
4100 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg, 4101 SDValue(Src.getNode(), Src.getResNo())); 4102 DAG.setRoot(CopyNode); 4103 } 4104 4105 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) { 4106 assert(DAG.getTargetLoweringInfo().supportSwiftError() && 4107 "call visitLoadFromSwiftError when backend supports swifterror"); 4108 4109 assert(!I.isVolatile() && 4110 !I.hasMetadata(LLVMContext::MD_nontemporal) && 4111 !I.hasMetadata(LLVMContext::MD_invariant_load) && 4112 "Support volatile, non temporal, invariant for load_from_swift_error"); 4113 4114 const Value *SV = I.getOperand(0); 4115 Type *Ty = I.getType(); 4116 AAMDNodes AAInfo; 4117 I.getAAMetadata(AAInfo); 4118 assert( 4119 (!AA || 4120 !AA->pointsToConstantMemory(MemoryLocation( 4121 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), 4122 AAInfo))) && 4123 "load_from_swift_error should not be constant memory"); 4124 4125 SmallVector<EVT, 4> ValueVTs; 4126 SmallVector<uint64_t, 4> Offsets; 4127 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty, 4128 ValueVTs, &Offsets); 4129 assert(ValueVTs.size() == 1 && Offsets[0] == 0 && 4130 "expect a single EVT for swifterror"); 4131 4132 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT 4133 SDValue L = DAG.getCopyFromReg( 4134 getRoot(), getCurSDLoc(), 4135 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]); 4136 4137 setValue(&I, L); 4138 } 4139 4140 void SelectionDAGBuilder::visitStore(const StoreInst &I) { 4141 if (I.isAtomic()) 4142 return visitAtomicStore(I); 4143 4144 const Value *SrcV = I.getOperand(0); 4145 const Value *PtrV = I.getOperand(1); 4146 4147 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4148 if (TLI.supportSwiftError()) { 4149 // Swifterror values can come from either a function parameter with 4150 // swifterror attribute or an alloca with swifterror attribute. 4151 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) { 4152 if (Arg->hasSwiftErrorAttr()) 4153 return visitStoreToSwiftError(I); 4154 } 4155 4156 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) { 4157 if (Alloca->isSwiftError()) 4158 return visitStoreToSwiftError(I); 4159 } 4160 } 4161 4162 SmallVector<EVT, 4> ValueVTs, MemVTs; 4163 SmallVector<uint64_t, 4> Offsets; 4164 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), 4165 SrcV->getType(), ValueVTs, &MemVTs, &Offsets); 4166 unsigned NumValues = ValueVTs.size(); 4167 if (NumValues == 0) 4168 return; 4169 4170 // Get the lowered operands. Note that we do this after 4171 // checking if NumResults is zero, because with zero results 4172 // the operands won't have values in the map. 4173 SDValue Src = getValue(SrcV); 4174 SDValue Ptr = getValue(PtrV); 4175 4176 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot(); 4177 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues)); 4178 SDLoc dl = getCurSDLoc(); 4179 Align Alignment = I.getAlign(); 4180 AAMDNodes AAInfo; 4181 I.getAAMetadata(AAInfo); 4182 4183 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout()); 4184 4185 // An aggregate load cannot wrap around the address space, so offsets to its 4186 // parts don't wrap either. 4187 SDNodeFlags Flags; 4188 Flags.setNoUnsignedWrap(true); 4189 4190 unsigned ChainI = 0; 4191 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 4192 // See visitLoad comments. 
4193 if (ChainI == MaxParallelChains) { 4194 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4195 makeArrayRef(Chains.data(), ChainI)); 4196 Root = Chain; 4197 ChainI = 0; 4198 } 4199 SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags); 4200 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i); 4201 if (MemVTs[i] != ValueVTs[i]) 4202 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]); 4203 SDValue St = 4204 DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]), 4205 Alignment, MMOFlags, AAInfo); 4206 Chains[ChainI] = St; 4207 } 4208 4209 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4210 makeArrayRef(Chains.data(), ChainI)); 4211 DAG.setRoot(StoreNode); 4212 } 4213 4214 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I, 4215 bool IsCompressing) { 4216 SDLoc sdl = getCurSDLoc(); 4217 4218 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4219 MaybeAlign &Alignment) { 4220 // llvm.masked.store.*(Src0, Ptr, alignment, Mask) 4221 Src0 = I.getArgOperand(0); 4222 Ptr = I.getArgOperand(1); 4223 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue(); 4224 Mask = I.getArgOperand(3); 4225 }; 4226 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4227 MaybeAlign &Alignment) { 4228 // llvm.masked.compressstore.*(Src0, Ptr, Mask) 4229 Src0 = I.getArgOperand(0); 4230 Ptr = I.getArgOperand(1); 4231 Mask = I.getArgOperand(2); 4232 Alignment = None; 4233 }; 4234 4235 Value *PtrOperand, *MaskOperand, *Src0Operand; 4236 MaybeAlign Alignment; 4237 if (IsCompressing) 4238 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4239 else 4240 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4241 4242 SDValue Ptr = getValue(PtrOperand); 4243 SDValue Src0 = getValue(Src0Operand); 4244 SDValue Mask = getValue(MaskOperand); 4245 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 4246 4247 EVT VT = Src0.getValueType(); 4248 if (!Alignment) 4249 Alignment = DAG.getEVTAlign(VT); 4250 4251 AAMDNodes AAInfo; 4252 I.getAAMetadata(AAInfo); 4253 4254 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4255 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore, 4256 // TODO: Make MachineMemOperands aware of scalable 4257 // vectors. 4258 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo); 4259 SDValue StoreNode = 4260 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO, 4261 ISD::UNINDEXED, false /* Truncating */, IsCompressing); 4262 DAG.setRoot(StoreNode); 4263 setValue(&I, StoreNode); 4264 } 4265 4266 // Get a uniform base for the Gather/Scatter intrinsic. 4267 // The first argument of the Gather/Scatter intrinsic is a vector of pointers. 4268 // We try to represent it as a base pointer + vector of indices. 4269 // Usually, the vector of pointers comes from a 'getelementptr' instruction. 4270 // The first operand of the GEP may be a single pointer or a vector of pointers. 4271 // Example: 4272 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind 4273 // or 4274 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind 4275 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, .. 4276 // 4277 // When the first GEP operand is a single pointer - it is the uniform base we 4278 // are looking for. If the pointer operand is itself a splat constant vector - 4279 // we extract the splat value and use it as a uniform base. 4280 // In all other cases the function returns 'false'.
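// For the scalar-base example above, the decomposition is Base = %ptr,
// Index = %ind and Scale = 4 (the alloc size of i32), with a signed, scaled
// index type.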
4281 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, 4282 ISD::MemIndexType &IndexType, SDValue &Scale, 4283 SelectionDAGBuilder *SDB, const BasicBlock *CurBB) { 4284 SelectionDAG& DAG = SDB->DAG; 4285 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4286 const DataLayout &DL = DAG.getDataLayout(); 4287 4288 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); 4289 4290 // Handle splat constant pointer. 4291 if (auto *C = dyn_cast<Constant>(Ptr)) { 4292 C = C->getSplatValue(); 4293 if (!C) 4294 return false; 4295 4296 Base = SDB->getValue(C); 4297 4298 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements(); 4299 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts); 4300 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT); 4301 IndexType = ISD::SIGNED_SCALED; 4302 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL)); 4303 return true; 4304 } 4305 4306 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 4307 if (!GEP || GEP->getParent() != CurBB) 4308 return false; 4309 4310 if (GEP->getNumOperands() != 2) 4311 return false; 4312 4313 const Value *BasePtr = GEP->getPointerOperand(); 4314 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1); 4315 4316 // Make sure the base is scalar and the index is a vector. 4317 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy()) 4318 return false; 4319 4320 Base = SDB->getValue(BasePtr); 4321 Index = SDB->getValue(IndexVal); 4322 IndexType = ISD::SIGNED_SCALED; 4323 Scale = DAG.getTargetConstant( 4324 DL.getTypeAllocSize(GEP->getResultElementType()), 4325 SDB->getCurSDLoc(), TLI.getPointerTy(DL)); 4326 return true; 4327 } 4328 4329 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { 4330 SDLoc sdl = getCurSDLoc(); 4331 4332 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask) 4333 const Value *Ptr = I.getArgOperand(1); 4334 SDValue Src0 = getValue(I.getArgOperand(0)); 4335 SDValue Mask = getValue(I.getArgOperand(3)); 4336 EVT VT = Src0.getValueType(); 4337 Align Alignment = cast<ConstantInt>(I.getArgOperand(2)) 4338 ->getMaybeAlignValue() 4339 .getValueOr(DAG.getEVTAlign(VT)); 4340 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4341 4342 AAMDNodes AAInfo; 4343 I.getAAMetadata(AAInfo); 4344 4345 SDValue Base; 4346 SDValue Index; 4347 ISD::MemIndexType IndexType; 4348 SDValue Scale; 4349 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this, 4350 I.getParent()); 4351 4352 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); 4353 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4354 MachinePointerInfo(AS), MachineMemOperand::MOStore, 4355 // TODO: Make MachineMemOperands aware of scalable 4356 // vectors.
4357 MemoryLocation::UnknownSize, Alignment, AAInfo); 4358 if (!UniformBase) { 4359 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4360 Index = getValue(Ptr); 4361 IndexType = ISD::SIGNED_SCALED; 4362 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4363 } 4364 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale }; 4365 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl, 4366 Ops, MMO, IndexType); 4367 DAG.setRoot(Scatter); 4368 setValue(&I, Scatter); 4369 } 4370 4371 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) { 4372 SDLoc sdl = getCurSDLoc(); 4373 4374 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4375 MaybeAlign &Alignment) { 4376 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0) 4377 Ptr = I.getArgOperand(0); 4378 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue(); 4379 Mask = I.getArgOperand(2); 4380 Src0 = I.getArgOperand(3); 4381 }; 4382 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4383 MaybeAlign &Alignment) { 4384 // @llvm.masked.expandload.*(Ptr, Mask, Src0) 4385 Ptr = I.getArgOperand(0); 4386 Alignment = None; 4387 Mask = I.getArgOperand(1); 4388 Src0 = I.getArgOperand(2); 4389 }; 4390 4391 Value *PtrOperand, *MaskOperand, *Src0Operand; 4392 MaybeAlign Alignment; 4393 if (IsExpanding) 4394 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4395 else 4396 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4397 4398 SDValue Ptr = getValue(PtrOperand); 4399 SDValue Src0 = getValue(Src0Operand); 4400 SDValue Mask = getValue(MaskOperand); 4401 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 4402 4403 EVT VT = Src0.getValueType(); 4404 if (!Alignment) 4405 Alignment = DAG.getEVTAlign(VT); 4406 4407 AAMDNodes AAInfo; 4408 I.getAAMetadata(AAInfo); 4409 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 4410 4411 // Do not serialize masked loads of constant memory with anything. 4412 MemoryLocation ML; 4413 if (VT.isScalableVector()) 4414 ML = MemoryLocation(PtrOperand); 4415 else 4416 ML = MemoryLocation(PtrOperand, LocationSize::precise( 4417 DAG.getDataLayout().getTypeStoreSize(I.getType())), 4418 AAInfo); 4419 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); 4420 4421 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); 4422 4423 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4424 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad, 4425 // TODO: Make MachineMemOperands aware of scalable 4426 // vectors. 
4427 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges); 4428 4429 SDValue Load = 4430 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO, 4431 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding); 4432 if (AddToChain) 4433 PendingLoads.push_back(Load.getValue(1)); 4434 setValue(&I, Load); 4435 } 4436 4437 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) { 4438 SDLoc sdl = getCurSDLoc(); 4439 4440 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) 4441 const Value *Ptr = I.getArgOperand(0); 4442 SDValue Src0 = getValue(I.getArgOperand(3)); 4443 SDValue Mask = getValue(I.getArgOperand(2)); 4444 4445 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4446 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4447 Align Alignment = cast<ConstantInt>(I.getArgOperand(1)) 4448 ->getMaybeAlignValue() 4449 .getValueOr(DAG.getEVTAlign(VT)); 4450 4451 AAMDNodes AAInfo; 4452 I.getAAMetadata(AAInfo); 4453 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 4454 4455 SDValue Root = DAG.getRoot(); 4456 SDValue Base; 4457 SDValue Index; 4458 ISD::MemIndexType IndexType; 4459 SDValue Scale; 4460 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this, 4461 I.getParent()); 4462 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); 4463 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4464 MachinePointerInfo(AS), MachineMemOperand::MOLoad, 4465 // TODO: Make MachineMemOperands aware of scalable 4466 // vectors. 4467 MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges); 4468 4469 if (!UniformBase) { 4470 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4471 Index = getValue(Ptr); 4472 IndexType = ISD::SIGNED_SCALED; 4473 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4474 } 4475 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale }; 4476 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl, 4477 Ops, MMO, IndexType); 4478 4479 PendingLoads.push_back(Gather.getValue(1)); 4480 setValue(&I, Gather); 4481 } 4482 4483 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { 4484 SDLoc dl = getCurSDLoc(); 4485 AtomicOrdering SuccessOrdering = I.getSuccessOrdering(); 4486 AtomicOrdering FailureOrdering = I.getFailureOrdering(); 4487 SyncScope::ID SSID = I.getSyncScopeID(); 4488 4489 SDValue InChain = getRoot(); 4490 4491 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType(); 4492 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other); 4493 4494 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4495 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout()); 4496 4497 MachineFunction &MF = DAG.getMachineFunction(); 4498 MachineMemOperand *MMO = MF.getMachineMemOperand( 4499 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(), 4500 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering, 4501 FailureOrdering); 4502 4503 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, 4504 dl, MemVT, VTs, InChain, 4505 getValue(I.getPointerOperand()), 4506 getValue(I.getCompareOperand()), 4507 getValue(I.getNewValOperand()), MMO); 4508 4509 SDValue OutChain = L.getValue(2); 4510 4511 setValue(&I, L); 4512 DAG.setRoot(OutChain); 4513 } 4514 4515 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) { 4516 SDLoc dl = getCurSDLoc(); 4517 ISD::NodeType NT; 4518 switch (I.getOperation()) { 4519 default: llvm_unreachable("Unknown 
atomicrmw operation"); 4520 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break; 4521 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break; 4522 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break; 4523 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break; 4524 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; 4525 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break; 4526 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break; 4527 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break; 4528 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break; 4529 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break; 4530 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; 4531 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break; 4532 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break; 4533 } 4534 AtomicOrdering Ordering = I.getOrdering(); 4535 SyncScope::ID SSID = I.getSyncScopeID(); 4536 4537 SDValue InChain = getRoot(); 4538 4539 auto MemVT = getValue(I.getValOperand()).getSimpleValueType(); 4540 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4541 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout()); 4542 4543 MachineFunction &MF = DAG.getMachineFunction(); 4544 MachineMemOperand *MMO = MF.getMachineMemOperand( 4545 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(), 4546 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering); 4547 4548 SDValue L = 4549 DAG.getAtomic(NT, dl, MemVT, InChain, 4550 getValue(I.getPointerOperand()), getValue(I.getValOperand()), 4551 MMO); 4552 4553 SDValue OutChain = L.getValue(1); 4554 4555 setValue(&I, L); 4556 DAG.setRoot(OutChain); 4557 } 4558 4559 void SelectionDAGBuilder::visitFence(const FenceInst &I) { 4560 SDLoc dl = getCurSDLoc(); 4561 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4562 SDValue Ops[3]; 4563 Ops[0] = getRoot(); 4564 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl, 4565 TLI.getFenceOperandTy(DAG.getDataLayout())); 4566 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl, 4567 TLI.getFenceOperandTy(DAG.getDataLayout())); 4568 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops)); 4569 } 4570 4571 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { 4572 SDLoc dl = getCurSDLoc(); 4573 AtomicOrdering Order = I.getOrdering(); 4574 SyncScope::ID SSID = I.getSyncScopeID(); 4575 4576 SDValue InChain = getRoot(); 4577 4578 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4579 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4580 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType()); 4581 4582 if (!TLI.supportsUnalignedAtomics() && 4583 I.getAlignment() < MemVT.getSizeInBits() / 8) 4584 report_fatal_error("Cannot generate unaligned atomic load"); 4585 4586 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout()); 4587 4588 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4589 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(), 4590 I.getAlign(), AAMDNodes(), nullptr, SSID, Order); 4591 4592 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG); 4593 4594 SDValue Ptr = getValue(I.getPointerOperand()); 4595 4596 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) { 4597 // TODO: Once this is better exercised by tests, it should be merged with 4598 // the normal path for loads to prevent future divergence. 
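// Note that this path emits a plain LoadSDNode rather than an ATOMIC_LOAD:
// for an unordered atomic the output chain is added to PendingLoads below, so
// independent loads are not serialized against it.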
4599 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO); 4600 if (MemVT != VT) 4601 L = DAG.getPtrExtOrTrunc(L, dl, VT); 4602 4603 setValue(&I, L); 4604 SDValue OutChain = L.getValue(1); 4605 if (!I.isUnordered()) 4606 DAG.setRoot(OutChain); 4607 else 4608 PendingLoads.push_back(OutChain); 4609 return; 4610 } 4611 4612 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain, 4613 Ptr, MMO); 4614 4615 SDValue OutChain = L.getValue(1); 4616 if (MemVT != VT) 4617 L = DAG.getPtrExtOrTrunc(L, dl, VT); 4618 4619 setValue(&I, L); 4620 DAG.setRoot(OutChain); 4621 } 4622 4623 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { 4624 SDLoc dl = getCurSDLoc(); 4625 4626 AtomicOrdering Ordering = I.getOrdering(); 4627 SyncScope::ID SSID = I.getSyncScopeID(); 4628 4629 SDValue InChain = getRoot(); 4630 4631 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4632 EVT MemVT = 4633 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType()); 4634 4635 if (I.getAlignment() < MemVT.getSizeInBits() / 8) 4636 report_fatal_error("Cannot generate unaligned atomic store"); 4637 4638 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout()); 4639 4640 MachineFunction &MF = DAG.getMachineFunction(); 4641 MachineMemOperand *MMO = MF.getMachineMemOperand( 4642 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(), 4643 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering); 4644 4645 SDValue Val = getValue(I.getValueOperand()); 4646 if (Val.getValueType() != MemVT) 4647 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT); 4648 SDValue Ptr = getValue(I.getPointerOperand()); 4649 4650 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) { 4651 // TODO: Once this is better exercised by tests, it should be merged with 4652 // the normal path for stores to prevent future divergence. 4653 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO); 4654 DAG.setRoot(S); 4655 return; 4656 } 4657 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, 4658 Ptr, Val, MMO); 4659 4660 4661 DAG.setRoot(OutChain); 4662 } 4663 4664 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 4665 /// node. 4666 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I, 4667 unsigned Intrinsic) { 4668 // Ignore the callsite's attributes. A specific call site may be marked with 4669 // readnone, but the lowering code will expect the chain based on the 4670 // definition. 4671 const Function *F = I.getCalledFunction(); 4672 bool HasChain = !F->doesNotAccessMemory(); 4673 bool OnlyLoad = HasChain && F->onlyReadsMemory(); 4674 4675 // Build the operand list. 4676 SmallVector<SDValue, 8> Ops; 4677 if (HasChain) { // If this intrinsic has side-effects, chainify it. 4678 if (OnlyLoad) { 4679 // We don't need to serialize loads against other loads. 4680 Ops.push_back(DAG.getRoot()); 4681 } else { 4682 Ops.push_back(getRoot()); 4683 } 4684 } 4685 4686 // Info is set by getTgtMemIntrinsic. 4687 TargetLowering::IntrinsicInfo Info; 4688 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4689 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, 4690 DAG.getMachineFunction(), 4691 Intrinsic); 4692 4693 // Add the intrinsic ID as an integer operand if it's not a target memory intrinsic (or it uses one of the generic INTRINSIC_* opcodes). 4694 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID || 4695 Info.opc == ISD::INTRINSIC_W_CHAIN) 4696 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(), 4697 TLI.getPointerTy(DAG.getDataLayout()))); 4698 4699 // Add all operands of the call to the operand list.
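// For illustration, a chained call to a hypothetical intrinsic @llvm.foo(a, b)
// is built as (INTRINSIC_W_CHAIN chain, TargetConstant<IntrinsicID>, a, b),
// while a readnone intrinsic drops the chain and uses INTRINSIC_WO_CHAIN.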
4700 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) { 4701 const Value *Arg = I.getArgOperand(i); 4702 if (!I.paramHasAttr(i, Attribute::ImmArg)) { 4703 Ops.push_back(getValue(Arg)); 4704 continue; 4705 } 4706 4707 // Use TargetConstant instead of a regular constant for immarg. 4708 EVT VT = TLI.getValueType(*DL, Arg->getType(), true); 4709 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) { 4710 assert(CI->getBitWidth() <= 64 && 4711 "large intrinsic immediates not handled"); 4712 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT)); 4713 } else { 4714 Ops.push_back( 4715 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT)); 4716 } 4717 } 4718 4719 SmallVector<EVT, 4> ValueVTs; 4720 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs); 4721 4722 if (HasChain) 4723 ValueVTs.push_back(MVT::Other); 4724 4725 SDVTList VTs = DAG.getVTList(ValueVTs); 4726 4727 // Create the node. 4728 SDValue Result; 4729 if (IsTgtIntrinsic) { 4730 // This is a target intrinsic that touches memory. 4731 AAMDNodes AAInfo; 4732 I.getAAMetadata(AAInfo); 4733 Result = 4734 DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT, 4735 MachinePointerInfo(Info.ptrVal, Info.offset), 4736 Info.align, Info.flags, Info.size, AAInfo); 4737 } else if (!HasChain) { 4738 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops); 4739 } else if (!I.getType()->isVoidTy()) { 4740 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops); 4741 } else { 4742 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops); 4743 } 4744 4745 if (HasChain) { 4746 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); 4747 if (OnlyLoad) 4748 PendingLoads.push_back(Chain); 4749 else 4750 DAG.setRoot(Chain); 4751 } 4752 4753 if (!I.getType()->isVoidTy()) { 4754 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) { 4755 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy); 4756 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result); 4757 } else 4758 Result = lowerRangeToAssertZExt(DAG, I, Result); 4759 4760 MaybeAlign Alignment = I.getRetAlign(); 4761 if (!Alignment) 4762 Alignment = F->getAttributes().getRetAlignment(); 4763 // Insert `assertalign` node if there's an alignment. 4764 if (InsertAssertAlign && Alignment) { 4765 Result = 4766 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne()); 4767 } 4768 4769 setValue(&I, Result); 4770 } 4771 } 4772 4773 /// GetSignificand - Get the significand and build it into a floating-point 4774 /// number with exponent of 1: 4775 /// 4776 /// Op = (Op & 0x007fffff) | 0x3f800000; 4777 /// 4778 /// where Op is the i32 bit representation of a floating-point value. 4779 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) { 4780 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 4781 DAG.getConstant(0x007fffff, dl, MVT::i32)); 4782 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, 4783 DAG.getConstant(0x3f800000, dl, MVT::i32)); 4784 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2); 4785 } 4786 4787 /// GetExponent - Get the exponent: 4788 /// 4789 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127); 4790 /// 4791 /// where Op is the i32 bit representation of a floating-point value.
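///
/// For example, Op = 0x41000000 (the bits of 8.0f) has exponent field
/// (0x41000000 & 0x7f800000) >> 23 = 130, giving (float)(130 - 127) = 3.0f.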
4792 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, 4793 const TargetLowering &TLI, const SDLoc &dl) { 4794 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 4795 DAG.getConstant(0x7f800000, dl, MVT::i32)); 4796 SDValue t1 = DAG.getNode( 4797 ISD::SRL, dl, MVT::i32, t0, 4798 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout()))); 4799 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1, 4800 DAG.getConstant(127, dl, MVT::i32)); 4801 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); 4802 } 4803 4804 /// getF32Constant - Get 32-bit floating point constant. 4805 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, 4806 const SDLoc &dl) { 4807 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl, 4808 MVT::f32); 4809 } 4810 4811 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, 4812 SelectionDAG &DAG) { 4813 // TODO: What fast-math-flags should be set on the floating-point nodes? 4814 4815 // IntegerPartOfX = (int32_t)t0; 4816 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0); 4817 4818 // FractionalPartOfX = t0 - (float)IntegerPartOfX; 4819 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 4820 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); 4821 4822 // IntegerPartOfX <<= 23; 4823 IntegerPartOfX = DAG.getNode( 4824 ISD::SHL, dl, MVT::i32, IntegerPartOfX, 4825 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy( 4826 DAG.getDataLayout()))); 4827 4828 SDValue TwoToFractionalPartOfX; 4829 if (LimitFloatPrecision <= 6) { 4830 // For floating-point precision of 6: 4831 // 4832 // TwoToFractionalPartOfX = 4833 // 0.997535578f + 4834 // (0.735607626f + 0.252464424f * x) * x; 4835 // 4836 // error 0.0144103317, which is 6 bits 4837 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4838 getF32Constant(DAG, 0x3e814304, dl)); 4839 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4840 getF32Constant(DAG, 0x3f3c50c8, dl)); 4841 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4842 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4843 getF32Constant(DAG, 0x3f7f5e7e, dl)); 4844 } else if (LimitFloatPrecision <= 12) { 4845 // For floating-point precision of 12: 4846 // 4847 // TwoToFractionalPartOfX = 4848 // 0.999892986f + 4849 // (0.696457318f + 4850 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 4851 // 4852 // error 0.000107046256, which is 13 to 14 bits 4853 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4854 getF32Constant(DAG, 0x3da235e3, dl)); 4855 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4856 getF32Constant(DAG, 0x3e65b8f3, dl)); 4857 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4858 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4859 getF32Constant(DAG, 0x3f324b07, dl)); 4860 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4861 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4862 getF32Constant(DAG, 0x3f7ff8fd, dl)); 4863 } else { // LimitFloatPrecision <= 18 4864 // For floating-point precision of 18: 4865 // 4866 // TwoToFractionalPartOfX = 4867 // 0.999999982f + 4868 // (0.693148872f + 4869 // (0.240227044f + 4870 // (0.554906021e-1f + 4871 // (0.961591928e-2f + 4872 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 4873 // error 2.47208000*10^(-7), which is better than 18 bits 4874 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4875 getF32Constant(DAG, 0x3924b03e, dl)); 4876 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4877 getF32Constant(DAG, 0x3ab24b87, dl)); 4878 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4879 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4880 getF32Constant(DAG, 0x3c1d8c17, dl)); 4881 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4882 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4883 getF32Constant(DAG, 0x3d634a1d, dl)); 4884 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4885 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4886 getF32Constant(DAG, 0x3e75fe14, dl)); 4887 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4888 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 4889 getF32Constant(DAG, 0x3f317234, dl)); 4890 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 4891 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 4892 getF32Constant(DAG, 0x3f800000, dl)); 4893 } 4894 4895 // Add the exponent into the result in integer domain. 4896 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX); 4897 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 4898 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX)); 4899 } 4900 4901 /// expandExp - Lower an exp intrinsic. Handles the special sequences for 4902 /// limited-precision mode. 4903 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 4904 const TargetLowering &TLI) { 4905 if (Op.getValueType() == MVT::f32 && 4906 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4907 4908 // Put the exponent in the right bit position for later addition to the 4909 // final result: 4910 // 4911 // t0 = Op * log2(e) 4912 4913 // TODO: What fast-math-flags should be set here? 4914 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op, 4915 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32)); 4916 return getLimitedPrecisionExp2(t0, dl, DAG); 4917 } 4918 4919 // No special expansion. 4920 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op); 4921 } 4922 4923 /// expandLog - Lower a log intrinsic. Handles the special sequences for 4924 /// limited-precision mode. 4925 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 4926 const TargetLowering &TLI) { 4927 // TODO: What fast-math-flags should be set on the floating-point nodes? 4928 4929 if (Op.getValueType() == MVT::f32 && 4930 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4931 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 4932 4933 // Scale the exponent by log(2). 4934 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 4935 SDValue LogOfExponent = 4936 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 4937 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32)); 4938 4939 // Get the significand and build it into a floating-point number with 4940 // exponent of 1. 
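// e.g. for Op = 8.0f this gives X = 1.0f, and the sum built below recovers
// log(8.0) as LogOfExponent + log(X) = 3 * ln(2) + 0.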
4941 SDValue X = GetSignificand(DAG, Op1, dl); 4942 4943 SDValue LogOfMantissa; 4944 if (LimitFloatPrecision <= 6) { 4945 // For floating-point precision of 6: 4946 // 4947 // LogOfMantissa = 4948 // -1.1609546f + 4949 // (1.4034025f - 0.23903021f * x) * x; 4950 // 4951 // error 0.0034276066, which is better than 8 bits 4952 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4953 getF32Constant(DAG, 0xbe74c456, dl)); 4954 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4955 getF32Constant(DAG, 0x3fb3a2b1, dl)); 4956 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4957 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4958 getF32Constant(DAG, 0x3f949a29, dl)); 4959 } else if (LimitFloatPrecision <= 12) { 4960 // For floating-point precision of 12: 4961 // 4962 // LogOfMantissa = 4963 // -1.7417939f + 4964 // (2.8212026f + 4965 // (-1.4699568f + 4966 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; 4967 // 4968 // error 0.000061011436, which is 14 bits 4969 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4970 getF32Constant(DAG, 0xbd67b6d6, dl)); 4971 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4972 getF32Constant(DAG, 0x3ee4f4b8, dl)); 4973 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4974 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4975 getF32Constant(DAG, 0x3fbc278b, dl)); 4976 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4977 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4978 getF32Constant(DAG, 0x40348e95, dl)); 4979 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4980 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 4981 getF32Constant(DAG, 0x3fdef31a, dl)); 4982 } else { // LimitFloatPrecision <= 18 4983 // For floating-point precision of 18: 4984 // 4985 // LogOfMantissa = 4986 // -2.1072184f + 4987 // (4.2372794f + 4988 // (-3.7029485f + 4989 // (2.2781945f + 4990 // (-0.87823314f + 4991 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; 4992 // 4993 // error 0.0000023660568, which is better than 18 bits 4994 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4995 getF32Constant(DAG, 0xbc91e5ac, dl)); 4996 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4997 getF32Constant(DAG, 0x3e4350aa, dl)); 4998 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4999 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5000 getF32Constant(DAG, 0x3f60d3e3, dl)); 5001 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5002 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5003 getF32Constant(DAG, 0x4011cdf0, dl)); 5004 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5005 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5006 getF32Constant(DAG, 0x406cfd1c, dl)); 5007 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5008 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 5009 getF32Constant(DAG, 0x408797cb, dl)); 5010 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 5011 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 5012 getF32Constant(DAG, 0x4006dcab, dl)); 5013 } 5014 5015 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); 5016 } 5017 5018 // No special expansion. 5019 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op); 5020 } 5021 5022 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for 5023 /// limited-precision mode.
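///
/// The limited-precision expansion below uses
/// log2(x) = exponent + log2(significand), with the significand in [1,2)
/// approximated by one of several minimax polynomials.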
5024 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5025 const TargetLowering &TLI) { 5026 // TODO: What fast-math-flags should be set on the floating-point nodes? 5027 5028 if (Op.getValueType() == MVT::f32 && 5029 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5030 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 5031 5032 // Get the exponent. 5033 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); 5034 5035 // Get the significand and build it into a floating-point number with 5036 // exponent of 1. 5037 SDValue X = GetSignificand(DAG, Op1, dl); 5038 5039 // Different possible minimax approximations of significand in 5040 // floating-point for various degrees of accuracy over [1,2]. 5041 SDValue Log2ofMantissa; 5042 if (LimitFloatPrecision <= 6) { 5043 // For floating-point precision of 6: 5044 // 5045 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; 5046 // 5047 // error 0.0049451742, which is more than 7 bits 5048 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5049 getF32Constant(DAG, 0xbeb08fe0, dl)); 5050 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5051 getF32Constant(DAG, 0x40019463, dl)); 5052 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5053 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5054 getF32Constant(DAG, 0x3fd6633d, dl)); 5055 } else if (LimitFloatPrecision <= 12) { 5056 // For floating-point precision of 12: 5057 // 5058 // Log2ofMantissa = 5059 // -2.51285454f + 5060 // (4.07009056f + 5061 // (-2.12067489f + 5062 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; 5063 // 5064 // error 0.0000876136000, which is better than 13 bits 5065 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5066 getF32Constant(DAG, 0xbda7262e, dl)); 5067 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5068 getF32Constant(DAG, 0x3f25280b, dl)); 5069 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5070 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5071 getF32Constant(DAG, 0x4007b923, dl)); 5072 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5073 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5074 getF32Constant(DAG, 0x40823e2f, dl)); 5075 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5076 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5077 getF32Constant(DAG, 0x4020d29c, dl)); 5078 } else { // LimitFloatPrecision <= 18 5079 // For floating-point precision of 18: 5080 // 5081 // Log2ofMantissa = 5082 // -3.0400495f + 5083 // (6.1129976f + 5084 // (-5.3420409f + 5085 // (3.2865683f + 5086 // (-1.2669343f + 5087 // (0.27515199f - 5088 // 0.25691327e-1f * x) * x) * x) * x) * x) * x; 5089 // 5090 // error 0.0000018516, which is better than 18 bits 5091 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5092 getF32Constant(DAG, 0xbcd2769e, dl)); 5093 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5094 getF32Constant(DAG, 0x3e8ce0b9, dl)); 5095 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5096 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5097 getF32Constant(DAG, 0x3fa22ae7, dl)); 5098 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5099 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5100 getF32Constant(DAG, 0x40525723, dl)); 5101 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5102 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5103 getF32Constant(DAG, 0x40aaf200, dl)); 5104 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5105 SDValue t9 = 
DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 5106 getF32Constant(DAG, 0x40c39dad, dl)); 5107 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 5108 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 5109 getF32Constant(DAG, 0x4042902c, dl)); 5110 } 5111 5112 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); 5113 } 5114 5115 // No special expansion. 5116 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op); 5117 } 5118 5119 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for 5120 /// limited-precision mode. 5121 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5122 const TargetLowering &TLI) { 5123 // TODO: What fast-math-flags should be set on the floating-point nodes? 5124 5125 if (Op.getValueType() == MVT::f32 && 5126 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5127 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 5128 5129 // Scale the exponent by log10(2) [0.30102999f]. 5130 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 5131 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 5132 getF32Constant(DAG, 0x3e9a209a, dl)); 5133 5134 // Get the significand and build it into a floating-point number with 5135 // exponent of 1. 5136 SDValue X = GetSignificand(DAG, Op1, dl); 5137 5138 SDValue Log10ofMantissa; 5139 if (LimitFloatPrecision <= 6) { 5140 // For floating-point precision of 6: 5141 // 5142 // Log10ofMantissa = 5143 // -0.50419619f + 5144 // (0.60948995f - 0.10380950f * x) * x; 5145 // 5146 // error 0.0014886165, which is 6 bits 5147 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5148 getF32Constant(DAG, 0xbdd49a13, dl)); 5149 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5150 getF32Constant(DAG, 0x3f1c0789, dl)); 5151 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5152 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5153 getF32Constant(DAG, 0x3f011300, dl)); 5154 } else if (LimitFloatPrecision <= 12) { 5155 // For floating-point precision of 12: 5156 // 5157 // Log10ofMantissa = 5158 // -0.64831180f + 5159 // (0.91751397f + 5160 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; 5161 // 5162 // error 0.00019228036, which is better than 12 bits 5163 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5164 getF32Constant(DAG, 0x3d431f31, dl)); 5165 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 5166 getF32Constant(DAG, 0x3ea21fb2, dl)); 5167 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5168 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5169 getF32Constant(DAG, 0x3f6ae232, dl)); 5170 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5171 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 5172 getF32Constant(DAG, 0x3f25f7c3, dl)); 5173 } else { // LimitFloatPrecision <= 18 5174 // For floating-point precision of 18: 5175 // 5176 // Log10ofMantissa = 5177 // -0.84299375f + 5178 // (1.5327582f + 5179 // (-1.0688956f + 5180 // (0.49102474f + 5181 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; 5182 // 5183 // error 0.0000037995730, which is better than 18 bits 5184 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5185 getF32Constant(DAG, 0x3c5d51ce, dl)); 5186 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 5187 getF32Constant(DAG, 0x3e00685a, dl)); 5188 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5189 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5190 getF32Constant(DAG, 0x3efb6798, dl)); 5191 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, 
t3, X); 5192 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 5193 getF32Constant(DAG, 0x3f88d192, dl)); 5194 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5195 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 5196 getF32Constant(DAG, 0x3fc4316c, dl)); 5197 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5198 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8, 5199 getF32Constant(DAG, 0x3f57ce70, dl)); 5200 } 5201 5202 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); 5203 } 5204 5205 // No special expansion. 5206 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op); 5207 } 5208 5209 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for 5210 /// limited-precision mode. 5211 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5212 const TargetLowering &TLI) { 5213 if (Op.getValueType() == MVT::f32 && 5214 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) 5215 return getLimitedPrecisionExp2(Op, dl, DAG); 5216 5217 // No special expansion. 5218 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op); 5219 } 5220 5221 /// expandPow - Lower a pow intrinsic. Handles the special sequences for 5222 /// limited-precision mode when the base is 10.0f. 5223 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, 5224 SelectionDAG &DAG, const TargetLowering &TLI) { 5225 bool IsExp10 = false; 5226 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 && 5227 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5228 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) { 5229 APFloat Ten(10.0f); 5230 IsExp10 = LHSC->isExactlyValue(Ten); 5231 } 5232 } 5233 5234 // TODO: What fast-math-flags should be set on the FMUL node? 5235 if (IsExp10) { 5236 // Put the exponent in the right bit position for later addition to the 5237 // final result: 5238 // 5239 // #define LOG2OF10 3.3219281f 5240 // t0 = Op * LOG2OF10; 5241 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS, 5242 getF32Constant(DAG, 0x40549a78, dl)); 5243 return getLimitedPrecisionExp2(t0, dl, DAG); 5244 } 5245 5246 // No special expansion. 5247 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS); 5248 } 5249 5250 /// ExpandPowI - Expand a llvm.powi intrinsic. 5251 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, 5252 SelectionDAG &DAG) { 5253 // If RHS is a constant, we can expand this out to a multiplication tree, 5254 // otherwise we end up lowering to a call to __powidf2 (for example). When 5255 // optimizing for size, we only want to do this if the expansion would produce 5256 // a small number of multiplies, otherwise we do the full expansion. 5257 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 5258 // Get the exponent as a positive value. 5259 unsigned Val = RHSC->getSExtValue(); 5260 if ((int)Val < 0) Val = -Val; 5261 5262 // powi(x, 0) -> 1.0 5263 if (Val == 0) 5264 return DAG.getConstantFP(1.0, DL, LHS.getValueType()); 5265 5266 bool OptForSize = DAG.shouldOptForSize(); 5267 if (!OptForSize || 5268 // If optimizing for size, don't insert too many multiplies. 5269 // This inserts up to 5 multiplies. 5270 countPopulation(Val) + Log2_32(Val) < 7) { 5271 // We use the simple binary decomposition method to generate the multiply 5272 // sequence.
There are more optimal ways to do this (for example, 5273 // powi(x,15) generates one more multiply than it should), but this has 5274 // the benefit of being both really simple and much better than a libcall. 5275 SDValue Res; // Logically starts equal to 1.0 5276 SDValue CurSquare = LHS; 5277 // TODO: Intrinsics should have fast-math-flags that propagate to these 5278 // nodes. 5279 while (Val) { 5280 if (Val & 1) { 5281 if (Res.getNode()) 5282 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare); 5283 else 5284 Res = CurSquare; // 1.0*CurSquare. 5285 } 5286 5287 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(), 5288 CurSquare, CurSquare); 5289 Val >>= 1; 5290 } 5291 5292 // If the original was negative, invert the result, producing 1/(x*x*x). 5293 if (RHSC->getSExtValue() < 0) 5294 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(), 5295 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res); 5296 return Res; 5297 } 5298 } 5299 5300 // Otherwise, expand to a libcall. 5301 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS); 5302 } 5303 5304 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, 5305 SDValue LHS, SDValue RHS, SDValue Scale, 5306 SelectionDAG &DAG, const TargetLowering &TLI) { 5307 EVT VT = LHS.getValueType(); 5308 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT; 5309 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT; 5310 LLVMContext &Ctx = *DAG.getContext(); 5311 5312 // If the type is legal but the operation isn't, this node might survive all 5313 // the way to operation legalization. If we end up there and we do not have 5314 // the ability to widen the type (if VT*2 is not legal), we cannot expand the 5315 // node. 5316 5317 // Coax the legalizer into expanding the node during type legalization instead 5318 // by bumping the size by one bit. This will force it to Promote, enabling the 5319 // early expansion and avoiding the need to expand later. 5320 5321 // We don't have to do this if Scale is 0; that can always be expanded, unless 5322 // it's a saturating signed operation. Those can experience true integer 5323 // division overflow, a case which we must avoid. 5324 5325 // FIXME: We wouldn't have to do this (or any of the early 5326 // expansion/promotion) if it was possible to expand a libcall of an 5327 // illegal type during operation legalization. But it's not, so things 5328 // get a bit hacky. 
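// For example (assuming i32 is legal but SDIVFIXSAT on i32 is not): an i32
// sdiv.fix.sat with a nonzero scale is rebuilt below on i33; i33 is illegal,
// so type legalization promotes it and the node is expanded early.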
5329 unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue(); 5330 if ((ScaleInt > 0 || (Saturating && Signed)) && 5331 (TLI.isTypeLegal(VT) || 5332 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) { 5333 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction( 5334 Opcode, VT, ScaleInt); 5335 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) { 5336 EVT PromVT; 5337 if (VT.isScalarInteger()) 5338 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1); 5339 else if (VT.isVector()) { 5340 PromVT = VT.getVectorElementType(); 5341 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1); 5342 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount()); 5343 } else 5344 llvm_unreachable("Wrong VT for DIVFIX?"); 5345 if (Signed) { 5346 LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT); 5347 RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT); 5348 } else { 5349 LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT); 5350 RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT); 5351 } 5352 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout()); 5353 // For saturating operations, we need to shift up the LHS to get the 5354 // proper saturation width, and then shift down again afterwards. 5355 if (Saturating) 5356 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS, 5357 DAG.getConstant(1, DL, ShiftTy)); 5358 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale); 5359 if (Saturating) 5360 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res, 5361 DAG.getConstant(1, DL, ShiftTy)); 5362 return DAG.getZExtOrTrunc(Res, DL, VT); 5363 } 5364 } 5365 5366 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale); 5367 } 5368 5369 // getUnderlyingArgRegs - Find underlying registers used for a truncated, 5370 // bitcasted, or split argument. Returns a list of <Register, size in bits> 5371 static void 5372 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, 5373 const SDValue &N) { 5374 switch (N.getOpcode()) { 5375 case ISD::CopyFromReg: { 5376 SDValue Op = N.getOperand(1); 5377 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(), 5378 Op.getValueType().getSizeInBits()); 5379 return; 5380 } 5381 case ISD::BITCAST: 5382 case ISD::AssertZext: 5383 case ISD::AssertSext: 5384 case ISD::TRUNCATE: 5385 getUnderlyingArgRegs(Regs, N.getOperand(0)); 5386 return; 5387 case ISD::BUILD_PAIR: 5388 case ISD::BUILD_VECTOR: 5389 case ISD::CONCAT_VECTORS: 5390 for (SDValue Op : N->op_values()) 5391 getUnderlyingArgRegs(Regs, Op); 5392 return; 5393 default: 5394 return; 5395 } 5396 } 5397 5398 /// If the DbgValueInst is a dbg_value of a function argument, create the 5399 /// corresponding DBG_VALUE machine instruction for it now. At the end of 5400 /// instruction selection, they will be inserted to the entry BB. 5401 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( 5402 const Value *V, DILocalVariable *Variable, DIExpression *Expr, 5403 DILocation *DL, bool IsDbgDeclare, const SDValue &N) { 5404 const Argument *Arg = dyn_cast<Argument>(V); 5405 if (!Arg) 5406 return false; 5407 5408 if (!IsDbgDeclare) { 5409 // ArgDbgValues are hoisted to the beginning of the entry block. So we 5410 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in 5411 // the entry block. 5412 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front(); 5413 if (!IsInEntryBlock) 5414 return false; 5415 5416 // ArgDbgValues are hoisted to the beginning of the entry block. 
So we 5417 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a 5418 // variable that is also a parameter. 5419 // 5420 // Although, if we are at the top of the entry block already, we can still 5421 // emit using ArgDbgValue. This might catch some situations when the 5422 // dbg.value refers to an argument that isn't used in the entry block, so 5423 // any CopyToReg node would be optimized out and the only way to express 5424 // this DBG_VALUE is by using the physical reg (or FI) as done in this 5425 // method. ArgDbgValues are hoisted to the beginning of the entry block. So 5426 // we should only emit as ArgDbgValue if the Variable is an argument to the 5427 // current function, and the dbg.value intrinsic is found in the entry 5428 // block. 5429 bool VariableIsFunctionInputArg = Variable->isParameter() && 5430 !DL->getInlinedAt(); 5431 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder; 5432 if (!IsInPrologue && !VariableIsFunctionInputArg) 5433 return false; 5434 5435 // Here we assume that a function argument at the IR level can only be used 5436 // to describe one input parameter at the source level. If, for example, we 5437 // have source code like this 5438 // 5439 // struct A { long x, y; }; 5440 // void foo(struct A a, long b) { 5441 // ... 5442 // b = a.x; 5443 // ... 5444 // } 5445 // 5446 // and IR like this 5447 // 5448 // define void @foo(i32 %a1, i32 %a2, i32 %b) { 5449 // entry: 5450 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment 5451 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment 5452 // call void @llvm.dbg.value(metadata i32 %b, "b", 5453 // ... 5454 // call void @llvm.dbg.value(metadata i32 %a1, "b" 5455 // ... 5456 // 5457 // then the last dbg.value is describing a parameter "b" using a value that 5458 // is an argument. But since we have already used %a1 to describe a 5459 // parameter, we should not handle that last dbg.value here (that would 5460 // result in an incorrect hoisting of the DBG_VALUE to the function entry). 5461 // Notice that we allow one dbg.value per IR level argument, to accommodate 5462 // the situation with fragments above. 5463 if (VariableIsFunctionInputArg) { 5464 unsigned ArgNo = Arg->getArgNo(); 5465 if (ArgNo >= FuncInfo.DescribedArgs.size()) 5466 FuncInfo.DescribedArgs.resize(ArgNo + 1, false); 5467 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo)) 5468 return false; 5469 FuncInfo.DescribedArgs.set(ArgNo); 5470 } 5471 } 5472 5473 MachineFunction &MF = DAG.getMachineFunction(); 5474 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 5475 5476 bool IsIndirect = false; 5477 Optional<MachineOperand> Op; 5478 // Some arguments' frame index is recorded during argument lowering. 5479 int FI = FuncInfo.getArgumentFrameIndex(Arg); 5480 if (FI != std::numeric_limits<int>::max()) 5481 Op = MachineOperand::CreateFI(FI); 5482 5483 SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes; 5484 if (!Op && N.getNode()) { 5485 getUnderlyingArgRegs(ArgRegsAndSizes, N); 5486 Register Reg; 5487 if (ArgRegsAndSizes.size() == 1) 5488 Reg = ArgRegsAndSizes.front().first; 5489 5490 if (Reg && Reg.isVirtual()) { 5491 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 5492 Register PR = RegInfo.getLiveInPhysReg(Reg); 5493 if (PR) 5494 Reg = PR; 5495 } 5496 if (Reg) { 5497 Op = MachineOperand::CreateReg(Reg, false); 5498 IsIndirect = IsDbgDeclare; 5499 } 5500 } 5501 5502 if (!Op && N.getNode()) { 5503 // Check if frame index is available.
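// For instance, an argument spilled in the prologue may be reached here as a
// load whose base address is a FrameIndexSDNode; in that case the variable
// can be described directly with that stack slot.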
5504 SDValue LCandidate = peekThroughBitcasts(N); 5505 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode())) 5506 if (FrameIndexSDNode *FINode = 5507 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 5508 Op = MachineOperand::CreateFI(FINode->getIndex()); 5509 } 5510 5511 if (!Op) { 5512 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg 5513 auto splitMultiRegDbgValue 5514 = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) { 5515 unsigned Offset = 0; 5516 for (auto RegAndSize : SplitRegs) { 5517 // If the expression is already a fragment, the current register 5518 // offset+size might extend beyond the fragment. In this case, only 5519 // the register bits that are inside the fragment are relevant. 5520 int RegFragmentSizeInBits = RegAndSize.second; 5521 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) { 5522 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits; 5523 // The register is entirely outside the expression fragment, 5524 // so is irrelevant for debug info. 5525 if (Offset >= ExprFragmentSizeInBits) 5526 break; 5527 // The register is partially outside the expression fragment, only 5528 // the low bits within the fragment are relevant for debug info. 5529 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) { 5530 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset; 5531 } 5532 } 5533 5534 auto FragmentExpr = DIExpression::createFragmentExpression( 5535 Expr, Offset, RegFragmentSizeInBits); 5536 Offset += RegAndSize.second; 5537 // If a valid fragment expression cannot be created, the variable's 5538 // correct value cannot be determined and so it is set as Undef. 5539 if (!FragmentExpr) { 5540 SDDbgValue *SDV = DAG.getConstantDbgValue( 5541 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder); 5542 DAG.AddDbgValue(SDV, nullptr, false); 5543 continue; 5544 } 5545 assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?"); 5546 FuncInfo.ArgDbgValues.push_back( 5547 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare, 5548 RegAndSize.first, Variable, *FragmentExpr)); 5549 } 5550 }; 5551 5552 // Check if ValueMap has reg number. 5553 DenseMap<const Value *, Register>::const_iterator 5554 VMI = FuncInfo.ValueMap.find(V); 5555 if (VMI != FuncInfo.ValueMap.end()) { 5556 const auto &TLI = DAG.getTargetLoweringInfo(); 5557 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second, 5558 V->getType(), getABIRegCopyCC(V)); 5559 if (RFV.occupiesMultipleRegs()) { 5560 splitMultiRegDbgValue(RFV.getRegsAndSizes()); 5561 return true; 5562 } 5563 5564 Op = MachineOperand::CreateReg(VMI->second, false); 5565 IsIndirect = IsDbgDeclare; 5566 } else if (ArgRegsAndSizes.size() > 1) { 5567 // This was split due to the calling convention, and no virtual register 5568 // mapping exists for the value. 5569 splitMultiRegDbgValue(ArgRegsAndSizes); 5570 return true; 5571 } 5572 } 5573 5574 if (!Op) 5575 return false; 5576 5577 assert(Variable->isValidLocationForIntrinsic(DL) && 5578 "Expected inlined-at fields to agree"); 5579 IsIndirect = (Op->isReg()) ? IsIndirect : true; 5580 FuncInfo.ArgDbgValues.push_back( 5581 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect, 5582 *Op, Variable, Expr)); 5583 5584 return true; 5585 } 5586 5587 /// Return the appropriate SDDbgValue based on N. 
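/// FrameIndexSDNodes are given frame-index locations; all other nodes are
/// given plain SDNode locations.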
5588 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N, 5589 DILocalVariable *Variable, 5590 DIExpression *Expr, 5591 const DebugLoc &dl, 5592 unsigned DbgSDNodeOrder) { 5593 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) { 5594 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe 5595 // stack slot locations. 5596 // 5597 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting 5598 // debug values here after optimization: 5599 // 5600 // dbg.value(i32* %px, !"int *px", !DIExpression()), and 5601 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref)) 5602 // 5603 // Both describe the direct values of their associated variables. 5604 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(), 5605 /*IsIndirect*/ false, dl, DbgSDNodeOrder); 5606 } 5607 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(), 5608 /*IsIndirect*/ false, dl, DbgSDNodeOrder); 5609 } 5610 5611 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) { 5612 switch (Intrinsic) { 5613 case Intrinsic::smul_fix: 5614 return ISD::SMULFIX; 5615 case Intrinsic::umul_fix: 5616 return ISD::UMULFIX; 5617 case Intrinsic::smul_fix_sat: 5618 return ISD::SMULFIXSAT; 5619 case Intrinsic::umul_fix_sat: 5620 return ISD::UMULFIXSAT; 5621 case Intrinsic::sdiv_fix: 5622 return ISD::SDIVFIX; 5623 case Intrinsic::udiv_fix: 5624 return ISD::UDIVFIX; 5625 case Intrinsic::sdiv_fix_sat: 5626 return ISD::SDIVFIXSAT; 5627 case Intrinsic::udiv_fix_sat: 5628 return ISD::UDIVFIXSAT; 5629 default: 5630 llvm_unreachable("Unhandled fixed point intrinsic"); 5631 } 5632 } 5633 5634 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I, 5635 const char *FunctionName) { 5636 assert(FunctionName && "FunctionName must not be nullptr"); 5637 SDValue Callee = DAG.getExternalSymbol( 5638 FunctionName, 5639 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())); 5640 LowerCallTo(I, Callee, I.isTailCall()); 5641 } 5642 5643 /// Given a @llvm.call.preallocated.setup, return the corresponding 5644 /// preallocated call. 5645 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) { 5646 assert(cast<CallBase>(PreallocatedSetup) 5647 ->getCalledFunction() 5648 ->getIntrinsicID() == Intrinsic::call_preallocated_setup && 5649 "expected call_preallocated_setup Value"); 5650 for (auto *U : PreallocatedSetup->users()) { 5651 auto *UseCall = cast<CallBase>(U); 5652 const Function *Fn = UseCall->getCalledFunction(); 5653 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) { 5654 return UseCall; 5655 } 5656 } 5657 llvm_unreachable("expected corresponding call to preallocated setup/arg"); 5658 } 5659 5660 /// Lower the call to the specified intrinsic function. 5661 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, 5662 unsigned Intrinsic) { 5663 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5664 SDLoc sdl = getCurSDLoc(); 5665 DebugLoc dl = getCurDebugLoc(); 5666 SDValue Res; 5667 5668 switch (Intrinsic) { 5669 default: 5670 // By default, turn this into a target intrinsic node. 
5671 visitTargetIntrinsic(I, Intrinsic); 5672 return; 5673 case Intrinsic::vscale: { 5674 match(&I, m_VScale(DAG.getDataLayout())); 5675 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5676 setValue(&I, 5677 DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1))); 5678 return; 5679 } 5680 case Intrinsic::vastart: visitVAStart(I); return; 5681 case Intrinsic::vaend: visitVAEnd(I); return; 5682 case Intrinsic::vacopy: visitVACopy(I); return; 5683 case Intrinsic::returnaddress: 5684 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, 5685 TLI.getPointerTy(DAG.getDataLayout()), 5686 getValue(I.getArgOperand(0)))); 5687 return; 5688 case Intrinsic::addressofreturnaddress: 5689 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl, 5690 TLI.getPointerTy(DAG.getDataLayout()))); 5691 return; 5692 case Intrinsic::sponentry: 5693 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl, 5694 TLI.getFrameIndexTy(DAG.getDataLayout()))); 5695 return; 5696 case Intrinsic::frameaddress: 5697 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, 5698 TLI.getFrameIndexTy(DAG.getDataLayout()), 5699 getValue(I.getArgOperand(0)))); 5700 return; 5701 case Intrinsic::read_volatile_register: 5702 case Intrinsic::read_register: { 5703 Value *Reg = I.getArgOperand(0); 5704 SDValue Chain = getRoot(); 5705 SDValue RegName = 5706 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 5707 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5708 Res = DAG.getNode(ISD::READ_REGISTER, sdl, 5709 DAG.getVTList(VT, MVT::Other), Chain, RegName); 5710 setValue(&I, Res); 5711 DAG.setRoot(Res.getValue(1)); 5712 return; 5713 } 5714 case Intrinsic::write_register: { 5715 Value *Reg = I.getArgOperand(0); 5716 Value *RegValue = I.getArgOperand(1); 5717 SDValue Chain = getRoot(); 5718 SDValue RegName = 5719 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 5720 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain, 5721 RegName, getValue(RegValue))); 5722 return; 5723 } 5724 case Intrinsic::memcpy: { 5725 const auto &MCI = cast<MemCpyInst>(I); 5726 SDValue Op1 = getValue(I.getArgOperand(0)); 5727 SDValue Op2 = getValue(I.getArgOperand(1)); 5728 SDValue Op3 = getValue(I.getArgOperand(2)); 5729 // @llvm.memcpy defines 0 and 1 to both mean no alignment. 5730 Align DstAlign = MCI.getDestAlign().valueOrOne(); 5731 Align SrcAlign = MCI.getSourceAlign().valueOrOne(); 5732 Align Alignment = commonAlignment(DstAlign, SrcAlign); 5733 bool isVol = MCI.isVolatile(); 5734 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5735 // FIXME: Support passing different dest/src alignments to the memcpy DAG 5736 // node. 5737 SDValue Root = isVol ? getRoot() : getMemoryRoot(); 5738 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol, 5739 /* AlwaysInline */ false, isTC, 5740 MachinePointerInfo(I.getArgOperand(0)), 5741 MachinePointerInfo(I.getArgOperand(1))); 5742 updateDAGForMaybeTailCall(MC); 5743 return; 5744 } 5745 case Intrinsic::memcpy_inline: { 5746 const auto &MCI = cast<MemCpyInlineInst>(I); 5747 SDValue Dst = getValue(I.getArgOperand(0)); 5748 SDValue Src = getValue(I.getArgOperand(1)); 5749 SDValue Size = getValue(I.getArgOperand(2)); 5750 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size"); 5751 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment. 
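// commonAlignment picks the smaller of the two alignments, e.g.
// commonAlignment(Align(16), Align(4)) == Align(4); the memcpy DAG node only
// takes a single alignment (see the FIXME below).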
5752 Align DstAlign = MCI.getDestAlign().valueOrOne(); 5753 Align SrcAlign = MCI.getSourceAlign().valueOrOne(); 5754 Align Alignment = commonAlignment(DstAlign, SrcAlign); 5755 bool isVol = MCI.isVolatile(); 5756 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5757 // FIXME: Support passing different dest/src alignments to the memcpy DAG 5758 // node. 5759 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol, 5760 /* AlwaysInline */ true, isTC, 5761 MachinePointerInfo(I.getArgOperand(0)), 5762 MachinePointerInfo(I.getArgOperand(1))); 5763 updateDAGForMaybeTailCall(MC); 5764 return; 5765 } 5766 case Intrinsic::memset: { 5767 const auto &MSI = cast<MemSetInst>(I); 5768 SDValue Op1 = getValue(I.getArgOperand(0)); 5769 SDValue Op2 = getValue(I.getArgOperand(1)); 5770 SDValue Op3 = getValue(I.getArgOperand(2)); 5771 // @llvm.memset defines 0 and 1 to both mean no alignment. 5772 Align Alignment = MSI.getDestAlign().valueOrOne(); 5773 bool isVol = MSI.isVolatile(); 5774 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5775 SDValue Root = isVol ? getRoot() : getMemoryRoot(); 5776 SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC, 5777 MachinePointerInfo(I.getArgOperand(0))); 5778 updateDAGForMaybeTailCall(MS); 5779 return; 5780 } 5781 case Intrinsic::memmove: { 5782 const auto &MMI = cast<MemMoveInst>(I); 5783 SDValue Op1 = getValue(I.getArgOperand(0)); 5784 SDValue Op2 = getValue(I.getArgOperand(1)); 5785 SDValue Op3 = getValue(I.getArgOperand(2)); 5786 // @llvm.memmove defines 0 and 1 to both mean no alignment. 5787 Align DstAlign = MMI.getDestAlign().valueOrOne(); 5788 Align SrcAlign = MMI.getSourceAlign().valueOrOne(); 5789 Align Alignment = commonAlignment(DstAlign, SrcAlign); 5790 bool isVol = MMI.isVolatile(); 5791 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5792 // FIXME: Support passing different dest/src alignments to the memmove DAG 5793 // node. 5794 SDValue Root = isVol ? 
getRoot() : getMemoryRoot(); 5795 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, 5796 isTC, MachinePointerInfo(I.getArgOperand(0)), 5797 MachinePointerInfo(I.getArgOperand(1))); 5798 updateDAGForMaybeTailCall(MM); 5799 return; 5800 } 5801 case Intrinsic::memcpy_element_unordered_atomic: { 5802 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I); 5803 SDValue Dst = getValue(MI.getRawDest()); 5804 SDValue Src = getValue(MI.getRawSource()); 5805 SDValue Length = getValue(MI.getLength()); 5806 5807 unsigned DstAlign = MI.getDestAlignment(); 5808 unsigned SrcAlign = MI.getSourceAlignment(); 5809 Type *LengthTy = MI.getLength()->getType(); 5810 unsigned ElemSz = MI.getElementSizeInBytes(); 5811 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5812 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src, 5813 SrcAlign, Length, LengthTy, ElemSz, isTC, 5814 MachinePointerInfo(MI.getRawDest()), 5815 MachinePointerInfo(MI.getRawSource())); 5816 updateDAGForMaybeTailCall(MC); 5817 return; 5818 } 5819 case Intrinsic::memmove_element_unordered_atomic: { 5820 auto &MI = cast<AtomicMemMoveInst>(I); 5821 SDValue Dst = getValue(MI.getRawDest()); 5822 SDValue Src = getValue(MI.getRawSource()); 5823 SDValue Length = getValue(MI.getLength()); 5824 5825 unsigned DstAlign = MI.getDestAlignment(); 5826 unsigned SrcAlign = MI.getSourceAlignment(); 5827 Type *LengthTy = MI.getLength()->getType(); 5828 unsigned ElemSz = MI.getElementSizeInBytes(); 5829 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5830 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src, 5831 SrcAlign, Length, LengthTy, ElemSz, isTC, 5832 MachinePointerInfo(MI.getRawDest()), 5833 MachinePointerInfo(MI.getRawSource())); 5834 updateDAGForMaybeTailCall(MC); 5835 return; 5836 } 5837 case Intrinsic::memset_element_unordered_atomic: { 5838 auto &MI = cast<AtomicMemSetInst>(I); 5839 SDValue Dst = getValue(MI.getRawDest()); 5840 SDValue Val = getValue(MI.getValue()); 5841 SDValue Length = getValue(MI.getLength()); 5842 5843 unsigned DstAlign = MI.getDestAlignment(); 5844 Type *LengthTy = MI.getLength()->getType(); 5845 unsigned ElemSz = MI.getElementSizeInBytes(); 5846 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget()); 5847 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length, 5848 LengthTy, ElemSz, isTC, 5849 MachinePointerInfo(MI.getRawDest())); 5850 updateDAGForMaybeTailCall(MC); 5851 return; 5852 } 5853 case Intrinsic::call_preallocated_setup: { 5854 const CallBase *PreallocatedCall = FindPreallocatedCall(&I); 5855 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall); 5856 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other, 5857 getRoot(), SrcValue); 5858 setValue(&I, Res); 5859 DAG.setRoot(Res); 5860 return; 5861 } 5862 case Intrinsic::call_preallocated_arg: { 5863 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0)); 5864 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall); 5865 SDValue Ops[3]; 5866 Ops[0] = getRoot(); 5867 Ops[1] = SrcValue; 5868 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl, 5869 MVT::i32); // arg index 5870 SDValue Res = DAG.getNode( 5871 ISD::PREALLOCATED_ARG, sdl, 5872 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops); 5873 setValue(&I, Res); 5874 DAG.setRoot(Res.getValue(1)); 5875 return; 5876 } 5877 case Intrinsic::dbg_addr: 5878 case Intrinsic::dbg_declare: { 5879 const auto &DI = 
cast<DbgVariableIntrinsic>(I); 5880 DILocalVariable *Variable = DI.getVariable(); 5881 DIExpression *Expression = DI.getExpression(); 5882 dropDanglingDebugInfo(Variable, Expression); 5883 assert(Variable && "Missing variable"); 5884 LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI 5885 << "\n"); 5886 // Check if address has undef value. 5887 const Value *Address = DI.getVariableLocation(); 5888 if (!Address || isa<UndefValue>(Address) || 5889 (Address->use_empty() && !isa<Argument>(Address))) { 5890 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI 5891 << " (bad/undef/unused-arg address)\n"); 5892 return; 5893 } 5894 5895 bool isParameter = Variable->isParameter() || isa<Argument>(Address); 5896 5897 // Check if this variable can be described by a frame index, typically 5898 // either as a static alloca or a byval parameter. 5899 int FI = std::numeric_limits<int>::max(); 5900 if (const auto *AI = 5901 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) { 5902 if (AI->isStaticAlloca()) { 5903 auto I = FuncInfo.StaticAllocaMap.find(AI); 5904 if (I != FuncInfo.StaticAllocaMap.end()) 5905 FI = I->second; 5906 } 5907 } else if (const auto *Arg = dyn_cast<Argument>( 5908 Address->stripInBoundsConstantOffsets())) { 5909 FI = FuncInfo.getArgumentFrameIndex(Arg); 5910 } 5911 5912 // llvm.dbg.addr is control dependent and always generates indirect 5913 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in 5914 // the MachineFunction variable table. 5915 if (FI != std::numeric_limits<int>::max()) { 5916 if (Intrinsic == Intrinsic::dbg_addr) { 5917 SDDbgValue *SDV = DAG.getFrameIndexDbgValue( 5918 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder); 5919 DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter); 5920 } else { 5921 LLVM_DEBUG(dbgs() << "Skipping " << DI 5922 << " (variable info stashed in MF side table)\n"); 5923 } 5924 return; 5925 } 5926 5927 SDValue &N = NodeMap[Address]; 5928 if (!N.getNode() && isa<Argument>(Address)) 5929 // Check unused arguments map. 5930 N = UnusedArgNodeMap[Address]; 5931 SDDbgValue *SDV; 5932 if (N.getNode()) { 5933 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address)) 5934 Address = BCI->getOperand(0); 5935 // Parameters are handled specially. 5936 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode()); 5937 if (isParameter && FINode) { 5938 // Byval parameter. We have a frame index at this point. 5939 SDV = 5940 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(), 5941 /*IsIndirect*/ true, dl, SDNodeOrder); 5942 } else if (isa<Argument>(Address)) { 5943 // Address is an argument, so try to emit its dbg value using 5944 // virtual register info from the FuncInfo.ValueMap. 5945 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N); 5946 return; 5947 } else { 5948 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), 5949 true, dl, SDNodeOrder); 5950 } 5951 DAG.AddDbgValue(SDV, N.getNode(), isParameter); 5952 } else { 5953 // If Address is an argument then try to emit its dbg value using 5954 // virtual register info from the FuncInfo.ValueMap. 
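    // No DAG node has been created for the address at this point, so the only
    // remaining option is the function-argument side channel tried below. If
    // that fails too, the variable simply gets no location; dropping the
    // intrinsic affects debug-info quality only, never the generated code.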
5955 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, 5956 N)) { 5957 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI 5958 << " (could not emit func-arg dbg_value)\n"); 5959 } 5960 } 5961 return; 5962 } 5963 case Intrinsic::dbg_label: { 5964 const DbgLabelInst &DI = cast<DbgLabelInst>(I); 5965 DILabel *Label = DI.getLabel(); 5966 assert(Label && "Missing label"); 5967 5968 SDDbgLabel *SDV; 5969 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder); 5970 DAG.AddDbgLabel(SDV); 5971 return; 5972 } 5973 case Intrinsic::dbg_value: { 5974 const DbgValueInst &DI = cast<DbgValueInst>(I); 5975 assert(DI.getVariable() && "Missing variable"); 5976 5977 DILocalVariable *Variable = DI.getVariable(); 5978 DIExpression *Expression = DI.getExpression(); 5979 dropDanglingDebugInfo(Variable, Expression); 5980 const Value *V = DI.getValue(); 5981 if (!V) 5982 return; 5983 5984 if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(), 5985 SDNodeOrder)) 5986 return; 5987 5988 // TODO: Dangling debug info will eventually either be resolved or produce 5989 // an Undef DBG_VALUE. However in the resolution case, a gap may appear 5990 // between the original dbg.value location and its resolved DBG_VALUE, which 5991 // we should ideally fill with an extra Undef DBG_VALUE. 5992 5993 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder); 5994 return; 5995 } 5996 5997 case Intrinsic::eh_typeid_for: { 5998 // Find the type id for the given typeinfo. 5999 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0)); 6000 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV); 6001 Res = DAG.getConstant(TypeID, sdl, MVT::i32); 6002 setValue(&I, Res); 6003 return; 6004 } 6005 6006 case Intrinsic::eh_return_i32: 6007 case Intrinsic::eh_return_i64: 6008 DAG.getMachineFunction().setCallsEHReturn(true); 6009 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl, 6010 MVT::Other, 6011 getControlRoot(), 6012 getValue(I.getArgOperand(0)), 6013 getValue(I.getArgOperand(1)))); 6014 return; 6015 case Intrinsic::eh_unwind_init: 6016 DAG.getMachineFunction().setCallsUnwindInit(true); 6017 return; 6018 case Intrinsic::eh_dwarf_cfa: 6019 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl, 6020 TLI.getPointerTy(DAG.getDataLayout()), 6021 getValue(I.getArgOperand(0)))); 6022 return; 6023 case Intrinsic::eh_sjlj_callsite: { 6024 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 6025 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0)); 6026 assert(CI && "Non-constant call site value in eh.sjlj.callsite!"); 6027 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!"); 6028 6029 MMI.setCurrentCallSite(CI->getZExtValue()); 6030 return; 6031 } 6032 case Intrinsic::eh_sjlj_functioncontext: { 6033 // Get and store the index of the function context. 
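    // The function context alloca is created during SjLj EH preparation;
    // recording its frame index lets the backend locate the context object
    // on the stack when it emits the SjLj dispatch code.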
6034 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 6035 AllocaInst *FnCtx = 6036 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); 6037 int FI = FuncInfo.StaticAllocaMap[FnCtx]; 6038 MFI.setFunctionContextIndex(FI); 6039 return; 6040 } 6041 case Intrinsic::eh_sjlj_setjmp: { 6042 SDValue Ops[2]; 6043 Ops[0] = getRoot(); 6044 Ops[1] = getValue(I.getArgOperand(0)); 6045 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl, 6046 DAG.getVTList(MVT::i32, MVT::Other), Ops); 6047 setValue(&I, Op.getValue(0)); 6048 DAG.setRoot(Op.getValue(1)); 6049 return; 6050 } 6051 case Intrinsic::eh_sjlj_longjmp: 6052 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other, 6053 getRoot(), getValue(I.getArgOperand(0)))); 6054 return; 6055 case Intrinsic::eh_sjlj_setup_dispatch: 6056 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other, 6057 getRoot())); 6058 return; 6059 case Intrinsic::masked_gather: 6060 visitMaskedGather(I); 6061 return; 6062 case Intrinsic::masked_load: 6063 visitMaskedLoad(I); 6064 return; 6065 case Intrinsic::masked_scatter: 6066 visitMaskedScatter(I); 6067 return; 6068 case Intrinsic::masked_store: 6069 visitMaskedStore(I); 6070 return; 6071 case Intrinsic::masked_expandload: 6072 visitMaskedLoad(I, true /* IsExpanding */); 6073 return; 6074 case Intrinsic::masked_compressstore: 6075 visitMaskedStore(I, true /* IsCompressing */); 6076 return; 6077 case Intrinsic::powi: 6078 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)), 6079 getValue(I.getArgOperand(1)), DAG)); 6080 return; 6081 case Intrinsic::log: 6082 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 6083 return; 6084 case Intrinsic::log2: 6085 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 6086 return; 6087 case Intrinsic::log10: 6088 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 6089 return; 6090 case Intrinsic::exp: 6091 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 6092 return; 6093 case Intrinsic::exp2: 6094 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 6095 return; 6096 case Intrinsic::pow: 6097 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)), 6098 getValue(I.getArgOperand(1)), DAG, TLI)); 6099 return; 6100 case Intrinsic::sqrt: 6101 case Intrinsic::fabs: 6102 case Intrinsic::sin: 6103 case Intrinsic::cos: 6104 case Intrinsic::floor: 6105 case Intrinsic::ceil: 6106 case Intrinsic::trunc: 6107 case Intrinsic::rint: 6108 case Intrinsic::nearbyint: 6109 case Intrinsic::round: 6110 case Intrinsic::roundeven: 6111 case Intrinsic::canonicalize: { 6112 unsigned Opcode; 6113 switch (Intrinsic) { 6114 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
6115 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; 6116 case Intrinsic::fabs: Opcode = ISD::FABS; break; 6117 case Intrinsic::sin: Opcode = ISD::FSIN; break; 6118 case Intrinsic::cos: Opcode = ISD::FCOS; break; 6119 case Intrinsic::floor: Opcode = ISD::FFLOOR; break; 6120 case Intrinsic::ceil: Opcode = ISD::FCEIL; break; 6121 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; 6122 case Intrinsic::rint: Opcode = ISD::FRINT; break; 6123 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; 6124 case Intrinsic::round: Opcode = ISD::FROUND; break; 6125 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break; 6126 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break; 6127 } 6128 6129 setValue(&I, DAG.getNode(Opcode, sdl, 6130 getValue(I.getArgOperand(0)).getValueType(), 6131 getValue(I.getArgOperand(0)))); 6132 return; 6133 } 6134 case Intrinsic::lround: 6135 case Intrinsic::llround: 6136 case Intrinsic::lrint: 6137 case Intrinsic::llrint: { 6138 unsigned Opcode; 6139 switch (Intrinsic) { 6140 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 6141 case Intrinsic::lround: Opcode = ISD::LROUND; break; 6142 case Intrinsic::llround: Opcode = ISD::LLROUND; break; 6143 case Intrinsic::lrint: Opcode = ISD::LRINT; break; 6144 case Intrinsic::llrint: Opcode = ISD::LLRINT; break; 6145 } 6146 6147 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6148 setValue(&I, DAG.getNode(Opcode, sdl, RetVT, 6149 getValue(I.getArgOperand(0)))); 6150 return; 6151 } 6152 case Intrinsic::minnum: 6153 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl, 6154 getValue(I.getArgOperand(0)).getValueType(), 6155 getValue(I.getArgOperand(0)), 6156 getValue(I.getArgOperand(1)))); 6157 return; 6158 case Intrinsic::maxnum: 6159 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl, 6160 getValue(I.getArgOperand(0)).getValueType(), 6161 getValue(I.getArgOperand(0)), 6162 getValue(I.getArgOperand(1)))); 6163 return; 6164 case Intrinsic::minimum: 6165 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl, 6166 getValue(I.getArgOperand(0)).getValueType(), 6167 getValue(I.getArgOperand(0)), 6168 getValue(I.getArgOperand(1)))); 6169 return; 6170 case Intrinsic::maximum: 6171 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl, 6172 getValue(I.getArgOperand(0)).getValueType(), 6173 getValue(I.getArgOperand(0)), 6174 getValue(I.getArgOperand(1)))); 6175 return; 6176 case Intrinsic::copysign: 6177 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl, 6178 getValue(I.getArgOperand(0)).getValueType(), 6179 getValue(I.getArgOperand(0)), 6180 getValue(I.getArgOperand(1)))); 6181 return; 6182 case Intrinsic::fma: 6183 setValue(&I, DAG.getNode(ISD::FMA, sdl, 6184 getValue(I.getArgOperand(0)).getValueType(), 6185 getValue(I.getArgOperand(0)), 6186 getValue(I.getArgOperand(1)), 6187 getValue(I.getArgOperand(2)))); 6188 return; 6189 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 6190 case Intrinsic::INTRINSIC: 6191 #include "llvm/IR/ConstrainedOps.def" 6192 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I)); 6193 return; 6194 case Intrinsic::fmuladd: { 6195 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6196 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 6197 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { 6198 setValue(&I, DAG.getNode(ISD::FMA, sdl, 6199 getValue(I.getArgOperand(0)).getValueType(), 6200 getValue(I.getArgOperand(0)), 6201 getValue(I.getArgOperand(1)), 6202 getValue(I.getArgOperand(2)))); 6203 } else { 6204 // TODO: Intrinsic calls should have 
fast-math-flags. 6205 SDValue Mul = DAG.getNode(ISD::FMUL, sdl, 6206 getValue(I.getArgOperand(0)).getValueType(), 6207 getValue(I.getArgOperand(0)), 6208 getValue(I.getArgOperand(1))); 6209 SDValue Add = DAG.getNode(ISD::FADD, sdl, 6210 getValue(I.getArgOperand(0)).getValueType(), 6211 Mul, 6212 getValue(I.getArgOperand(2))); 6213 setValue(&I, Add); 6214 } 6215 return; 6216 } 6217 case Intrinsic::convert_to_fp16: 6218 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16, 6219 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16, 6220 getValue(I.getArgOperand(0)), 6221 DAG.getTargetConstant(0, sdl, 6222 MVT::i32)))); 6223 return; 6224 case Intrinsic::convert_from_fp16: 6225 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl, 6226 TLI.getValueType(DAG.getDataLayout(), I.getType()), 6227 DAG.getNode(ISD::BITCAST, sdl, MVT::f16, 6228 getValue(I.getArgOperand(0))))); 6229 return; 6230 case Intrinsic::pcmarker: { 6231 SDValue Tmp = getValue(I.getArgOperand(0)); 6232 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp)); 6233 return; 6234 } 6235 case Intrinsic::readcyclecounter: { 6236 SDValue Op = getRoot(); 6237 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl, 6238 DAG.getVTList(MVT::i64, MVT::Other), Op); 6239 setValue(&I, Res); 6240 DAG.setRoot(Res.getValue(1)); 6241 return; 6242 } 6243 case Intrinsic::bitreverse: 6244 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl, 6245 getValue(I.getArgOperand(0)).getValueType(), 6246 getValue(I.getArgOperand(0)))); 6247 return; 6248 case Intrinsic::bswap: 6249 setValue(&I, DAG.getNode(ISD::BSWAP, sdl, 6250 getValue(I.getArgOperand(0)).getValueType(), 6251 getValue(I.getArgOperand(0)))); 6252 return; 6253 case Intrinsic::cttz: { 6254 SDValue Arg = getValue(I.getArgOperand(0)); 6255 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 6256 EVT Ty = Arg.getValueType(); 6257 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, 6258 sdl, Ty, Arg)); 6259 return; 6260 } 6261 case Intrinsic::ctlz: { 6262 SDValue Arg = getValue(I.getArgOperand(0)); 6263 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 6264 EVT Ty = Arg.getValueType(); 6265 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, 6266 sdl, Ty, Arg)); 6267 return; 6268 } 6269 case Intrinsic::ctpop: { 6270 SDValue Arg = getValue(I.getArgOperand(0)); 6271 EVT Ty = Arg.getValueType(); 6272 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg)); 6273 return; 6274 } 6275 case Intrinsic::fshl: 6276 case Intrinsic::fshr: { 6277 bool IsFSHL = Intrinsic == Intrinsic::fshl; 6278 SDValue X = getValue(I.getArgOperand(0)); 6279 SDValue Y = getValue(I.getArgOperand(1)); 6280 SDValue Z = getValue(I.getArgOperand(2)); 6281 EVT VT = X.getValueType(); 6282 SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT); 6283 SDValue Zero = DAG.getConstant(0, sdl, VT); 6284 SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC); 6285 6286 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR; 6287 if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) { 6288 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z)); 6289 return; 6290 } 6291 6292 // When X == Y, this is rotate. If the data type has a power-of-2 size, we 6293 // avoid the select that is necessary in the general case to filter out 6294 // the 0-shift possibility that leads to UB. 6295 if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) { 6296 auto RotateOpcode = IsFSHL ? 
ISD::ROTL : ISD::ROTR; 6297 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) { 6298 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z)); 6299 return; 6300 } 6301 6302 // Some targets only rotate one way. Try the opposite direction. 6303 RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL; 6304 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) { 6305 // Negate the shift amount because it is safe to ignore the high bits. 6306 SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z); 6307 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt)); 6308 return; 6309 } 6310 6311 // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW)) 6312 // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW)) 6313 SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z); 6314 SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC); 6315 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt); 6316 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt); 6317 setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY)); 6318 return; 6319 } 6320 6321 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 6322 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 6323 SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt); 6324 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt); 6325 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt); 6326 SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY); 6327 6328 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth, 6329 // and that is undefined. We must compare and select to avoid UB. 6330 EVT CCVT = MVT::i1; 6331 if (VT.isVector()) 6332 CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements()); 6333 6334 // For fshl, 0-shift returns the 1st arg (X). 6335 // For fshr, 0-shift returns the 2nd arg (Y). 6336 SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ); 6337 setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? 
X : Y, Or)); 6338 return; 6339 } 6340 case Intrinsic::sadd_sat: { 6341 SDValue Op1 = getValue(I.getArgOperand(0)); 6342 SDValue Op2 = getValue(I.getArgOperand(1)); 6343 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2)); 6344 return; 6345 } 6346 case Intrinsic::uadd_sat: { 6347 SDValue Op1 = getValue(I.getArgOperand(0)); 6348 SDValue Op2 = getValue(I.getArgOperand(1)); 6349 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2)); 6350 return; 6351 } 6352 case Intrinsic::ssub_sat: { 6353 SDValue Op1 = getValue(I.getArgOperand(0)); 6354 SDValue Op2 = getValue(I.getArgOperand(1)); 6355 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2)); 6356 return; 6357 } 6358 case Intrinsic::usub_sat: { 6359 SDValue Op1 = getValue(I.getArgOperand(0)); 6360 SDValue Op2 = getValue(I.getArgOperand(1)); 6361 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2)); 6362 return; 6363 } 6364 case Intrinsic::smul_fix: 6365 case Intrinsic::umul_fix: 6366 case Intrinsic::smul_fix_sat: 6367 case Intrinsic::umul_fix_sat: { 6368 SDValue Op1 = getValue(I.getArgOperand(0)); 6369 SDValue Op2 = getValue(I.getArgOperand(1)); 6370 SDValue Op3 = getValue(I.getArgOperand(2)); 6371 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl, 6372 Op1.getValueType(), Op1, Op2, Op3)); 6373 return; 6374 } 6375 case Intrinsic::sdiv_fix: 6376 case Intrinsic::udiv_fix: 6377 case Intrinsic::sdiv_fix_sat: 6378 case Intrinsic::udiv_fix_sat: { 6379 SDValue Op1 = getValue(I.getArgOperand(0)); 6380 SDValue Op2 = getValue(I.getArgOperand(1)); 6381 SDValue Op3 = getValue(I.getArgOperand(2)); 6382 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl, 6383 Op1, Op2, Op3, DAG, TLI)); 6384 return; 6385 } 6386 case Intrinsic::stacksave: { 6387 SDValue Op = getRoot(); 6388 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6389 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op); 6390 setValue(&I, Res); 6391 DAG.setRoot(Res.getValue(1)); 6392 return; 6393 } 6394 case Intrinsic::stackrestore: 6395 Res = getValue(I.getArgOperand(0)); 6396 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res)); 6397 return; 6398 case Intrinsic::get_dynamic_area_offset: { 6399 SDValue Op = getRoot(); 6400 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout()); 6401 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6402 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for 6403 // target. 
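    // The offset is computed in the target's pointer width, so a result type
    // wider than the pointer cannot be satisfied and is rejected below.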
6404 if (PtrTy.getSizeInBits() < ResTy.getSizeInBits()) 6405 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset" 6406 " intrinsic!"); 6407 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy), 6408 Op); 6409 DAG.setRoot(Op); 6410 setValue(&I, Res); 6411 return; 6412 } 6413 case Intrinsic::stackguard: { 6414 MachineFunction &MF = DAG.getMachineFunction(); 6415 const Module &M = *MF.getFunction().getParent(); 6416 SDValue Chain = getRoot(); 6417 if (TLI.useLoadStackGuardNode()) { 6418 Res = getLoadStackGuard(DAG, sdl, Chain); 6419 } else { 6420 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6421 const Value *Global = TLI.getSDagStackGuard(M); 6422 unsigned Align = DL->getPrefTypeAlignment(Global->getType()); 6423 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global), 6424 MachinePointerInfo(Global, 0), Align, 6425 MachineMemOperand::MOVolatile); 6426 } 6427 if (TLI.useStackGuardXorFP()) 6428 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl); 6429 DAG.setRoot(Chain); 6430 setValue(&I, Res); 6431 return; 6432 } 6433 case Intrinsic::stackprotector: { 6434 // Emit code into the DAG to store the stack guard onto the stack. 6435 MachineFunction &MF = DAG.getMachineFunction(); 6436 MachineFrameInfo &MFI = MF.getFrameInfo(); 6437 SDValue Src, Chain = getRoot(); 6438 6439 if (TLI.useLoadStackGuardNode()) 6440 Src = getLoadStackGuard(DAG, sdl, Chain); 6441 else 6442 Src = getValue(I.getArgOperand(0)); // The guard's value. 6443 6444 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); 6445 6446 int FI = FuncInfo.StaticAllocaMap[Slot]; 6447 MFI.setStackProtectorIndex(FI); 6448 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout()); 6449 6450 SDValue FIN = DAG.getFrameIndex(FI, PtrTy); 6451 6452 // Store the stack protector onto the stack. 6453 Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack( 6454 DAG.getMachineFunction(), FI), 6455 /* Alignment = */ 0, MachineMemOperand::MOVolatile); 6456 setValue(&I, Res); 6457 DAG.setRoot(Res); 6458 return; 6459 } 6460 case Intrinsic::objectsize: 6461 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 6462 6463 case Intrinsic::is_constant: 6464 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 6465 6466 case Intrinsic::annotation: 6467 case Intrinsic::ptr_annotation: 6468 case Intrinsic::launder_invariant_group: 6469 case Intrinsic::strip_invariant_group: 6470 // Drop the intrinsic, but forward the value 6471 setValue(&I, getValue(I.getOperand(0))); 6472 return; 6473 case Intrinsic::assume: 6474 case Intrinsic::var_annotation: 6475 case Intrinsic::sideeffect: 6476 // Discard annotate attributes, assumptions, and artificial side-effects. 6477 return; 6478 6479 case Intrinsic::codeview_annotation: { 6480 // Emit a label associated with this metadata. 
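    // The temporary label marks this program point so that CodeView emission
    // can later associate the annotation strings with its address.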
6481 MachineFunction &MF = DAG.getMachineFunction(); 6482 MCSymbol *Label = 6483 MF.getMMI().getContext().createTempSymbol("annotation", true); 6484 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata(); 6485 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD)); 6486 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label); 6487 DAG.setRoot(Res); 6488 return; 6489 } 6490 6491 case Intrinsic::init_trampoline: { 6492 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); 6493 6494 SDValue Ops[6]; 6495 Ops[0] = getRoot(); 6496 Ops[1] = getValue(I.getArgOperand(0)); 6497 Ops[2] = getValue(I.getArgOperand(1)); 6498 Ops[3] = getValue(I.getArgOperand(2)); 6499 Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); 6500 Ops[5] = DAG.getSrcValue(F); 6501 6502 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops); 6503 6504 DAG.setRoot(Res); 6505 return; 6506 } 6507 case Intrinsic::adjust_trampoline: 6508 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, 6509 TLI.getPointerTy(DAG.getDataLayout()), 6510 getValue(I.getArgOperand(0)))); 6511 return; 6512 case Intrinsic::gcroot: { 6513 assert(DAG.getMachineFunction().getFunction().hasGC() && 6514 "only valid in functions with gc specified, enforced by Verifier"); 6515 assert(GFI && "implied by previous"); 6516 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); 6517 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); 6518 6519 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 6520 GFI->addStackRoot(FI->getIndex(), TypeMap); 6521 return; 6522 } 6523 case Intrinsic::gcread: 6524 case Intrinsic::gcwrite: 6525 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); 6526 case Intrinsic::flt_rounds: 6527 Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot()); 6528 setValue(&I, Res); 6529 DAG.setRoot(Res.getValue(1)); 6530 return; 6531 6532 case Intrinsic::expect: 6533 // Just replace __builtin_expect(exp, c) with EXP. 6534 setValue(&I, getValue(I.getArgOperand(0))); 6535 return; 6536 6537 case Intrinsic::debugtrap: 6538 case Intrinsic::trap: { 6539 StringRef TrapFuncName = 6540 I.getAttributes() 6541 .getAttribute(AttributeList::FunctionIndex, "trap-func-name") 6542 .getValueAsString(); 6543 if (TrapFuncName.empty()) { 6544 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? 6545 ISD::TRAP : ISD::DEBUGTRAP; 6546 DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot())); 6547 return; 6548 } 6549 TargetLowering::ArgListTy Args; 6550 6551 TargetLowering::CallLoweringInfo CLI(DAG); 6552 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 6553 CallingConv::C, I.getType(), 6554 DAG.getExternalSymbol(TrapFuncName.data(), 6555 TLI.getPointerTy(DAG.getDataLayout())), 6556 std::move(Args)); 6557 6558 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 6559 DAG.setRoot(Result.second); 6560 return; 6561 } 6562 6563 case Intrinsic::uadd_with_overflow: 6564 case Intrinsic::sadd_with_overflow: 6565 case Intrinsic::usub_with_overflow: 6566 case Intrinsic::ssub_with_overflow: 6567 case Intrinsic::umul_with_overflow: 6568 case Intrinsic::smul_with_overflow: { 6569 ISD::NodeType Op; 6570 switch (Intrinsic) { 6571 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6572 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; 6573 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; 6574 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; 6575 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; 6576 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; 6577 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; 6578 } 6579 SDValue Op1 = getValue(I.getArgOperand(0)); 6580 SDValue Op2 = getValue(I.getArgOperand(1)); 6581 6582 EVT ResultVT = Op1.getValueType(); 6583 EVT OverflowVT = MVT::i1; 6584 if (ResultVT.isVector()) 6585 OverflowVT = EVT::getVectorVT( 6586 *Context, OverflowVT, ResultVT.getVectorNumElements()); 6587 6588 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT); 6589 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); 6590 return; 6591 } 6592 case Intrinsic::prefetch: { 6593 SDValue Ops[5]; 6594 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 6595 auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore; 6596 Ops[0] = DAG.getRoot(); 6597 Ops[1] = getValue(I.getArgOperand(0)); 6598 Ops[2] = getValue(I.getArgOperand(1)); 6599 Ops[3] = getValue(I.getArgOperand(2)); 6600 Ops[4] = getValue(I.getArgOperand(3)); 6601 SDValue Result = DAG.getMemIntrinsicNode( 6602 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops, 6603 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)), 6604 /* align */ None, Flags); 6605 6606 // Chain the prefetch in parallel with any pending loads, to stay out of 6607 // the way of later optimizations. 6608 PendingLoads.push_back(Result); 6609 Result = getRoot(); 6610 DAG.setRoot(Result); 6611 return; 6612 } 6613 case Intrinsic::lifetime_start: 6614 case Intrinsic::lifetime_end: { 6615 bool IsStart = (Intrinsic == Intrinsic::lifetime_start); 6616 // Stack coloring is not enabled in O0; discard region information. 6617 if (TM.getOptLevel() == CodeGenOpt::None) 6618 return; 6619 6620 const int64_t ObjectSize = 6621 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue(); 6622 Value *const ObjectPtr = I.getArgOperand(1); 6623 SmallVector<const Value *, 4> Allocas; 6624 GetUnderlyingObjects(ObjectPtr, Allocas, *DL); 6625 6626 for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(), 6627 E = Allocas.end(); Object != E; ++Object) { 6628 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object); 6629 6630 // Could not find an Alloca. 6631 if (!LifetimeObject) 6632 continue; 6633 6634 // First check that the Alloca is static; otherwise it won't have a 6635 // valid frame index. 6636 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); 6637 if (SI == FuncInfo.StaticAllocaMap.end()) 6638 return; 6639 6640 const int FrameIndex = SI->second; 6641 int64_t Offset; 6642 if (GetPointerBaseWithConstantOffset( 6643 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject) 6644 Offset = -1; // Cannot determine offset from alloca to lifetime object. 6645 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize, 6646 Offset); 6647 DAG.setRoot(Res); 6648 } 6649 return; 6650 } 6651 case Intrinsic::invariant_start: 6652 // Discard region information. 6653 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout()))); 6654 return; 6655 case Intrinsic::invariant_end: 6656 // Discard region information. 6657 return; 6658 case Intrinsic::clear_cache: 6659 // FunctionName may be null.
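    // A null name means the target has no cache-clearing builtin, in which
    // case the intrinsic is safely dropped as a no-op.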
6660 if (const char *FunctionName = TLI.getClearCacheBuiltinName()) 6661 lowerCallToExternalSymbol(I, FunctionName); 6662 return; 6663 case Intrinsic::donothing: 6664 // ignore 6665 return; 6666 case Intrinsic::experimental_stackmap: 6667 visitStackmap(I); 6668 return; 6669 case Intrinsic::experimental_patchpoint_void: 6670 case Intrinsic::experimental_patchpoint_i64: 6671 visitPatchpoint(I); 6672 return; 6673 case Intrinsic::experimental_gc_statepoint: 6674 LowerStatepoint(cast<GCStatepointInst>(I)); 6675 return; 6676 case Intrinsic::experimental_gc_result: 6677 visitGCResult(cast<GCResultInst>(I)); 6678 return; 6679 case Intrinsic::experimental_gc_relocate: 6680 visitGCRelocate(cast<GCRelocateInst>(I)); 6681 return; 6682 case Intrinsic::instrprof_increment: 6683 llvm_unreachable("instrprof failed to lower an increment"); 6684 case Intrinsic::instrprof_value_profile: 6685 llvm_unreachable("instrprof failed to lower a value profiling call"); 6686 case Intrinsic::localescape: { 6687 MachineFunction &MF = DAG.getMachineFunction(); 6688 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 6689 6690 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 6691 // is the same on all targets. 6692 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) { 6693 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); 6694 if (isa<ConstantPointerNull>(Arg)) 6695 continue; // Skip null pointers. They represent a hole in index space. 6696 AllocaInst *Slot = cast<AllocaInst>(Arg); 6697 assert(FuncInfo.StaticAllocaMap.count(Slot) && 6698 "can only escape static allocas"); 6699 int FI = FuncInfo.StaticAllocaMap[Slot]; 6700 MCSymbol *FrameAllocSym = 6701 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 6702 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx); 6703 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl, 6704 TII->get(TargetOpcode::LOCAL_ESCAPE)) 6705 .addSym(FrameAllocSym) 6706 .addFrameIndex(FI); 6707 } 6708 6709 return; 6710 } 6711 6712 case Intrinsic::localrecover: { 6713 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) 6714 MachineFunction &MF = DAG.getMachineFunction(); 6715 6716 // Get the symbol that defines the frame offset. 6717 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); 6718 auto *Idx = cast<ConstantInt>(I.getArgOperand(2)); 6719 unsigned IdxVal = 6720 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max())); 6721 MCSymbol *FrameAllocSym = 6722 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 6723 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal); 6724 6725 Value *FP = I.getArgOperand(1); 6726 SDValue FPVal = getValue(FP); 6727 EVT PtrVT = FPVal.getValueType(); 6728 6729 // Create a MCSymbol for the label to avoid any target lowering 6730 // that would make this PC relative. 6731 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT); 6732 SDValue OffsetVal = 6733 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym); 6734 6735 // Add the offset to the FP. 6736 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl); 6737 setValue(&I, Add); 6738 6739 return; 6740 } 6741 6742 case Intrinsic::eh_exceptionpointer: 6743 case Intrinsic::eh_exceptioncode: { 6744 // Get the exception pointer vreg, copy from it, and resize it to fit. 
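    // The exception pointer is produced by the catchpad in a target-specific
    // physical register; FuncInfo exposes it through a virtual register so it
    // can be copied here. eh.exceptioncode additionally narrows it to i32.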
6745 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0)); 6746 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 6747 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT); 6748 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC); 6749 SDValue N = 6750 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT); 6751 if (Intrinsic == Intrinsic::eh_exceptioncode) 6752 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32); 6753 setValue(&I, N); 6754 return; 6755 } 6756 case Intrinsic::xray_customevent: { 6757 // Here we want to make sure that the intrinsic behaves as if it has a 6758 // specific calling convention, and only for x86_64. 6759 // FIXME: Support other platforms later. 6760 const auto &Triple = DAG.getTarget().getTargetTriple(); 6761 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux()) 6762 return; 6763 6764 SDLoc DL = getCurSDLoc(); 6765 SmallVector<SDValue, 8> Ops; 6766 6767 // We want to say that we always want the arguments in registers. 6768 SDValue LogEntryVal = getValue(I.getArgOperand(0)); 6769 SDValue StrSizeVal = getValue(I.getArgOperand(1)); 6770 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6771 SDValue Chain = getRoot(); 6772 Ops.push_back(LogEntryVal); 6773 Ops.push_back(StrSizeVal); 6774 Ops.push_back(Chain); 6775 6776 // We need to enforce the calling convention for the callsite, so that 6777 // argument ordering is enforced correctly, and that register allocation can 6778 // see that some registers may be assumed clobbered and have to preserve 6779 // them across calls to the intrinsic. 6780 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL, 6781 DL, NodeTys, Ops); 6782 SDValue patchableNode = SDValue(MN, 0); 6783 DAG.setRoot(patchableNode); 6784 setValue(&I, patchableNode); 6785 return; 6786 } 6787 case Intrinsic::xray_typedevent: { 6788 // Here we want to make sure that the intrinsic behaves as if it has a 6789 // specific calling convention, and only for x86_64. 6790 // FIXME: Support other platforms later. 6791 const auto &Triple = DAG.getTarget().getTargetTriple(); 6792 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux()) 6793 return; 6794 6795 SDLoc DL = getCurSDLoc(); 6796 SmallVector<SDValue, 8> Ops; 6797 6798 // We want to say that we always want the arguments in registers. 6799 // It's unclear to me how manipulating the selection DAG here forces callers 6800 // to provide arguments in registers instead of on the stack. 6801 SDValue LogTypeId = getValue(I.getArgOperand(0)); 6802 SDValue LogEntryVal = getValue(I.getArgOperand(1)); 6803 SDValue StrSizeVal = getValue(I.getArgOperand(2)); 6804 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6805 SDValue Chain = getRoot(); 6806 Ops.push_back(LogTypeId); 6807 Ops.push_back(LogEntryVal); 6808 Ops.push_back(StrSizeVal); 6809 Ops.push_back(Chain); 6810 6811 // We need to enforce the calling convention for the callsite, so that 6812 // argument ordering is enforced correctly, and that register allocation can 6813 // see that some registers may be assumed clobbered and have to preserve 6814 // them across calls to the intrinsic. 
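    // Note: emitting a MachineSDNode with a fixed target opcode (rather than
    // lowering this as an ordinary call) presumably keeps the instruction's
    // shape stable for the XRay sled emission done later in the backend.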
6815 MachineSDNode *MN = DAG.getMachineNode( 6816 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops); 6817 SDValue patchableNode = SDValue(MN, 0); 6818 DAG.setRoot(patchableNode); 6819 setValue(&I, patchableNode); 6820 return; 6821 } 6822 case Intrinsic::experimental_deoptimize: 6823 LowerDeoptimizeCall(&I); 6824 return; 6825 6826 case Intrinsic::experimental_vector_reduce_v2_fadd: 6827 case Intrinsic::experimental_vector_reduce_v2_fmul: 6828 case Intrinsic::experimental_vector_reduce_add: 6829 case Intrinsic::experimental_vector_reduce_mul: 6830 case Intrinsic::experimental_vector_reduce_and: 6831 case Intrinsic::experimental_vector_reduce_or: 6832 case Intrinsic::experimental_vector_reduce_xor: 6833 case Intrinsic::experimental_vector_reduce_smax: 6834 case Intrinsic::experimental_vector_reduce_smin: 6835 case Intrinsic::experimental_vector_reduce_umax: 6836 case Intrinsic::experimental_vector_reduce_umin: 6837 case Intrinsic::experimental_vector_reduce_fmax: 6838 case Intrinsic::experimental_vector_reduce_fmin: 6839 visitVectorReduce(I, Intrinsic); 6840 return; 6841 6842 case Intrinsic::icall_branch_funnel: { 6843 SmallVector<SDValue, 16> Ops; 6844 Ops.push_back(getValue(I.getArgOperand(0))); 6845 6846 int64_t Offset; 6847 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( 6848 I.getArgOperand(1), Offset, DAG.getDataLayout())); 6849 if (!Base) 6850 report_fatal_error( 6851 "llvm.icall.branch.funnel operand must be a GlobalValue"); 6852 Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0)); 6853 6854 struct BranchFunnelTarget { 6855 int64_t Offset; 6856 SDValue Target; 6857 }; 6858 SmallVector<BranchFunnelTarget, 8> Targets; 6859 6860 for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) { 6861 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( 6862 I.getArgOperand(Op), Offset, DAG.getDataLayout())); 6863 if (ElemBase != Base) 6864 report_fatal_error("all llvm.icall.branch.funnel operands must refer " 6865 "to the same GlobalValue"); 6866 6867 SDValue Val = getValue(I.getArgOperand(Op + 1)); 6868 auto *GA = dyn_cast<GlobalAddressSDNode>(Val); 6869 if (!GA) 6870 report_fatal_error( 6871 "llvm.icall.branch.funnel operand must be a GlobalValue"); 6872 Targets.push_back({Offset, DAG.getTargetGlobalAddress( 6873 GA->getGlobal(), getCurSDLoc(), 6874 Val.getValueType(), GA->getOffset())}); 6875 } 6876 llvm::sort(Targets, 6877 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) { 6878 return T1.Offset < T2.Offset; 6879 }); 6880 6881 for (auto &T : Targets) { 6882 Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32)); 6883 Ops.push_back(T.Target); 6884 } 6885 6886 Ops.push_back(DAG.getRoot()); // Chain 6887 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, 6888 getCurSDLoc(), MVT::Other, Ops), 6889 0); 6890 DAG.setRoot(N); 6891 setValue(&I, N); 6892 HasTailCall = true; 6893 return; 6894 } 6895 6896 case Intrinsic::wasm_landingpad_index: 6897 // Information this intrinsic contained has been transferred to 6898 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely 6899 // delete it now. 
6900 return; 6901 6902 case Intrinsic::aarch64_settag: 6903 case Intrinsic::aarch64_settag_zero: { 6904 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6905 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero; 6906 SDValue Val = TSI.EmitTargetCodeForSetTag( 6907 DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)), 6908 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)), 6909 ZeroMemory); 6910 DAG.setRoot(Val); 6911 setValue(&I, Val); 6912 return; 6913 } 6914 case Intrinsic::ptrmask: { 6915 SDValue Ptr = getValue(I.getOperand(0)); 6916 SDValue Const = getValue(I.getOperand(1)); 6917 6918 EVT PtrVT = Ptr.getValueType(); 6919 setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr, 6920 DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT))); 6921 return; 6922 } 6923 case Intrinsic::get_active_lane_mask: { 6924 auto DL = getCurSDLoc(); 6925 SDValue Index = getValue(I.getOperand(0)); 6926 SDValue BTC = getValue(I.getOperand(1)); 6927 Type *ElementTy = I.getOperand(0)->getType(); 6928 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 6929 unsigned VecWidth = VT.getVectorNumElements(); 6930 6931 SmallVector<SDValue, 16> OpsBTC; 6932 SmallVector<SDValue, 16> OpsIndex; 6933 SmallVector<SDValue, 16> OpsStepConstants; 6934 for (unsigned i = 0; i < VecWidth; i++) { 6935 OpsBTC.push_back(BTC); 6936 OpsIndex.push_back(Index); 6937 OpsStepConstants.push_back(DAG.getConstant(i, DL, MVT::getVT(ElementTy))); 6938 } 6939 6940 EVT CCVT = MVT::i1; 6941 CCVT = EVT::getVectorVT(I.getContext(), CCVT, VecWidth); 6942 6943 auto VecTy = MVT::getVT(FixedVectorType::get(ElementTy, VecWidth)); 6944 SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex); 6945 SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants); 6946 SDValue VectorInduction = DAG.getNode( 6947 ISD::UADDO, DL, DAG.getVTList(VecTy, CCVT), VectorIndex, VectorStep); 6948 SDValue VectorBTC = DAG.getBuildVector(VecTy, DL, OpsBTC); 6949 SDValue SetCC = DAG.getSetCC(DL, CCVT, VectorInduction.getValue(0), 6950 VectorBTC, ISD::CondCode::SETULE); 6951 setValue(&I, DAG.getNode(ISD::AND, DL, CCVT, 6952 DAG.getNOT(DL, VectorInduction.getValue(1), CCVT), 6953 SetCC)); 6954 return; 6955 } 6956 } 6957 } 6958 6959 void SelectionDAGBuilder::visitConstrainedFPIntrinsic( 6960 const ConstrainedFPIntrinsic &FPI) { 6961 SDLoc sdl = getCurSDLoc(); 6962 6963 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6964 SmallVector<EVT, 4> ValueVTs; 6965 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs); 6966 ValueVTs.push_back(MVT::Other); // Out chain 6967 6968 // We do not need to serialize constrained FP intrinsics against 6969 // each other or against (nonvolatile) loads, so they can be 6970 // chained like loads. 6971 SDValue Chain = DAG.getRoot(); 6972 SmallVector<SDValue, 4> Opers; 6973 Opers.push_back(Chain); 6974 if (FPI.isUnaryOp()) { 6975 Opers.push_back(getValue(FPI.getArgOperand(0))); 6976 } else if (FPI.isTernaryOp()) { 6977 Opers.push_back(getValue(FPI.getArgOperand(0))); 6978 Opers.push_back(getValue(FPI.getArgOperand(1))); 6979 Opers.push_back(getValue(FPI.getArgOperand(2))); 6980 } else { 6981 Opers.push_back(getValue(FPI.getArgOperand(0))); 6982 Opers.push_back(getValue(FPI.getArgOperand(1))); 6983 } 6984 6985 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) { 6986 assert(Result.getNode()->getNumValues() == 2); 6987 6988 // Push node to the appropriate list so that future instructions can be 6989 // chained up correctly. 
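    // Result value 0 carries the FP result; result value 1 is the out-chain,
    // which the assert above guarantees to exist.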
6990 SDValue OutChain = Result.getValue(1); 6991 switch (EB) { 6992 case fp::ExceptionBehavior::ebIgnore: 6993 // The only reason why ebIgnore nodes still need to be chained is that 6994 // they might depend on the current rounding mode, and therefore must 6995 // not be moved across instructions that may change that mode. 6996 LLVM_FALLTHROUGH; 6997 case fp::ExceptionBehavior::ebMayTrap: 6998 // These must not be moved across calls or instructions that may change 6999 // floating-point exception masks. 7000 PendingConstrainedFP.push_back(OutChain); 7001 break; 7002 case fp::ExceptionBehavior::ebStrict: 7003 // These must not be moved across calls or instructions that may change 7004 // floating-point exception masks or read floating-point exception flags. 7005 // In addition, they cannot be optimized out even if unused. 7006 PendingConstrainedFPStrict.push_back(OutChain); 7007 break; 7008 } 7009 }; 7010 7011 SDVTList VTs = DAG.getVTList(ValueVTs); 7012 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue(); 7013 7014 SDNodeFlags Flags; 7015 if (EB == fp::ExceptionBehavior::ebIgnore) 7016 Flags.setNoFPExcept(true); 7017 7018 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI)) 7019 Flags.copyFMF(*FPOp); 7020 7021 unsigned Opcode; 7022 switch (FPI.getIntrinsicID()) { 7023 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 7024 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 7025 case Intrinsic::INTRINSIC: \ 7026 Opcode = ISD::STRICT_##DAGN; \ 7027 break; 7028 #include "llvm/IR/ConstrainedOps.def" 7029 case Intrinsic::experimental_constrained_fmuladd: { 7030 Opcode = ISD::STRICT_FMA; 7031 // Break fmuladd into fmul and fadd. 7032 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict || 7033 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), 7034 ValueVTs[0])) { 7035 Opers.pop_back(); 7036 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags); 7037 pushOutChain(Mul, EB); 7038 Opcode = ISD::STRICT_FADD; 7039 Opers.clear(); 7040 Opers.push_back(Mul.getValue(1)); 7041 Opers.push_back(Mul.getValue(0)); 7042 Opers.push_back(getValue(FPI.getArgOperand(2))); 7043 } 7044 break; 7045 } 7046 } 7047 7048 // A few strict DAG nodes carry additional operands that are not 7049 // set up by the default code above. 7050 switch (Opcode) { 7051 default: break; 7052 case ISD::STRICT_FP_ROUND: 7053 Opers.push_back( 7054 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()))); 7055 break; 7056 case ISD::STRICT_FSETCC: 7057 case ISD::STRICT_FSETCCS: { 7058 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI); 7059 Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate()))); 7060 break; 7061 } 7062 } 7063 7064 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags); 7065 pushOutChain(Result, EB); 7066 7067 SDValue FPResult = Result.getValue(0); 7068 setValue(&FPI, FPResult); 7069 } 7070 7071 std::pair<SDValue, SDValue> 7072 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, 7073 const BasicBlock *EHPadBB) { 7074 MachineFunction &MF = DAG.getMachineFunction(); 7075 MachineModuleInfo &MMI = MF.getMMI(); 7076 MCSymbol *BeginLabel = nullptr; 7077 7078 if (EHPadBB) { 7079 // Insert a label before the invoke call to mark the try range. This can be 7080 // used to detect deletion of the invoke via the MachineModuleInfo.
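    // BeginLabel is paired with an EndLabel emitted after the call below;
    // together they bound the try range recorded for the EH call-site table.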
7081 BeginLabel = MMI.getContext().createTempSymbol(); 7082 7083 // For SjLj, keep track of which landing pads go with which invokes 7084 // so as to maintain the ordering of pads in the LSDA. 7085 unsigned CallSiteIndex = MMI.getCurrentCallSite(); 7086 if (CallSiteIndex) { 7087 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); 7088 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex); 7089 7090 // Now that the call site is handled, stop tracking it. 7091 MMI.setCurrentCallSite(0); 7092 } 7093 7094 // Both PendingLoads and PendingExports must be flushed here; 7095 // this call might not return. 7096 (void)getRoot(); 7097 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); 7098 7099 CLI.setChain(getRoot()); 7100 } 7101 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7102 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 7103 7104 assert((CLI.IsTailCall || Result.second.getNode()) && 7105 "Non-null chain expected with non-tail call!"); 7106 assert((Result.second.getNode() || !Result.first.getNode()) && 7107 "Null value expected with tail call!"); 7108 7109 if (!Result.second.getNode()) { 7110 // As a special case, a null chain means that a tail call has been emitted 7111 // and the DAG root is already updated. 7112 HasTailCall = true; 7113 7114 // Since there's no actual continuation from this block, nothing can be 7115 // relying on us setting vregs for them. 7116 PendingExports.clear(); 7117 } else { 7118 DAG.setRoot(Result.second); 7119 } 7120 7121 if (EHPadBB) { 7122 // Insert a label at the end of the invoke call to mark the try range. This 7123 // can be used to detect deletion of the invoke via the MachineModuleInfo. 7124 MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); 7125 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); 7126 7127 // Inform MachineModuleInfo of range. 7128 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); 7129 // There is a platform (e.g. wasm) that uses funclet style IR but does not 7130 // actually use outlined funclets and their LSDA info style. 7131 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { 7132 assert(CLI.CB); 7133 WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo(); 7134 EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel); 7135 } else if (!isScopedEHPersonality(Pers)) { 7136 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel); 7137 } 7138 } 7139 7140 return Result; 7141 } 7142 7143 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, 7144 bool isTailCall, 7145 const BasicBlock *EHPadBB) { 7146 auto &DL = DAG.getDataLayout(); 7147 FunctionType *FTy = CB.getFunctionType(); 7148 Type *RetTy = CB.getType(); 7149 7150 TargetLowering::ArgListTy Args; 7151 Args.reserve(CB.arg_size()); 7152 7153 const Value *SwiftErrorVal = nullptr; 7154 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7155 7156 if (isTailCall) { 7157 // Avoid emitting tail calls in functions with the disable-tail-calls 7158 // attribute. 7159 auto *Caller = CB.getParent()->getParent(); 7160 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() == 7161 "true") 7162 isTailCall = false; 7163 7164 // We can't tail call inside a function with a swifterror argument. Lowering 7165 // does not support this yet. It would have to move into the swifterror 7166 // register before the call. 
7167 if (TLI.supportSwiftError() && 7168 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 7169 isTailCall = false; 7170 } 7171 7172 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) { 7173 TargetLowering::ArgListEntry Entry; 7174 const Value *V = *I; 7175 7176 // Skip empty types 7177 if (V->getType()->isEmptyTy()) 7178 continue; 7179 7180 SDValue ArgNode = getValue(V); 7181 Entry.Node = ArgNode; Entry.Ty = V->getType(); 7182 7183 Entry.setAttributes(&CB, I - CB.arg_begin()); 7184 7185 // Use swifterror virtual register as input to the call. 7186 if (Entry.IsSwiftError && TLI.supportSwiftError()) { 7187 SwiftErrorVal = V; 7188 // Find the virtual register for the actual swifterror argument and use 7189 // it, rather than the Value itself, as the call operand. 7190 Entry.Node = 7191 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V), 7192 EVT(TLI.getPointerTy(DL))); 7193 } 7194 7195 Args.push_back(Entry); 7196 7197 // If we have an explicit sret argument that is an Instruction (i.e., it 7198 // might point to function-local memory), we can't meaningfully tail-call. 7199 if (Entry.IsSRet && isa<Instruction>(V)) 7200 isTailCall = false; 7201 } 7202 7203 // If the call site has a cfguardtarget operand bundle, create and add an 7204 // additional ArgListEntry. 7205 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) { 7206 TargetLowering::ArgListEntry Entry; 7207 Value *V = Bundle->Inputs[0]; 7208 SDValue ArgNode = getValue(V); 7209 Entry.Node = ArgNode; 7210 Entry.Ty = V->getType(); 7211 Entry.IsCFGuardTarget = true; 7212 Args.push_back(Entry); 7213 } 7214 7215 // Check if target-independent constraints permit a tail call here. 7216 // Target-dependent constraints are checked within TLI->LowerCallTo. 7217 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget())) 7218 isTailCall = false; 7219 7220 // Disable tail calls if there is a swifterror argument. Targets have not 7221 // been updated to support tail calls. 7222 if (TLI.supportSwiftError() && SwiftErrorVal) 7223 isTailCall = false; 7224 7225 TargetLowering::CallLoweringInfo CLI(DAG); 7226 CLI.setDebugLoc(getCurSDLoc()) 7227 .setChain(getRoot()) 7228 .setCallee(RetTy, FTy, Callee, std::move(Args), CB) 7229 .setTailCall(isTailCall) 7230 .setConvergent(CB.isConvergent()) 7231 .setIsPreallocated( 7232 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0); 7233 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); 7234 7235 if (Result.first.getNode()) { 7236 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first); 7237 setValue(&CB, Result.first); 7238 } 7239 7240 // The last element of CLI.InVals has the SDValue for swifterror return. 7241 // Here we copy it to a virtual register and update SwiftErrorMap for 7242 // book-keeping. 7243 if (SwiftErrorVal && TLI.supportSwiftError()) { 7244 // Get the last element of InVals. 7245 SDValue Src = CLI.InVals.back(); 7246 Register VReg = 7247 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal); 7248 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src); 7249 DAG.setRoot(CopyNode); 7250 } 7251 } 7252 7253 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, 7254 SelectionDAGBuilder &Builder) { 7255 // Check to see if this load can be trivially constant folded, e.g. if the 7256 // input is from a string literal. 7257 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { 7258 // Cast pointer to the type we really want to load.
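    // For example, for memcmp(p, "ab", 2) the source operand is a constant
    // string, so the i16 load of it folds to a constant here and no runtime
    // load is emitted.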
7259 Type *LoadTy = 7260 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits()); 7261 if (LoadVT.isVector()) 7262 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements()); 7263 7264 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput), 7265 PointerType::getUnqual(LoadTy)); 7266 7267 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr( 7268 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL)) 7269 return Builder.getValue(LoadCst); 7270 } 7271 7272 // Otherwise, we have to emit the load. If the pointer is to unfoldable but 7273 // still constant memory, the input chain can be the entry node. 7274 SDValue Root; 7275 bool ConstantMemory = false; 7276 7277 // Do not serialize (non-volatile) loads of constant memory with anything. 7278 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) { 7279 Root = Builder.DAG.getEntryNode(); 7280 ConstantMemory = true; 7281 } else { 7282 // Do not serialize non-volatile loads against each other. 7283 Root = Builder.DAG.getRoot(); 7284 } 7285 7286 SDValue Ptr = Builder.getValue(PtrVal); 7287 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, 7288 Ptr, MachinePointerInfo(PtrVal), 7289 /* Alignment = */ 1); 7290 7291 if (!ConstantMemory) 7292 Builder.PendingLoads.push_back(LoadVal.getValue(1)); 7293 return LoadVal; 7294 } 7295 7296 /// Record the value for an instruction that produces an integer result, 7297 /// converting the type where necessary. 7298 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, 7299 SDValue Value, 7300 bool IsSigned) { 7301 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 7302 I.getType(), true); 7303 if (IsSigned) 7304 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT); 7305 else 7306 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT); 7307 setValue(&I, Value); 7308 } 7309 7310 /// See if we can lower a memcmp call into an optimized form. If so, return 7311 /// true and lower it. Otherwise return false, and it will be lowered like a 7312 /// normal call. 7313 /// The caller already checked that \p I calls the appropriate LibFunc with a 7314 /// correct prototype. 7315 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) { 7316 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1); 7317 const Value *Size = I.getArgOperand(2); 7318 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size); 7319 if (CSize && CSize->getZExtValue() == 0) { 7320 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 7321 I.getType(), true); 7322 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT)); 7323 return true; 7324 } 7325 7326 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 7327 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp( 7328 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS), 7329 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS)); 7330 if (Res.first.getNode()) { 7331 processIntegerCallValue(I, Res.first, true); 7332 PendingLoads.push_back(Res.second); 7333 return true; 7334 } 7335 7336 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 7337 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 7338 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I)) 7339 return false; 7340 7341 // If the target has a fast compare for the given size, it will return a 7342 // preferred load type for that size. Require that the load VT is legal and 7343 // that the target supports unaligned loads of that type. 
Otherwise, return 7344 // INVALID. 7345 auto hasFastLoadsAndCompare = [&](unsigned NumBits) { 7346 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7347 MVT LVT = TLI.hasFastEqualityCompare(NumBits); 7348 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) { 7349 // TODO: Handle 5 byte compare as 4-byte + 1 byte. 7350 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. 7351 // TODO: Check alignment of src and dest ptrs. 7352 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); 7353 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); 7354 if (!TLI.isTypeLegal(LVT) || 7355 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) || 7356 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS)) 7357 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE; 7358 } 7359 7360 return LVT; 7361 }; 7362 7363 // This turns into unaligned loads. We only do this if the target natively 7364 // supports the MVT we'll be loading or if it is small enough (<= 4) that 7365 // we'll only produce a small number of byte loads. 7366 MVT LoadVT; 7367 unsigned NumBitsToCompare = CSize->getZExtValue() * 8; 7368 switch (NumBitsToCompare) { 7369 default: 7370 return false; 7371 case 16: 7372 LoadVT = MVT::i16; 7373 break; 7374 case 32: 7375 LoadVT = MVT::i32; 7376 break; 7377 case 64: 7378 case 128: 7379 case 256: 7380 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare); 7381 break; 7382 } 7383 7384 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE) 7385 return false; 7386 7387 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this); 7388 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this); 7389 7390 // Bitcast to a wide integer type if the loads are vectors. 7391 if (LoadVT.isVector()) { 7392 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits()); 7393 LoadL = DAG.getBitcast(CmpVT, LoadL); 7394 LoadR = DAG.getBitcast(CmpVT, LoadR); 7395 } 7396 7397 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE); 7398 processIntegerCallValue(I, Cmp, false); 7399 return true; 7400 } 7401 7402 /// See if we can lower a memchr call into an optimized form. If so, return 7403 /// true and lower it. Otherwise return false, and it will be lowered like a 7404 /// normal call. 7405 /// The caller already checked that \p I calls the appropriate LibFunc with a 7406 /// correct prototype. 7407 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { 7408 const Value *Src = I.getArgOperand(0); 7409 const Value *Char = I.getArgOperand(1); 7410 const Value *Length = I.getArgOperand(2); 7411 7412 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 7413 std::pair<SDValue, SDValue> Res = 7414 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(), 7415 getValue(Src), getValue(Char), getValue(Length), 7416 MachinePointerInfo(Src)); 7417 if (Res.first.getNode()) { 7418 setValue(&I, Res.first); 7419 PendingLoads.push_back(Res.second); 7420 return true; 7421 } 7422 7423 return false; 7424 } 7425 7426 /// See if we can lower a mempcpy call into an optimized form. If so, return 7427 /// true and lower it. Otherwise return false, and it will be lowered like a 7428 /// normal call. 7429 /// The caller already checked that \p I calls the appropriate LibFunc with a 7430 /// correct prototype. 
/// See if we can lower a mempcpy call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
  SDValue Dst = getValue(I.getArgOperand(0));
  SDValue Src = getValue(I.getArgOperand(1));
  SDValue Size = getValue(I.getArgOperand(2));

  Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
  Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
  // DAG::getMemcpy needs Alignment to be defined.
  Align Alignment = std::min(DstAlign, SrcAlign);

  bool isVol = false;
  SDLoc sdl = getCurSDLoc();

  // In the mempcpy context we need to pass in a false value for isTailCall
  // because the return pointer needs to be adjusted by the size of
  // the copied memory.
  SDValue Root = isVol ? getRoot() : getMemoryRoot();
  SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
                             /*isTailCall=*/false,
                             MachinePointerInfo(I.getArgOperand(0)),
                             MachinePointerInfo(I.getArgOperand(1)));
  assert(MC.getNode() != nullptr &&
         "** memcpy should not be lowered as TailCall in mempcpy context **");
  DAG.setRoot(MC);

  // Check if Size needs to be truncated or extended.
  Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());

  // Adjust return pointer to point just past the last dst byte.
  SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
                                    Dst, Size);
  setValue(&I, DstPlusSize);
  return true;
}

/// See if we can lower a strcpy call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
                                getValue(Arg0), getValue(Arg1),
                                MachinePointerInfo(Arg0),
                                MachinePointerInfo(Arg1), isStpcpy);
  if (Res.first.getNode()) {
    setValue(&I, Res.first);
    DAG.setRoot(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strcmp call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Arg0), getValue(Arg1),
                                MachinePointerInfo(Arg0),
                                MachinePointerInfo(Arg1));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

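// Note (illustrative, not from the original source): strcmp's target result
// is recorded with IsSigned == true because its return value is negative,
// zero, or positive, whereas strlen and strnlen below pass IsSigned == false
// since a length is unsigned.
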
/// See if we can lower a strlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Arg0), MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strnlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                 getValue(Arg0), getValue(Arg1),
                                 MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a unary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it, otherwise return
/// false and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                              unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp = getValue(I.getArgOperand(0));
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
  return true;
}

/// See if we can lower a binary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it. Otherwise return
/// false, and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
                                               unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp0 = getValue(I.getArgOperand(0));
  SDValue Tmp1 = getValue(I.getArgOperand(1));
  EVT VT = Tmp0.getValueType();
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
  return true;
}

void SelectionDAGBuilder::visitCall(const CallInst &I) {
  // Handle inline assembly differently.
  if (I.isInlineAsm()) {
    visitInlineAsm(I);
    return;
  }

  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      // Is this an LLVM intrinsic or a target-specific intrinsic?
      unsigned IID = F->getIntrinsicID();
      if (!IID)
        if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
          IID = II->getIntrinsicID(F);

      if (IID) {
        visitIntrinsicCall(I, IID);
        return;
      }
    }

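    // For example (illustrative, not from the original source): a call to a
    // declared function such as
    //   declare double @sin(double)
    // that is not marked nobuiltin and only reads memory is emitted directly
    // as an ISD::FSIN node by the switch below instead of a libcall.
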
    // Check for well-known libc/libm calls.  If the function is internal, it
    // can't be a library call.  Don't do the check if marked as nobuiltin for
    // some reason or the call site requires strict floating point semantics.
    LibFunc Func;
    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
        LibInfo->hasOptimizedCodeGen(Func)) {
      switch (Func) {
      default: break;
      case LibFunc_copysign:
      case LibFunc_copysignf:
      case LibFunc_copysignl:
        // We already checked this call's prototype; verify it doesn't modify
        // errno.
        if (I.onlyReadsMemory()) {
          SDValue LHS = getValue(I.getArgOperand(0));
          SDValue RHS = getValue(I.getArgOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
        break;
      case LibFunc_fabs:
      case LibFunc_fabsf:
      case LibFunc_fabsl:
        if (visitUnaryFloatCall(I, ISD::FABS))
          return;
        break;
      case LibFunc_fmin:
      case LibFunc_fminf:
      case LibFunc_fminl:
        if (visitBinaryFloatCall(I, ISD::FMINNUM))
          return;
        break;
      case LibFunc_fmax:
      case LibFunc_fmaxf:
      case LibFunc_fmaxl:
        if (visitBinaryFloatCall(I, ISD::FMAXNUM))
          return;
        break;
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_sinl:
        if (visitUnaryFloatCall(I, ISD::FSIN))
          return;
        break;
      case LibFunc_cos:
      case LibFunc_cosf:
      case LibFunc_cosl:
        if (visitUnaryFloatCall(I, ISD::FCOS))
          return;
        break;
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
      case LibFunc_sqrtl:
      case LibFunc_sqrt_finite:
      case LibFunc_sqrtf_finite:
      case LibFunc_sqrtl_finite:
        if (visitUnaryFloatCall(I, ISD::FSQRT))
          return;
        break;
      case LibFunc_floor:
      case LibFunc_floorf:
      case LibFunc_floorl:
        if (visitUnaryFloatCall(I, ISD::FFLOOR))
          return;
        break;
      case LibFunc_nearbyint:
      case LibFunc_nearbyintf:
      case LibFunc_nearbyintl:
        if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
          return;
        break;
      case LibFunc_ceil:
      case LibFunc_ceilf:
      case LibFunc_ceill:
        if (visitUnaryFloatCall(I, ISD::FCEIL))
          return;
        break;
      case LibFunc_rint:
      case LibFunc_rintf:
      case LibFunc_rintl:
        if (visitUnaryFloatCall(I, ISD::FRINT))
          return;
        break;
      case LibFunc_round:
      case LibFunc_roundf:
      case LibFunc_roundl:
        if (visitUnaryFloatCall(I, ISD::FROUND))
          return;
        break;
      case LibFunc_trunc:
      case LibFunc_truncf:
      case LibFunc_truncl:
        if (visitUnaryFloatCall(I, ISD::FTRUNC))
          return;
        break;
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log2l:
        if (visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  // CFGuardTarget bundles are lowered in LowerCallTo.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
         "Cannot lower calls with arbitrary operand bundles!");

  SDValue Callee = getValue(I.getCalledOperand());

  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
    LowerCallTo(I, Callee, I.isTailCall());
}

namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
    : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
  }

  /// Whether or not this operand accesses memory.
  bool hasMemory(const TargetLowering &TLI) const {
    // Indirect operand accesses access memory.
    if (isIndirect)
      return true;

    for (const auto &Code : Codes)
      if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
        return true;

    return false;
  }

  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
  /// corresponds to.  If there is no Value* for this operand, it returns
  /// MVT::Other.
  EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL) const {
    if (!CallOperandVal) return MVT::Other;

    if (isa<BasicBlock>(CallOperandVal))
      return TLI.getProgramPointerTy(DL);

    llvm::Type *OpTy = CallOperandVal->getType();

    // FIXME: code duplicated from TargetLowering::ParseConstraints().
    // If this is an indirect operand, the operand is a pointer to the
    // accessed type.
    if (isIndirect) {
      PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
      if (!PtrTy)
        report_fatal_error("Indirect operand for inline asm not a pointer!");
      OpTy = PtrTy->getElementType();
    }

    // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
    if (StructType *STy = dyn_cast<StructType>(OpTy))
      if (STy->getNumElements() == 1)
        OpTy = STy->getElementType(0);

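    // Illustrative example (not from the original source): an operand of
    // type { i16, i16 } is not a single value type but is sized at 32 bits,
    // so the switch below retypes it as i32 and it can be carried in one
    // 32-bit register.
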
    // If OpTy is not a single value, it may be a struct/union that we
    // can tile with integers.
    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
      unsigned BitSize = DL.getTypeSizeInBits(OpTy);
      switch (BitSize) {
      default: break;
      case 1:
      case 8:
      case 16:
      case 32:
      case 64:
      case 128:
        OpTy = IntegerType::get(Context, BitSize);
        break;
      }
    }

    return TLI.getValueType(DL, OpTy, true);
  }
};

} // end anonymous namespace

/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  const auto &TLI = DAG.getTargetLoweringInfo();

  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  if ((OpInfo.ConstraintVT.isInteger() !=
       MatchingOpInfo.ConstraintVT.isInteger()) ||
      (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}

/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constant pool if we
  // can, otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants. Also, this may take
  // an additional register for the computation and we don't want that
  // either.

  // If the operand is a float, integer, or vector constant, spill to a
  // constant pool entry to get its address.
  const Value *OpVal = OpInfo.CallOperandVal;
  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
      isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
    OpInfo.CallOperand = DAG.getConstantPool(
        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
    return Chain;
  }

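  // Example (illustrative, not from the original source): an inline asm that
  // consumes a literal, such as
  //   asm("fldl %0" :: "m"(3.14));
  // reaches this point with a ConstantFP operand that has no address, so the
  // code above materializes it in the constant pool and uses the pool entry's
  // address as the memory operand.
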
  // Otherwise, create a stack slot and emit a store to it before the asm.
  Type *Ty = OpVal->getType();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo().CreateStackObject(
      TySize, DL.getPrefTypeAlign(Ty), false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
                            MachinePointerInfo::getFixedStack(MF, SSFI),
                            TLI.getMemValueType(DL, Ty));
  OpInfo.CallOperand = StackSlot;

  return Chain;
}

/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand.  We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process.  However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation.  This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand
///   RefOpInfo describes the matching operand if any, the operand otherwise
static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
                                 SDISelAsmOperandInfo &OpInfo,
                                 SDISelAsmOperandInfo &RefOpInfo) {
  LLVMContext &Context = *DAG.getContext();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<unsigned, 4> Regs;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  unsigned AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // Get the actual register value type.  This is important, because the user
  // may have asked for (e.g.) the AX register in i32 type.  We need to
  // remember that AX is actually i16 to get the right extension.
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);

  if (OpInfo.ConstraintVT != MVT::Other) {
    // If this is an FP operand in an integer register (or vice versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains.  If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types).  Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this is associated with a specific register, initialize the iterator
  // to the correct place.  If virtual, make sure we have enough registers.

  // Initialize iterator if necessary.
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Do not check for single registers.
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be member of RC");
  }

  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
}

static unsigned
findMatchingInlineAsmOperand(unsigned OperandNo,
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag =
        cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
    assert((InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
            InlineAsm::isMemKind(OpFlag)) &&
           "Skipped past definitions?");
    CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
  }
  return CurOp;
}

namespace {

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &Call) {
    const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (Call.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

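  // Example (illustrative, not from the original source): given
  //   asm("movl %1, %0" : "=m"(x) : "m"(y));
  // update() below adds Extra_MayStore for the "=m" output and Extra_MayLoad
  // for the "m" input, so the asm is conservatively treated as touching
  // memory.
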
  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints.  However, the
    // meaning of an Other constraint can be target-specific and we can't
    // easily reason about it.  Therefore, be conservative and set
    // MayLoad/MayStore for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // end anonymous namespace

/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);

  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(Call);

  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0;   // ResNo - The result number of the next output.
  unsigned NumMatchingOps = 0;
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
      OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);

      // Process the call argument. BasicBlocks are labels, currently appearing
      // only in asm's.
      if (isa<CallBrInst>(Call) &&
          ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
                        cast<CallBrInst>(&Call)->getNumIndirectDests() -
                        NumMatchingOps) &&
          (NumMatchingOps == 0 ||
           ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
                        NumMatchingOps))) {
        const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
        EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
        OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
      } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
      } else {
        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
      }

      OpInfo.ConstraintVT =
          OpInfo
              .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
              .getSimpleVT();
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      // The return value of the call is this value.  As such, there is no
      // corresponding argument.
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT = TLI.getSimpleValueType(
            DAG.getDataLayout(), STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI.getSimpleValueType(DAG.getDataLayout(), Call.getType());
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.hasMatchingInput())
      ++NumMatchingOps;

    if (!HasSideEffect)
      HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(T, SDValue());

    if (T.ConstraintType == TargetLowering::C_Immediate &&
        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could cause an integer to show up.
      return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
                                          "' expects an integer constant "
                                          "expression");

    ExtraInfo.update(T);
  }

  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();

  bool IsCallBr = isa<CallBrInst>(Call);
  if (IsCallBr) {
    // If this is a callbr we need to flush pending exports since inlineasm_br
    // is a terminator. We need to do this before nodes are glued to
    // the inlineasm_br node.
    Chain = getControlRoot();
  }

  // Second pass over the constraints: compute which constraint option to use.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.Type == InlineAsm::isClobber)
      continue;

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }

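  // A sketch of the INLINEASM operand layout assembled below (see InlineAsm.h
  // for the authoritative description); this summary is not from the original
  // source:
  //   Op 0: input chain
  //   Op 1: the asm string, as a target external symbol
  //   Op 2: the !srcloc metadata node (possibly null)
  //   Op 3: the extra-info flags computed above
  // followed by one flag word plus payload per constraint operand, and an
  // optional trailing glue value.
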
  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
      IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));

  // If we have a !srcloc metadata node associated with it, we want to attach
  // this to the ultimately generated inline asm machineinstr.  To do this, we
  // pass in the third operand as this (potentially null) inline asm MDNode.
  const MDNode *SrcLoc = Call.getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
  // bits as operand 3.
  AsmNodeOperands.push_back(DAG.getTargetConstant(
      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));

  // Third pass: Loop over operands to prepare DAG-level operands.  As part of
  // this, assign virtual and physical registers for inputs and outputs.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // Assign Registers.
    SDISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;
    GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);

    auto DetectWriteToReservedRegister = [&]() {
      const MachineFunction &MF = DAG.getMachineFunction();
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
        if (Register::isPhysicalRegister(Reg) &&
            TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
          const char *RegName = TRI.getName(Reg);
          emitInlineAsmError(Call, "write to reserved register '" +
                                       Twine(RegName) + "'");
          return true;
        }
      }
      return false;
    };

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass, and a target-defined fashion for
        // C_Immediate/C_Other). Find a register that we can use.
        if (OpInfo.AssignedRegs.Regs.empty()) {
          emitInlineAsmError(
              Call, "couldn't allocate output register for constraint '" +
                        Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        if (DetectWriteToReservedRegister())
          return;

        // Add information to the INLINEASM node to know that this register is
        // set.
        OpInfo.AssignedRegs.AddInlineAsmOperands(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
      }
      break;

    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {
        // If this is required to match an output register we have already set,
        // just use its register.
        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
                                                  AsmNodeOperands);
        unsigned OpFlag =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if (InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            emitInlineAsmError(Call, "inline asm not supported yet: "
                                     "don't know how to handle tied "
                                     "indirect register inputs");
            return;
          }

          MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
          SmallVector<unsigned, 4> Regs;

          if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
            unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
            MachineRegisterInfo &RegInfo =
                DAG.getMachineFunction().getRegInfo();
            for (unsigned i = 0; i != NumRegs; ++i)
              Regs.push_back(RegInfo.createVirtualRegister(RC));
          } else {
            emitInlineAsmError(Call,
                               "inline asm error: This value type register "
                               "class is not natively supported!");
            return;
          }

          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());

          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the newly-created virtual registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
                                           true, OpInfo.getMatchedOperand(), dl,
                                           DAG, AsmNodeOperands);
          break;
        }

        assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
        assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
        OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
                                                     OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
        break;
      }

      // Treat indirect 'X' constraint as memory.
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                         Ops, DAG);
        if (Ops.empty()) {
          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
            if (isa<ConstantSDNode>(InOperandVal)) {
              emitInlineAsmError(Call, "value out of range for constraint '" +
                                           Twine(OpInfo.ConstraintCode) + "'");
              return;
            }

          emitInlineAsmError(Call,
                             "invalid operand for inline asm constraint '" +
                                 Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

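        // Example (illustrative, not from the original source): for an "i"
        // constraint fed by the constant 42, LowerAsmOperandForConstraint
        // typically returns a single target constant, which is then appended
        // after a Kind_Imm flag word below.
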
        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() ==
                   TLI.getPointerTy(DAG.getDataLayout()) &&
               "Memory operands expect pointer values");

        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      // TODO: Support this.
      if (OpInfo.isIndirect) {
        emitInlineAsmError(
            Call, "Don't know how to handle indirect register inputs yet "
                  "for constraint '" +
                      Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        emitInlineAsmError(Call,
                           "couldn't allocate input reg for constraint '" +
                               Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      if (DetectWriteToReservedRegister())
        return;

      SDLoc dl = getCurSDLoc();

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
                                        &Call);

      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
                                               dl, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber:
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                 false, 0, getCurSDLoc(), DAG,
                                                 AsmNodeOperands);
      break;
    }
  }

  // Finish up input operands.  Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  Flag = Chain.getValue(1);

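  // Note (illustrative, not from the original source): the INLINEASM node
  // produces (chain, glue); the glue result threads the CopyFromReg nodes
  // created below to the asm node so that output-register reads are ordered
  // after the asm executes.
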
  // Do additional work to generate outputs.

  SmallVector<EVT, 1> ResultVTs;
  SmallVector<SDValue, 1> ResultValues;
  SmallVector<SDValue, 8> OutChains;

  llvm::Type *CallResultType = Call.getType();
  ArrayRef<Type *> ResultTypes;
  if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
    ResultTypes = StructResult->elements();
  else if (!CallResultType->isVoidTy())
    ResultTypes = makeArrayRef(CallResultType);

  auto CurResultType = ResultTypes.begin();
  auto handleRegAssign = [&](SDValue V) {
    assert(CurResultType != ResultTypes.end() && "Unexpected value");
    assert((*CurResultType)->isSized() && "Unexpected unsized type");
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
    ++CurResultType;
    // If the type of the inline asm call site return value is different but
    // has the same size as the type of the asm output, bitcast it.  One
    // example of this is for vectors with different width / number of
    // elements.  This can happen for register classes that can contain
    // multiple different value types.  The preg or vreg allocated may not
    // have the same VT as was expected.
    //
    // This can also happen for a return value that disagrees with the
    // register class it is put in, eg. a double in a general-purpose register
    // on a 32-bit machine.
    if (ResultVT != V.getValueType() &&
        ResultVT.getSizeInBits() == V.getValueSizeInBits())
      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
             V.getValueType().isInteger()) {
      // If a result value was tied to an input value, the computed result
      // may have a wider width than the expected result.  Extract the
      // relevant portion.
      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
    }
    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
    ResultVTs.push_back(ResultVT);
    ResultValues.push_back(V);
  };

  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
                                                  Chain, &Flag, &Call);
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect outputs manifest as stores.  Record output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // Generate CopyFromRegs to associated registers.
        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(&Call, V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  // Only update Root if inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
    DAG.setRoot(Chain);
}

void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.emitError(&Call, Message);

  // Make sure we leave the DAG in a valid state.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
    Ops.push_back(DAG.getUNDEF(ValueVTs[i]));

  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlign(I.getType()).value());
  DAG.setRoot(V.getValue(1));

  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}

SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                    const Instruction &I,
                                                    SDValue Op) {
  const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
  if (!Range)
    return Op;

  ConstantRange CR = getConstantRangeFromMetadata(*Range);
  if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
    return Op;

  APInt Lo = CR.getUnsignedMin();
  if (!Lo.isMinValue())
    return Op;

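  // Example (illustrative, not from the original source): !range !{i32 0,
  // i32 256} on an i32-producing load or call gives Lo == 0 and Hi == 255,
  // so Bits becomes 8 and the value is wrapped below as
  //   (AssertZext Op, ValueType:i8)
  // telling later passes that the upper 24 bits are known to be zero.
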
  APInt Hi = CR.getUnsignedMax();
  unsigned Bits = std::max(Hi.getActiveBits(),
                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));

  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);

  SDLoc SL = getCurSDLoc();

  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
                             DAG.getValueType(SmallVT));
  unsigned NumVals = Op.getNode()->getNumValues();
  if (NumVals == 1)
    return ZExt;

  SmallVector<SDValue, 4> Ops;

  Ops.push_back(ZExt);
  for (unsigned I = 1; I != NumVals; ++I)
    Ops.push_back(Op.getValue(I));

  return DAG.getMergeValues(Ops, SL);
}

/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
void SelectionDAGBuilder::populateCallLoweringInfo(
    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
    bool IsPatchPoint) {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
       ArgI != ArgE; ++ArgI) {
    const Value *V = Call->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    TargetLowering::ArgListEntry Entry;
    Entry.Node = getValue(V);
    Entry.Ty = V->getType();
    Entry.setAttributes(Call, ArgI);
    Args.push_back(Entry);
  }

  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
      .setDiscardResult(Call->use_empty())
      .setIsPatchPoint(IsPatchPoint)
      .setIsPreallocated(
          Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
}

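// Usage note (illustrative, not from the original source): visitPatchpoint
// below calls populateCallLoweringInfo with ArgIdx == PatchPointOpers::CCPos,
// so only the real call arguments, not the patchpoint meta-operands (<id>,
// <numBytes>, <target>, <numArgs>), participate in the calling convention.
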
/// Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does
/// not generate address computation nodes, and so FinalizeISel can convert
/// the TargetFrameIndex into a DirectMemRefOp StackMap location.  This avoids
/// address materialization and register allocation, but may also be required
/// for correctness.  If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic).  If the alloca's location
/// were only available in a register, then the runtime would need to trap
/// when execution reaches the StackMap in order to read the alloca's
/// location.
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
                                SelectionDAGBuilder &Builder) {
  for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
    SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
      Ops.push_back(
          Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
      Ops.push_back(
          Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
    } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
      const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
      Ops.push_back(Builder.DAG.getTargetFrameIndex(
          FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
    } else
      Ops.push_back(OpVal);
  }
}

/// Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])

  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InFlag, Callee, NullPtr;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledOperand());
  NullPtr = DAG.getIntPtrConstant(0, DL, true);

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested).  Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call.  This means we don't
  // have to worry about calling conventions and target-specific lowering
  // code.  Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InFlag = Chain.getValue(1);

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
  SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
      MVT::i32));

  // Push live variables for the stack map.
  addStackMapLiveVars(CI, 2, DL, Ops, *this);

  // We are not pushing any register mask info here on the operands list,
  // because the stackmap doesn't clobber anything.

  // Push the chain and the glue flag.
  Ops.push_back(Chain);
  Ops.push_back(InFlag);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
  Chain = SDValue(SM, 0);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);

  // Stackmaps don't generate values, so nothing goes into the NodeMap.

  // Set the root to the target-lowered call chain.
  DAG.setRoot(Chain);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();
}

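// Example (illustrative, not from the original source):
//   call void @llvm.experimental.stackmap(i64 7, i32 4, i32 %x)
// records the location of %x under ID 7 in the stack map section and reserves
// 4 bytes of shadow NOPs; no register-mask operand is pushed above because
// the intrinsic clobbers nothing.
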
/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
                                          const BasicBlock *EHPadBB) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])

  CallingConv::ID CC = CB.getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !CB.getType()->isVoidTy();
  SDLoc dl = getCurSDLoc();
  SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto *ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto *SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>.
  SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();

  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
                           ReturnTy, true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  /// Get a call instruction from the call sequence chain.
  /// Tail calls are not allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
      MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
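  // i.e. (illustrative, not from the original source) drop the chain and
  // target operands plus the trailing register mask (and glue when present),
  // leaving only the register arguments.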
NumArgs : NumCallRegArgs; 8900 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32)); 8901 8902 // Add the calling convention 8903 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32)); 8904 8905 // Add the arguments we omitted previously. The register allocator should 8906 // place these in any free register. 8907 if (IsAnyRegCC) 8908 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) 8909 Ops.push_back(getValue(CB.getArgOperand(i))); 8910 8911 // Push the arguments from the call instruction up to the register mask. 8912 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1; 8913 Ops.append(Call->op_begin() + 2, e); 8914 8915 // Push live variables for the stack map. 8916 addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this); 8917 8918 // Push the register mask info. 8919 if (HasGlue) 8920 Ops.push_back(*(Call->op_end()-2)); 8921 else 8922 Ops.push_back(*(Call->op_end()-1)); 8923 8924 // Push the chain (this is originally the first operand of the call, but 8925 // becomes now the last or second to last operand). 8926 Ops.push_back(*(Call->op_begin())); 8927 8928 // Push the glue flag (last operand). 8929 if (HasGlue) 8930 Ops.push_back(*(Call->op_end()-1)); 8931 8932 SDVTList NodeTys; 8933 if (IsAnyRegCC && HasDef) { 8934 // Create the return types based on the intrinsic definition 8935 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8936 SmallVector<EVT, 3> ValueVTs; 8937 ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs); 8938 assert(ValueVTs.size() == 1 && "Expected only one return value type."); 8939 8940 // There is always a chain and a glue type at the end 8941 ValueVTs.push_back(MVT::Other); 8942 ValueVTs.push_back(MVT::Glue); 8943 NodeTys = DAG.getVTList(ValueVTs); 8944 } else 8945 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8946 8947 // Replace the target specific call node with a PATCHPOINT node. 8948 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT, 8949 dl, NodeTys, Ops); 8950 8951 // Update the NodeMap. 8952 if (HasDef) { 8953 if (IsAnyRegCC) 8954 setValue(&CB, SDValue(MN, 0)); 8955 else 8956 setValue(&CB, Result.first); 8957 } 8958 8959 // Fixup the consumers of the intrinsic. The chain and glue may be used in the 8960 // call sequence. Furthermore the location of the chain and glue can change 8961 // when the AnyReg calling convention is used and the intrinsic returns a 8962 // value. 8963 if (IsAnyRegCC && HasDef) { 8964 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)}; 8965 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)}; 8966 DAG.ReplaceAllUsesOfValuesWith(From, To, 2); 8967 } else 8968 DAG.ReplaceAllUsesWith(Call, MN); 8969 DAG.DeleteNode(Call); 8970 8971 // Inform the Frame Information that we have a patchpoint in this function. 
8972 FuncInfo.MF->getFrameInfo().setHasPatchPoint(); 8973 } 8974 8975 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I, 8976 unsigned Intrinsic) { 8977 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8978 SDValue Op1 = getValue(I.getArgOperand(0)); 8979 SDValue Op2; 8980 if (I.getNumArgOperands() > 1) 8981 Op2 = getValue(I.getArgOperand(1)); 8982 SDLoc dl = getCurSDLoc(); 8983 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8984 SDValue Res; 8985 FastMathFlags FMF; 8986 if (isa<FPMathOperator>(I)) 8987 FMF = I.getFastMathFlags(); 8988 8989 switch (Intrinsic) { 8990 case Intrinsic::experimental_vector_reduce_v2_fadd: 8991 if (FMF.allowReassoc()) 8992 Res = DAG.getNode(ISD::FADD, dl, VT, Op1, 8993 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2)); 8994 else 8995 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2); 8996 break; 8997 case Intrinsic::experimental_vector_reduce_v2_fmul: 8998 if (FMF.allowReassoc()) 8999 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1, 9000 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2)); 9001 else 9002 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2); 9003 break; 9004 case Intrinsic::experimental_vector_reduce_add: 9005 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1); 9006 break; 9007 case Intrinsic::experimental_vector_reduce_mul: 9008 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1); 9009 break; 9010 case Intrinsic::experimental_vector_reduce_and: 9011 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1); 9012 break; 9013 case Intrinsic::experimental_vector_reduce_or: 9014 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1); 9015 break; 9016 case Intrinsic::experimental_vector_reduce_xor: 9017 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1); 9018 break; 9019 case Intrinsic::experimental_vector_reduce_smax: 9020 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1); 9021 break; 9022 case Intrinsic::experimental_vector_reduce_smin: 9023 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1); 9024 break; 9025 case Intrinsic::experimental_vector_reduce_umax: 9026 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1); 9027 break; 9028 case Intrinsic::experimental_vector_reduce_umin: 9029 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1); 9030 break; 9031 case Intrinsic::experimental_vector_reduce_fmax: 9032 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1); 9033 break; 9034 case Intrinsic::experimental_vector_reduce_fmin: 9035 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1); 9036 break; 9037 default: 9038 llvm_unreachable("Unhandled vector reduce intrinsic"); 9039 } 9040 setValue(&I, Res); 9041 } 9042 9043 /// Returns an AttributeList representing the attributes applied to the return 9044 /// value of the given call. 9045 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) { 9046 SmallVector<Attribute::AttrKind, 2> Attrs; 9047 if (CLI.RetSExt) 9048 Attrs.push_back(Attribute::SExt); 9049 if (CLI.RetZExt) 9050 Attrs.push_back(Attribute::ZExt); 9051 if (CLI.IsInReg) 9052 Attrs.push_back(Attribute::InReg); 9053 9054 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex, 9055 Attrs); 9056 } 9057 9058 /// TargetLowering::LowerCallTo - This is the default LowerCallTo 9059 /// implementation, which just calls LowerCall. 9060 /// FIXME: When all targets are 9061 /// migrated to using LowerCall, this hook should be integrated into SDISel. 
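///
/// In outline, the implementation below:
///   1. computes the legal value types for the return value, demoting it to
///      a hidden sret stack slot when the target reports it cannot lower the
///      return directly (CanLowerReturn is false);
///   2. flattens every argument into register-sized parts, attaching the ABI
///      flags (sext/zext/inreg/byval/...) the calling-convention code needs;
///   3. invokes the target's LowerCall to emit the actual call sequence; and
///   4. reassembles the returned parts (or loads from the demoted sret slot)
///      into values of the original return type.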
9062 std::pair<SDValue, SDValue> 9063 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { 9064 // Handle the incoming return values from the call. 9065 CLI.Ins.clear(); 9066 Type *OrigRetTy = CLI.RetTy; 9067 SmallVector<EVT, 4> RetTys; 9068 SmallVector<uint64_t, 4> Offsets; 9069 auto &DL = CLI.DAG.getDataLayout(); 9070 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets); 9071 9072 if (CLI.IsPostTypeLegalization) { 9073 // If we are lowering a libcall after legalization, split the return type. 9074 SmallVector<EVT, 4> OldRetTys; 9075 SmallVector<uint64_t, 4> OldOffsets; 9076 RetTys.swap(OldRetTys); 9077 Offsets.swap(OldOffsets); 9078 9079 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) { 9080 EVT RetVT = OldRetTys[i]; 9081 uint64_t Offset = OldOffsets[i]; 9082 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT); 9083 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT); 9084 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8; 9085 RetTys.append(NumRegs, RegisterVT); 9086 for (unsigned j = 0; j != NumRegs; ++j) 9087 Offsets.push_back(Offset + j * RegisterVTByteSZ); 9088 } 9089 } 9090 9091 SmallVector<ISD::OutputArg, 4> Outs; 9092 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL); 9093 9094 bool CanLowerReturn = 9095 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(), 9096 CLI.IsVarArg, Outs, CLI.RetTy->getContext()); 9097 9098 SDValue DemoteStackSlot; 9099 int DemoteStackIdx = -100; 9100 if (!CanLowerReturn) { 9101 // FIXME: equivalent assert? 9102 // assert(!CS.hasInAllocaArgument() && 9103 // "sret demotion is incompatible with inalloca"); 9104 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy); 9105 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy); 9106 MachineFunction &MF = CLI.DAG.getMachineFunction(); 9107 DemoteStackIdx = 9108 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false); 9109 Type *StackSlotPtrType = PointerType::get(CLI.RetTy, 9110 DL.getAllocaAddrSpace()); 9111 9112 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL)); 9113 ArgListEntry Entry; 9114 Entry.Node = DemoteStackSlot; 9115 Entry.Ty = StackSlotPtrType; 9116 Entry.IsSExt = false; 9117 Entry.IsZExt = false; 9118 Entry.IsInReg = false; 9119 Entry.IsSRet = true; 9120 Entry.IsNest = false; 9121 Entry.IsByVal = false; 9122 Entry.IsReturned = false; 9123 Entry.IsSwiftSelf = false; 9124 Entry.IsSwiftError = false; 9125 Entry.IsCFGuardTarget = false; 9126 Entry.Alignment = Alignment; 9127 CLI.getArgs().insert(CLI.getArgs().begin(), Entry); 9128 CLI.NumFixedArgs += 1; 9129 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext()); 9130 9131 // sret demotion isn't compatible with tail-calls, since the sret argument 9132 // points into the callers stack frame. 
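    //
    // A minimal sketch of what the demotion amounts to at the IR level
    // (illustrative types and names):
    //   %slot = alloca %ret.ty                      ; DemoteStackIdx
    //   call void @callee(%ret.ty* sret %slot, ...) ; sret entry prepended
    //   %ret = load %ret.ty, %ret.ty* %slot         ; emitted after LowerCall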
9133 CLI.IsTailCall = false; 9134 } else { 9135 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( 9136 CLI.RetTy, CLI.CallConv, CLI.IsVarArg); 9137 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 9138 ISD::ArgFlagsTy Flags; 9139 if (NeedsRegBlock) { 9140 Flags.setInConsecutiveRegs(); 9141 if (I == RetTys.size() - 1) 9142 Flags.setInConsecutiveRegsLast(); 9143 } 9144 EVT VT = RetTys[I]; 9145 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 9146 CLI.CallConv, VT); 9147 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 9148 CLI.CallConv, VT); 9149 for (unsigned i = 0; i != NumRegs; ++i) { 9150 ISD::InputArg MyFlags; 9151 MyFlags.Flags = Flags; 9152 MyFlags.VT = RegisterVT; 9153 MyFlags.ArgVT = VT; 9154 MyFlags.Used = CLI.IsReturnValueUsed; 9155 if (CLI.RetTy->isPointerTy()) { 9156 MyFlags.Flags.setPointer(); 9157 MyFlags.Flags.setPointerAddrSpace( 9158 cast<PointerType>(CLI.RetTy)->getAddressSpace()); 9159 } 9160 if (CLI.RetSExt) 9161 MyFlags.Flags.setSExt(); 9162 if (CLI.RetZExt) 9163 MyFlags.Flags.setZExt(); 9164 if (CLI.IsInReg) 9165 MyFlags.Flags.setInReg(); 9166 CLI.Ins.push_back(MyFlags); 9167 } 9168 } 9169 } 9170 9171 // We push in swifterror return as the last element of CLI.Ins. 9172 ArgListTy &Args = CLI.getArgs(); 9173 if (supportSwiftError()) { 9174 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 9175 if (Args[i].IsSwiftError) { 9176 ISD::InputArg MyFlags; 9177 MyFlags.VT = getPointerTy(DL); 9178 MyFlags.ArgVT = EVT(getPointerTy(DL)); 9179 MyFlags.Flags.setSwiftError(); 9180 CLI.Ins.push_back(MyFlags); 9181 } 9182 } 9183 } 9184 9185 // Handle all of the outgoing arguments. 9186 CLI.Outs.clear(); 9187 CLI.OutVals.clear(); 9188 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 9189 SmallVector<EVT, 4> ValueVTs; 9190 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs); 9191 // FIXME: Split arguments if CLI.IsPostTypeLegalization 9192 Type *FinalType = Args[i].Ty; 9193 if (Args[i].IsByVal) 9194 FinalType = cast<PointerType>(Args[i].Ty)->getElementType(); 9195 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( 9196 FinalType, CLI.CallConv, CLI.IsVarArg); 9197 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues; 9198 ++Value) { 9199 EVT VT = ValueVTs[Value]; 9200 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext()); 9201 SDValue Op = SDValue(Args[i].Node.getNode(), 9202 Args[i].Node.getResNo() + Value); 9203 ISD::ArgFlagsTy Flags; 9204 9205 // Certain targets (such as MIPS), may have a different ABI alignment 9206 // for a type depending on the context. Give the target a chance to 9207 // specify the alignment it wants. 
9208 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL)); 9209 9210 if (Args[i].Ty->isPointerTy()) { 9211 Flags.setPointer(); 9212 Flags.setPointerAddrSpace( 9213 cast<PointerType>(Args[i].Ty)->getAddressSpace()); 9214 } 9215 if (Args[i].IsZExt) 9216 Flags.setZExt(); 9217 if (Args[i].IsSExt) 9218 Flags.setSExt(); 9219 if (Args[i].IsInReg) { 9220 // If we are using vectorcall calling convention, a structure that is 9221 // passed InReg is surely an HVA 9222 if (CLI.CallConv == CallingConv::X86_VectorCall && 9223 isa<StructType>(FinalType)) { 9224 // The first value of a structure is marked HvaStart. 9225 if (0 == Value) 9226 Flags.setHvaStart(); 9227 Flags.setHva(); 9228 } 9229 // Set InReg Flag 9230 Flags.setInReg(); 9231 } 9232 if (Args[i].IsSRet) 9233 Flags.setSRet(); 9234 if (Args[i].IsSwiftSelf) 9235 Flags.setSwiftSelf(); 9236 if (Args[i].IsSwiftError) 9237 Flags.setSwiftError(); 9238 if (Args[i].IsCFGuardTarget) 9239 Flags.setCFGuardTarget(); 9240 if (Args[i].IsByVal) 9241 Flags.setByVal(); 9242 if (Args[i].IsPreallocated) { 9243 Flags.setPreallocated(); 9244 // Set the byval flag for CCAssignFn callbacks that don't know about 9245 // preallocated. This way we can know how many bytes we should've 9246 // allocated and how many bytes a callee cleanup function will pop. If 9247 // we port preallocated to more targets, we'll have to add custom 9248 // preallocated handling in the various CC lowering callbacks. 9249 Flags.setByVal(); 9250 } 9251 if (Args[i].IsInAlloca) { 9252 Flags.setInAlloca(); 9253 // Set the byval flag for CCAssignFn callbacks that don't know about 9254 // inalloca. This way we can know how many bytes we should've allocated 9255 // and how many bytes a callee cleanup function will pop. If we port 9256 // inalloca to more targets, we'll have to add custom inalloca handling 9257 // in the various CC lowering callbacks. 9258 Flags.setByVal(); 9259 } 9260 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) { 9261 PointerType *Ty = cast<PointerType>(Args[i].Ty); 9262 Type *ElementTy = Ty->getElementType(); 9263 9264 unsigned FrameSize = DL.getTypeAllocSize( 9265 Args[i].ByValType ? Args[i].ByValType : ElementTy); 9266 Flags.setByValSize(FrameSize); 9267 // For ByVal, size and alignment should be passed from FE. BE will guess if this 9268 // info is not there but there are cases it cannot get right. 9269 Align FrameAlign; 9270 if (auto MA = Args[i].Alignment) 9271 FrameAlign = *MA; 9272 else 9273 FrameAlign = Align(getByValTypeAlignment(ElementTy, DL)); 9274 Flags.setByValAlign(FrameAlign); 9275 } 9276 if (Args[i].IsNest) 9277 Flags.setNest(); 9278 if (NeedsRegBlock) 9279 Flags.setInConsecutiveRegs(); 9280 Flags.setOrigAlign(OriginalAlignment); 9281 9282 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 9283 CLI.CallConv, VT); 9284 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 9285 CLI.CallConv, VT); 9286 SmallVector<SDValue, 4> Parts(NumParts); 9287 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 9288 9289 if (Args[i].IsSExt) 9290 ExtendKind = ISD::SIGN_EXTEND; 9291 else if (Args[i].IsZExt) 9292 ExtendKind = ISD::ZERO_EXTEND; 9293 9294 // Conservatively only handle 'returned' on non-vectors that can be lowered, 9295 // for now.
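      // ('returned' refers to the IR parameter attribute, e.g.
      //    declare i8* @foo(i8* returned %p)
      //  which promises that the callee returns its first argument.)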
9296 if (Args[i].IsReturned && !Op.getValueType().isVector() && 9297 CanLowerReturn) { 9298 assert((CLI.RetTy == Args[i].Ty || 9299 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() && 9300 CLI.RetTy->getPointerAddressSpace() == 9301 Args[i].Ty->getPointerAddressSpace())) && 9302 RetTys.size() == NumValues && "unexpected use of 'returned'"); 9303 // Before passing 'returned' to the target lowering code, ensure that 9304 // either the register MVT and the actual EVT are the same size or that 9305 // the return value and argument are extended in the same way; in these 9306 // cases it's safe to pass the argument register value unchanged as the 9307 // return register value (although it's at the target's option whether 9308 // to do so) 9309 // TODO: allow code generation to take advantage of partially preserved 9310 // registers rather than clobbering the entire register when the 9311 // parameter extension method is not compatible with the return 9312 // extension method 9313 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || 9314 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt && 9315 CLI.RetZExt == Args[i].IsZExt)) 9316 Flags.setReturned(); 9317 } 9318 9319 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB, 9320 CLI.CallConv, ExtendKind); 9321 9322 for (unsigned j = 0; j != NumParts; ++j) { 9323 // if it isn't first piece, alignment must be 1 9324 // For scalable vectors the scalable part is currently handled 9325 // by individual targets, so we just use the known minimum size here. 9326 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT, 9327 i < CLI.NumFixedArgs, i, 9328 j*Parts[j].getValueType().getStoreSize().getKnownMinSize()); 9329 if (NumParts > 1 && j == 0) 9330 MyFlags.Flags.setSplit(); 9331 else if (j != 0) { 9332 MyFlags.Flags.setOrigAlign(Align(1)); 9333 if (j == NumParts - 1) 9334 MyFlags.Flags.setSplitEnd(); 9335 } 9336 9337 CLI.Outs.push_back(MyFlags); 9338 CLI.OutVals.push_back(Parts[j]); 9339 } 9340 9341 if (NeedsRegBlock && Value == NumValues - 1) 9342 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); 9343 } 9344 } 9345 9346 SmallVector<SDValue, 4> InVals; 9347 CLI.Chain = LowerCall(CLI, InVals); 9348 9349 // Update CLI.InVals to use outside of this function. 9350 CLI.InVals = InVals; 9351 9352 // Verify that the target's LowerCall behaved as expected. 9353 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other && 9354 "LowerCall didn't return a valid chain!"); 9355 assert((!CLI.IsTailCall || InVals.empty()) && 9356 "LowerCall emitted a return value for a tail call!"); 9357 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) && 9358 "LowerCall didn't emit the correct number of values!"); 9359 9360 // For a tail call, the return value is merely live-out and there aren't 9361 // any nodes in the DAG representing it. Return a special value to 9362 // indicate that a tail call has been emitted and no more Instructions 9363 // should be processed in the current block. 
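  // (Callers treat the pair of empty SDValues as "a tail call was emitted"
  // and stop lowering further instructions in the current block.)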
9364 if (CLI.IsTailCall) { 9365 CLI.DAG.setRoot(CLI.Chain); 9366 return std::make_pair(SDValue(), SDValue()); 9367 } 9368 9369 #ifndef NDEBUG 9370 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) { 9371 assert(InVals[i].getNode() && "LowerCall emitted a null value!"); 9372 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() && 9373 "LowerCall emitted a value with the wrong type!"); 9374 } 9375 #endif 9376 9377 SmallVector<SDValue, 4> ReturnValues; 9378 if (!CanLowerReturn) { 9379 // The instruction result is the result of loading from the 9380 // hidden sret parameter. 9381 SmallVector<EVT, 1> PVTs; 9382 Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace()); 9383 9384 ComputeValueVTs(*this, DL, PtrRetTy, PVTs); 9385 assert(PVTs.size() == 1 && "Pointers should fit in one register"); 9386 EVT PtrVT = PVTs[0]; 9387 9388 unsigned NumValues = RetTys.size(); 9389 ReturnValues.resize(NumValues); 9390 SmallVector<SDValue, 4> Chains(NumValues); 9391 9392 // An aggregate return value cannot wrap around the address space, so 9393 // offsets to its parts don't wrap either. 9394 SDNodeFlags Flags; 9395 Flags.setNoUnsignedWrap(true); 9396 9397 MachineFunction &MF = CLI.DAG.getMachineFunction(); 9398 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx); 9399 for (unsigned i = 0; i < NumValues; ++i) { 9400 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot, 9401 CLI.DAG.getConstant(Offsets[i], CLI.DL, 9402 PtrVT), Flags); 9403 SDValue L = CLI.DAG.getLoad( 9404 RetTys[i], CLI.DL, CLI.Chain, Add, 9405 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(), 9406 DemoteStackIdx, Offsets[i]), 9407 HiddenSRetAlign); 9408 ReturnValues[i] = L; 9409 Chains[i] = L.getValue(1); 9410 } 9411 9412 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains); 9413 } else { 9414 // Collect the legal value parts into potentially illegal values 9415 // that correspond to the original function's return values. 9416 Optional<ISD::NodeType> AssertOp; 9417 if (CLI.RetSExt) 9418 AssertOp = ISD::AssertSext; 9419 else if (CLI.RetZExt) 9420 AssertOp = ISD::AssertZext; 9421 unsigned CurReg = 0; 9422 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 9423 EVT VT = RetTys[I]; 9424 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 9425 CLI.CallConv, VT); 9426 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 9427 CLI.CallConv, VT); 9428 9429 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg], 9430 NumRegs, RegisterVT, VT, nullptr, 9431 CLI.CallConv, AssertOp)); 9432 CurReg += NumRegs; 9433 } 9434 9435 // For a function returning void, there is no return value. We can't create 9436 // such a node, so we just return a null return value in that case. In 9437 // that case, nothing will actually look at the value. 
9438 if (ReturnValues.empty()) 9439 return std::make_pair(SDValue(), CLI.Chain); 9440 } 9441 9442 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL, 9443 CLI.DAG.getVTList(RetTys), ReturnValues); 9444 return std::make_pair(Res, CLI.Chain); 9445 } 9446 9447 void TargetLowering::LowerOperationWrapper(SDNode *N, 9448 SmallVectorImpl<SDValue> &Results, 9449 SelectionDAG &DAG) const { 9450 if (SDValue Res = LowerOperation(SDValue(N, 0), DAG)) 9451 Results.push_back(Res); 9452 } 9453 9454 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9455 llvm_unreachable("LowerOperation not implemented for this target!"); 9456 } 9457 9458 void 9459 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { 9460 SDValue Op = getNonRegisterValue(V); 9461 assert((Op.getOpcode() != ISD::CopyFromReg || 9462 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 9463 "Copy from a reg to the same reg!"); 9464 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg"); 9465 9466 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9467 // If this is an InlineAsm we have to match the registers required, not the 9468 // notional registers required by the type. 9469 9470 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(), 9471 None); // This is not an ABI copy. 9472 SDValue Chain = DAG.getEntryNode(); 9473 9474 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) == 9475 FuncInfo.PreferredExtendType.end()) 9476 ? ISD::ANY_EXTEND 9477 : FuncInfo.PreferredExtendType[V]; 9478 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType); 9479 PendingExports.push_back(Chain); 9480 } 9481 9482 #include "llvm/CodeGen/SelectionDAGISel.h" 9483 9484 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the 9485 /// entry block, return true. This includes arguments used by switches, since 9486 /// the switch may expand into multiple basic blocks. 9487 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { 9488 // With FastISel active, we may be splitting blocks, so force creation 9489 // of virtual registers for all non-dead arguments. 9490 if (FastISel) 9491 return A->use_empty(); 9492 9493 const BasicBlock &Entry = A->getParent()->front(); 9494 for (const User *U : A->users()) 9495 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U)) 9496 return false; // Use not in entry block. 9497 9498 return true; 9499 } 9500 9501 using ArgCopyElisionMapTy = 9502 DenseMap<const Argument *, 9503 std::pair<const AllocaInst *, const StoreInst *>>; 9504 9505 /// Scan the entry block of the function in FuncInfo for arguments that look 9506 /// like copies into a local alloca. Record any copied arguments in 9507 /// ArgCopyElisionCandidates. 9508 static void 9509 findArgumentCopyElisionCandidates(const DataLayout &DL, 9510 FunctionLoweringInfo *FuncInfo, 9511 ArgCopyElisionMapTy &ArgCopyElisionCandidates) { 9512 // Record the state of every static alloca used in the entry block. Argument 9513 // allocas are all used in the entry block, so we need approximately as many 9514 // entries as we have arguments. 
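  // The pattern being matched is the argument spill a frontend typically
  // emits at -O0 (illustrative):
  //   define void @f(i32 %x) {
  //     %x.addr = alloca i32
  //     store i32 %x, i32* %x.addr
  //     ...
  //   }
  // If that store fully initializes the alloca and nothing clobbers the
  // alloca beforehand, %x.addr can reuse the argument's own stack slot.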
9515 enum StaticAllocaInfo { Unknown, Clobbered, Elidable }; 9516 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas; 9517 unsigned NumArgs = FuncInfo->Fn->arg_size(); 9518 StaticAllocas.reserve(NumArgs * 2); 9519 9520 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * { 9521 if (!V) 9522 return nullptr; 9523 V = V->stripPointerCasts(); 9524 const auto *AI = dyn_cast<AllocaInst>(V); 9525 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI)) 9526 return nullptr; 9527 auto Iter = StaticAllocas.insert({AI, Unknown}); 9528 return &Iter.first->second; 9529 }; 9530 9531 // Look for stores of arguments to static allocas. Look through bitcasts and 9532 // GEPs to handle type coercions, as long as the alloca is fully initialized 9533 // by the store. Any non-store use of an alloca escapes it and any subsequent 9534 // unanalyzed store might write it. 9535 // FIXME: Handle structs initialized with multiple stores. 9536 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) { 9537 // Look for stores, and handle non-store uses conservatively. 9538 const auto *SI = dyn_cast<StoreInst>(&I); 9539 if (!SI) { 9540 // We will look through cast uses, so ignore them completely. 9541 if (I.isCast()) 9542 continue; 9543 // Ignore debug info intrinsics, they don't escape or store to allocas. 9544 if (isa<DbgInfoIntrinsic>(I)) 9545 continue; 9546 // This is an unknown instruction. Assume it escapes or writes to all 9547 // static alloca operands. 9548 for (const Use &U : I.operands()) { 9549 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U)) 9550 *Info = StaticAllocaInfo::Clobbered; 9551 } 9552 continue; 9553 } 9554 9555 // If the stored value is a static alloca, mark it as escaped. 9556 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) 9557 *Info = StaticAllocaInfo::Clobbered; 9558 9559 // Check if the destination is a static alloca. 9560 const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); 9561 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst); 9562 if (!Info) 9563 continue; 9564 const AllocaInst *AI = cast<AllocaInst>(Dst); 9565 9566 // Skip allocas that have been initialized or clobbered. 9567 if (*Info != StaticAllocaInfo::Unknown) 9568 continue; 9569 9570 // Check if the stored value is an argument, and that this store fully 9571 // initializes the alloca. Don't elide copies from the same argument twice. 9572 const Value *Val = SI->getValueOperand()->stripPointerCasts(); 9573 const auto *Arg = dyn_cast<Argument>(Val); 9574 if (!Arg || Arg->hasPassPointeeByValueAttr() || 9575 Arg->getType()->isEmptyTy() || 9576 DL.getTypeStoreSize(Arg->getType()) != 9577 DL.getTypeAllocSize(AI->getAllocatedType()) || 9578 ArgCopyElisionCandidates.count(Arg)) { 9579 *Info = StaticAllocaInfo::Clobbered; 9580 continue; 9581 } 9582 9583 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI 9584 << '\n'); 9585 9586 // Mark this alloca and store for argument copy elision. 9587 *Info = StaticAllocaInfo::Elidable; 9588 ArgCopyElisionCandidates.insert({Arg, {AI, SI}}); 9589 9590 // Stop scanning if we've seen all arguments. This will happen early in -O0 9591 // builds, which is useful, because -O0 builds have large entry blocks and 9592 // many allocas. 9593 if (ArgCopyElisionCandidates.size() == NumArgs) 9594 break; 9595 } 9596 } 9597 9598 /// Try to elide argument copies from memory into a local alloca. Succeeds if 9599 /// ArgVal is a load from a suitable fixed stack object. 
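///
/// "Suitable" means, per the checks below, a fixed stack object whose size
/// matches the alloca's and whose alignment is at least the alloca's; on
/// success the alloca's frame index is redirected to that object and the
/// copying store is skipped.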
9600 static void tryToElideArgumentCopy( 9601 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains, 9602 DenseMap<int, int> &ArgCopyElisionFrameIndexMap, 9603 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs, 9604 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, 9605 SDValue ArgVal, bool &ArgHasUses) { 9606 // Check if this is a load from a fixed stack object. 9607 auto *LNode = dyn_cast<LoadSDNode>(ArgVal); 9608 if (!LNode) 9609 return; 9610 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()); 9611 if (!FINode) 9612 return; 9613 9614 // Check that the fixed stack object is the right size and alignment. 9615 // Look at the alignment that the user wrote on the alloca instead of looking 9616 // at the stack object. 9617 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg); 9618 assert(ArgCopyIter != ArgCopyElisionCandidates.end()); 9619 const AllocaInst *AI = ArgCopyIter->second.first; 9620 int FixedIndex = FINode->getIndex(); 9621 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI]; 9622 int OldIndex = AllocaIndex; 9623 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); 9624 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) { 9625 LLVM_DEBUG( 9626 dbgs() << " argument copy elision failed due to bad fixed stack " 9627 "object size\n"); 9628 return; 9629 } 9630 Align RequiredAlignment = AI->getAlign(); 9631 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) { 9632 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca " 9633 "greater than stack argument alignment (" 9634 << DebugStr(RequiredAlignment) << " vs " 9635 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n"); 9636 return; 9637 } 9638 9639 // Perform the elision. Delete the old stack object and replace its only use 9640 // in the variable info map. Mark the stack object as mutable. 9641 LLVM_DEBUG({ 9642 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n' 9643 << " Replacing frame index " << OldIndex << " with " << FixedIndex 9644 << '\n'; 9645 }); 9646 MFI.RemoveStackObject(OldIndex); 9647 MFI.setIsImmutableObjectIndex(FixedIndex, false); 9648 AllocaIndex = FixedIndex; 9649 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex}); 9650 Chains.push_back(ArgVal.getValue(1)); 9651 9652 // Avoid emitting code for the store implementing the copy. 9653 const StoreInst *SI = ArgCopyIter->second.second; 9654 ElidedArgCopyInstrs.insert(SI); 9655 9656 // Check for uses of the argument again so that we can avoid exporting ArgVal 9657 // if it isn't used by anything other than the store. 9658 for (const Value *U : Arg.users()) { 9659 if (U != SI) { 9660 ArgHasUses = true; 9661 break; 9662 } 9663 } 9664 } 9665 9666 void SelectionDAGISel::LowerArguments(const Function &F) { 9667 SelectionDAG &DAG = SDB->DAG; 9668 SDLoc dl = SDB->getCurSDLoc(); 9669 const DataLayout &DL = DAG.getDataLayout(); 9670 SmallVector<ISD::InputArg, 16> Ins; 9671 9672 // In Naked functions we aren't going to save any registers. 9673 if (F.hasFnAttribute(Attribute::Naked)) 9674 return; 9675 9676 if (!FuncInfo->CanLowerReturn) { 9677 // Put in an sret pointer parameter before all the other parameters. 9678 SmallVector<EVT, 1> ValueVTs; 9679 ComputeValueVTs(*TLI, DAG.getDataLayout(), 9680 F.getReturnType()->getPointerTo( 9681 DAG.getDataLayout().getAllocaAddrSpace()), 9682 ValueVTs); 9683 9684 // NOTE: Assuming that a pointer will never break down to more than one VT 9685 // or one register.
9686 ISD::ArgFlagsTy Flags; 9687 Flags.setSRet(); 9688 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]); 9689 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 9690 ISD::InputArg::NoArgIndex, 0); 9691 Ins.push_back(RetArg); 9692 } 9693 9694 // Look for stores of arguments to static allocas. Mark such arguments with a 9695 // flag to ask the target to give us the memory location of that argument if 9696 // available. 9697 ArgCopyElisionMapTy ArgCopyElisionCandidates; 9698 findArgumentCopyElisionCandidates(DL, FuncInfo.get(), 9699 ArgCopyElisionCandidates); 9700 9701 // Set up the incoming argument description vector. 9702 for (const Argument &Arg : F.args()) { 9703 unsigned ArgNo = Arg.getArgNo(); 9704 SmallVector<EVT, 4> ValueVTs; 9705 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs); 9706 bool isArgValueUsed = !Arg.use_empty(); 9707 unsigned PartBase = 0; 9708 Type *FinalType = Arg.getType(); 9709 if (Arg.hasAttribute(Attribute::ByVal)) 9710 FinalType = Arg.getParamByValType(); 9711 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters( 9712 FinalType, F.getCallingConv(), F.isVarArg()); 9713 for (unsigned Value = 0, NumValues = ValueVTs.size(); 9714 Value != NumValues; ++Value) { 9715 EVT VT = ValueVTs[Value]; 9716 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); 9717 ISD::ArgFlagsTy Flags; 9718 9719 // Certain targets (such as MIPS), may have a different ABI alignment 9720 // for a type depending on the context. Give the target a chance to 9721 // specify the alignment it wants. 9722 const Align OriginalAlignment( 9723 TLI->getABIAlignmentForCallingConv(ArgTy, DL)); 9724 9725 if (Arg.getType()->isPointerTy()) { 9726 Flags.setPointer(); 9727 Flags.setPointerAddrSpace( 9728 cast<PointerType>(Arg.getType())->getAddressSpace()); 9729 } 9730 if (Arg.hasAttribute(Attribute::ZExt)) 9731 Flags.setZExt(); 9732 if (Arg.hasAttribute(Attribute::SExt)) 9733 Flags.setSExt(); 9734 if (Arg.hasAttribute(Attribute::InReg)) { 9735 // If we are using vectorcall calling convention, a structure that is 9736 // passed InReg is surely an HVA 9737 if (F.getCallingConv() == CallingConv::X86_VectorCall && 9738 isa<StructType>(Arg.getType())) { 9739 // The first value of a structure is marked HvaStart. 9740 if (0 == Value) 9741 Flags.setHvaStart(); 9742 Flags.setHva(); 9743 } 9744 // Set InReg Flag 9745 Flags.setInReg(); 9746 } 9747 if (Arg.hasAttribute(Attribute::StructRet)) 9748 Flags.setSRet(); 9749 if (Arg.hasAttribute(Attribute::SwiftSelf)) 9750 Flags.setSwiftSelf(); 9751 if (Arg.hasAttribute(Attribute::SwiftError)) 9752 Flags.setSwiftError(); 9753 if (Arg.hasAttribute(Attribute::ByVal)) 9754 Flags.setByVal(); 9755 if (Arg.hasAttribute(Attribute::InAlloca)) { 9756 Flags.setInAlloca(); 9757 // Set the byval flag for CCAssignFn callbacks that don't know about 9758 // inalloca. This way we can know how many bytes we should've allocated 9759 // and how many bytes a callee cleanup function will pop. If we port 9760 // inalloca to more targets, we'll have to add custom inalloca handling 9761 // in the various CC lowering callbacks. 9762 Flags.setByVal(); 9763 } 9764 if (Arg.hasAttribute(Attribute::Preallocated)) { 9765 Flags.setPreallocated(); 9766 // Set the byval flag for CCAssignFn callbacks that don't know about 9767 // preallocated. This way we can know how many bytes we should've 9768 // allocated and how many bytes a callee cleanup function will pop.
If 9769 // we port preallocated to more targets, we'll have to add custom 9770 // preallocated handling in the various CC lowering callbacks. 9771 Flags.setByVal(); 9772 } 9773 if (F.getCallingConv() == CallingConv::X86_INTR) { 9774 // IA Interrupt passes frame (1st parameter) by value in the stack. 9775 if (ArgNo == 0) 9776 Flags.setByVal(); 9777 } 9778 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) { 9779 Type *ElementTy = Arg.getParamByValType(); 9780 9781 // For ByVal, size and alignment should be passed from FE. BE will 9782 // guess if this info is not there but there are cases it cannot get 9783 // right. 9784 unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType()); 9785 Flags.setByValSize(FrameSize); 9786 9787 unsigned FrameAlign; 9788 if (Arg.getParamAlignment()) 9789 FrameAlign = Arg.getParamAlignment(); 9790 else 9791 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL); 9792 Flags.setByValAlign(Align(FrameAlign)); 9793 } 9794 if (Arg.hasAttribute(Attribute::Nest)) 9795 Flags.setNest(); 9796 if (NeedsRegBlock) 9797 Flags.setInConsecutiveRegs(); 9798 Flags.setOrigAlign(OriginalAlignment); 9799 if (ArgCopyElisionCandidates.count(&Arg)) 9800 Flags.setCopyElisionCandidate(); 9801 if (Arg.hasAttribute(Attribute::Returned)) 9802 Flags.setReturned(); 9803 9804 MVT RegisterVT = TLI->getRegisterTypeForCallingConv( 9805 *CurDAG->getContext(), F.getCallingConv(), VT); 9806 unsigned NumRegs = TLI->getNumRegistersForCallingConv( 9807 *CurDAG->getContext(), F.getCallingConv(), VT); 9808 for (unsigned i = 0; i != NumRegs; ++i) { 9809 // For scalable vectors, use the minimum size; individual targets 9810 // are responsible for handling scalable vector arguments and 9811 // return values. 9812 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed, 9813 ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize()); 9814 if (NumRegs > 1 && i == 0) 9815 MyFlags.Flags.setSplit(); 9816 // if it isn't first piece, alignment must be 1 9817 else if (i > 0) { 9818 MyFlags.Flags.setOrigAlign(Align(1)); 9819 if (i == NumRegs - 1) 9820 MyFlags.Flags.setSplitEnd(); 9821 } 9822 Ins.push_back(MyFlags); 9823 } 9824 if (NeedsRegBlock && Value == NumValues - 1) 9825 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); 9826 PartBase += VT.getStoreSize().getKnownMinSize(); 9827 } 9828 } 9829 9830 // Call the target to set up the argument values. 9831 SmallVector<SDValue, 8> InVals; 9832 SDValue NewRoot = TLI->LowerFormalArguments( 9833 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals); 9834 9835 // Verify that the target's LowerFormalArguments behaved as expected. 9836 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other && 9837 "LowerFormalArguments didn't return a valid chain!"); 9838 assert(InVals.size() == Ins.size() && 9839 "LowerFormalArguments didn't emit the correct number of values!"); 9840 LLVM_DEBUG({ 9841 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 9842 assert(InVals[i].getNode() && 9843 "LowerFormalArguments emitted a null value!"); 9844 assert(EVT(Ins[i].VT) == InVals[i].getValueType() && 9845 "LowerFormalArguments emitted a value with the wrong type!"); 9846 } 9847 }); 9848 9849 // Update the DAG with the new chain value resulting from argument lowering. 9850 DAG.setRoot(NewRoot); 9851 9852 // Set up the argument values. 9853 unsigned i = 0; 9854 if (!FuncInfo->CanLowerReturn) { 9855 // Create a virtual register for the sret pointer, and put in a copy 9856 // from the sret argument into it. 
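    // (DemoteRegister, set below, is how the return-lowering code later finds
    // the hidden sret pointer again when storing the function's return
    // value.)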
9857 SmallVector<EVT, 1> ValueVTs; 9858 ComputeValueVTs(*TLI, DAG.getDataLayout(), 9859 F.getReturnType()->getPointerTo( 9860 DAG.getDataLayout().getAllocaAddrSpace()), 9861 ValueVTs); 9862 MVT VT = ValueVTs[0].getSimpleVT(); 9863 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 9864 Optional<ISD::NodeType> AssertOp = None; 9865 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, 9866 nullptr, F.getCallingConv(), AssertOp); 9867 9868 MachineFunction& MF = SDB->DAG.getMachineFunction(); 9869 MachineRegisterInfo& RegInfo = MF.getRegInfo(); 9870 Register SRetReg = 9871 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); 9872 FuncInfo->DemoteRegister = SRetReg; 9873 NewRoot = 9874 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); 9875 DAG.setRoot(NewRoot); 9876 9877 // i indexes lowered arguments. Bump it past the hidden sret argument. 9878 ++i; 9879 } 9880 9881 SmallVector<SDValue, 4> Chains; 9882 DenseMap<int, int> ArgCopyElisionFrameIndexMap; 9883 for (const Argument &Arg : F.args()) { 9884 SmallVector<SDValue, 4> ArgValues; 9885 SmallVector<EVT, 4> ValueVTs; 9886 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs); 9887 unsigned NumValues = ValueVTs.size(); 9888 if (NumValues == 0) 9889 continue; 9890 9891 bool ArgHasUses = !Arg.use_empty(); 9892 9893 // Elide the copying store if the target loaded this argument from a 9894 // suitable fixed stack object. 9895 if (Ins[i].Flags.isCopyElisionCandidate()) { 9896 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap, 9897 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg, 9898 InVals[i], ArgHasUses); 9899 } 9900 9901 // If this argument is unused then remember its value. It is used to generate 9902 // debugging information. 9903 bool isSwiftErrorArg = 9904 TLI->supportSwiftError() && 9905 Arg.hasAttribute(Attribute::SwiftError); 9906 if (!ArgHasUses && !isSwiftErrorArg) { 9907 SDB->setUnusedArgValue(&Arg, InVals[i]); 9908 9909 // Also remember any frame index for use in FastISel. 9910 if (FrameIndexSDNode *FI = 9911 dyn_cast<FrameIndexSDNode>(InVals[i].getNode())) 9912 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 9913 } 9914 9915 for (unsigned Val = 0; Val != NumValues; ++Val) { 9916 EVT VT = ValueVTs[Val]; 9917 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), 9918 F.getCallingConv(), VT); 9919 unsigned NumParts = TLI->getNumRegistersForCallingConv( 9920 *CurDAG->getContext(), F.getCallingConv(), VT); 9921 9922 // Even an apparent 'unused' swifterror argument needs to be returned. So 9923 // we do generate a copy for it that can be used on return from the 9924 // function. 9925 if (ArgHasUses || isSwiftErrorArg) { 9926 Optional<ISD::NodeType> AssertOp; 9927 if (Arg.hasAttribute(Attribute::SExt)) 9928 AssertOp = ISD::AssertSext; 9929 else if (Arg.hasAttribute(Attribute::ZExt)) 9930 AssertOp = ISD::AssertZext; 9931 9932 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts, 9933 PartVT, VT, nullptr, 9934 F.getCallingConv(), AssertOp)); 9935 } 9936 9937 i += NumParts; 9938 } 9939 9940 // We don't need to do anything else for unused arguments. 9941 if (ArgValues.empty()) 9942 continue; 9943 9944 // Note down frame index. 
9945 if (FrameIndexSDNode *FI = 9946 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode())) 9947 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 9948 9949 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues), 9950 SDB->getCurSDLoc()); 9951 9952 SDB->setValue(&Arg, Res); 9953 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { 9954 // We want to associate the argument with the frame index, among 9955 // involved operands, that correspond to the lowest address. The 9956 // getCopyFromParts function, called earlier, is swapping the order of 9957 // the operands to BUILD_PAIR depending on endianness. The result of 9958 // that swapping is that the least significant bits of the argument will 9959 // be in the first operand of the BUILD_PAIR node, and the most 9960 // significant bits will be in the second operand. 9961 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0; 9962 if (LoadSDNode *LNode = 9963 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode())) 9964 if (FrameIndexSDNode *FI = 9965 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 9966 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 9967 } 9968 9969 // Analyses past this point are naive and don't expect an assertion. 9970 if (Res.getOpcode() == ISD::AssertZext) 9971 Res = Res.getOperand(0); 9972 9973 // Update the SwiftErrorVRegDefMap. 9974 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) { 9975 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 9976 if (Register::isVirtualRegister(Reg)) 9977 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(), 9978 Reg); 9979 } 9980 9981 // If this argument is live outside of the entry block, insert a copy from 9982 // wherever we got it to the vreg that other BB's will reference it as. 9983 if (Res.getOpcode() == ISD::CopyFromReg) { 9984 // If we can, though, try to skip creating an unnecessary vreg. 9985 // FIXME: This isn't very clean... it would be nice to make this more 9986 // general. 9987 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 9988 if (Register::isVirtualRegister(Reg)) { 9989 FuncInfo->ValueMap[&Arg] = Reg; 9990 continue; 9991 } 9992 } 9993 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) { 9994 FuncInfo->InitializeRegForValue(&Arg); 9995 SDB->CopyToExportRegsIfNeeded(&Arg); 9996 } 9997 } 9998 9999 if (!Chains.empty()) { 10000 Chains.push_back(NewRoot); 10001 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 10002 } 10003 10004 DAG.setRoot(NewRoot); 10005 10006 assert(i == InVals.size() && "Argument register count mismatch!"); 10007 10008 // If any argument copy elisions occurred and we have debug info, update the 10009 // stale frame indices used in the dbg.declare variable info table. 10010 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo(); 10011 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) { 10012 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) { 10013 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot); 10014 if (I != ArgCopyElisionFrameIndexMap.end()) 10015 VI.Slot = I->second; 10016 } 10017 } 10018 10019 // Finally, if the target has anything special to do, allow it to do so. 10020 emitFunctionEntryCode(); 10021 } 10022 10023 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 10024 /// ensure constants are generated when needed. 
Remember the virtual registers 10025 /// that need to be added to the Machine PHI nodes as input. We cannot just 10026 /// directly add them, because expansion might result in multiple MBB's for one 10027 /// BB. As such, the start of the BB might correspond to a different MBB than 10028 /// the end. 10029 void 10030 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 10031 const Instruction *TI = LLVMBB->getTerminator(); 10032 10033 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 10034 10035 // Check PHI nodes in successors that expect a value to be available from this 10036 // block. 10037 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 10038 const BasicBlock *SuccBB = TI->getSuccessor(succ); 10039 if (!isa<PHINode>(SuccBB->begin())) continue; 10040 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 10041 10042 // If this terminator has multiple identical successors (common for 10043 // switches), only handle each succ once. 10044 if (!SuccsHandled.insert(SuccMBB).second) 10045 continue; 10046 10047 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 10048 10049 // At this point we know that there is a 1-1 correspondence between LLVM PHI 10050 // nodes and Machine PHI nodes, but the incoming operands have not been 10051 // emitted yet. 10052 for (const PHINode &PN : SuccBB->phis()) { 10053 // Ignore dead PHIs. 10054 if (PN.use_empty()) 10055 continue; 10056 10057 // Skip empty types 10058 if (PN.getType()->isEmptyTy()) 10059 continue; 10060 10061 unsigned Reg; 10062 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB); 10063 10064 if (const Constant *C = dyn_cast<Constant>(PHIOp)) { 10065 unsigned &RegOut = ConstantsOut[C]; 10066 if (RegOut == 0) { 10067 RegOut = FuncInfo.CreateRegs(C); 10068 CopyValueToVirtualRegister(C, RegOut); 10069 } 10070 Reg = RegOut; 10071 } else { 10072 DenseMap<const Value *, Register>::iterator I = 10073 FuncInfo.ValueMap.find(PHIOp); 10074 if (I != FuncInfo.ValueMap.end()) 10075 Reg = I->second; 10076 else { 10077 assert(isa<AllocaInst>(PHIOp) && 10078 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 10079 "Didn't codegen value into a register!??"); 10080 Reg = FuncInfo.CreateRegs(PHIOp); 10081 CopyValueToVirtualRegister(PHIOp, Reg); 10082 } 10083 } 10084 10085 // Remember that this register needs to be added to the machine PHI node as 10086 // the input for this MBB. 10087 SmallVector<EVT, 4> ValueVTs; 10088 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10089 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs); 10090 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) { 10091 EVT VT = ValueVTs[vti]; 10092 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT); 10093 for (unsigned i = 0, e = NumRegisters; i != e; ++i) 10094 FuncInfo.PHINodesToUpdate.push_back( 10095 std::make_pair(&*MBBI++, Reg + i)); 10096 Reg += NumRegisters; 10097 } 10098 } 10099 } 10100 10101 ConstantsOut.clear(); 10102 } 10103 10104 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB 10105 /// if SuccMBB is null. 10106 MachineBasicBlock * 10107 SelectionDAGBuilder::StackProtectorDescriptor:: 10108 AddSuccessorMBB(const BasicBlock *BB, 10109 MachineBasicBlock *ParentMBB, 10110 bool IsLikely, 10111 MachineBasicBlock *SuccMBB) { 10112 // If SuccBB has not been created yet, create it.
10113 if (!SuccMBB) { 10114 MachineFunction *MF = ParentMBB->getParent(); 10115 MachineFunction::iterator BBI(ParentMBB); 10116 SuccMBB = MF->CreateMachineBasicBlock(BB); 10117 MF->insert(++BBI, SuccMBB); 10118 } 10119 // Add it as a successor of ParentMBB. 10120 ParentMBB->addSuccessor( 10121 SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely)); 10122 return SuccMBB; 10123 } 10124 10125 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) { 10126 MachineFunction::iterator I(MBB); 10127 if (++I == FuncInfo.MF->end()) 10128 return nullptr; 10129 return &*I; 10130 } 10131 10132 /// During lowering new call nodes can be created (such as memset, etc.). 10133 /// Those will become new roots of the current DAG, but complications arise 10134 /// when they are tail calls. In such cases, the call lowering will update 10135 /// the root, but the builder still needs to know that a tail call has been 10136 /// lowered in order to avoid generating an additional return. 10137 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) { 10138 // If the node is null, we do have a tail call. 10139 if (MaybeTC.getNode() != nullptr) 10140 DAG.setRoot(MaybeTC); 10141 else 10142 HasTailCall = true; 10143 } 10144 10145 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond, 10146 MachineBasicBlock *SwitchMBB, 10147 MachineBasicBlock *DefaultMBB) { 10148 MachineFunction *CurMF = FuncInfo.MF; 10149 MachineBasicBlock *NextMBB = nullptr; 10150 MachineFunction::iterator BBI(W.MBB); 10151 if (++BBI != FuncInfo.MF->end()) 10152 NextMBB = &*BBI; 10153 10154 unsigned Size = W.LastCluster - W.FirstCluster + 1; 10155 10156 BranchProbabilityInfo *BPI = FuncInfo.BPI; 10157 10158 if (Size == 2 && W.MBB == SwitchMBB) { 10159 // If any two of the cases have the same destination, and if one value 10160 // is the same as the other, but has one bit unset that the other has set, 10161 // use bit manipulation to do two compares at once. For example: 10162 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)" 10163 // TODO: This could be extended to merge any 2 cases in switches with 3 10164 // cases. 10165 // TODO: Handle cases where W.CaseBB != SwitchBB. 10166 CaseCluster &Small = *W.FirstCluster; 10167 CaseCluster &Big = *W.LastCluster; 10168 10169 if (Small.Low == Small.High && Big.Low == Big.High && 10170 Small.MBB == Big.MBB) { 10171 const APInt &SmallValue = Small.Low->getValue(); 10172 const APInt &BigValue = Big.Low->getValue(); 10173 10174 // Check that there is only one bit different. 10175 APInt CommonBit = BigValue ^ SmallValue; 10176 if (CommonBit.isPowerOf2()) { 10177 SDValue CondLHS = getValue(Cond); 10178 EVT VT = CondLHS.getValueType(); 10179 SDLoc DL = getCurSDLoc(); 10180 10181 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS, 10182 DAG.getConstant(CommonBit, DL, VT)); 10183 SDValue Cond = DAG.getSetCC( 10184 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT), 10185 ISD::SETEQ); 10186 10187 // Update successor info. 10188 // Both Small and Big will jump to Small.MBB, so we sum up the 10189 // probabilities. 10190 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob); 10191 if (BPI) 10192 addSuccessorWithProb( 10193 SwitchMBB, DefaultMBB, 10194 // The default destination is the first successor in IR. 10195 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0)); 10196 else 10197 addSuccessorWithProb(SwitchMBB, DefaultMBB); 10198 10199 // Insert the true branch.
10200 SDValue BrCond = 10201 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond, 10202 DAG.getBasicBlock(Small.MBB)); 10203 // Insert the false branch. 10204 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond, 10205 DAG.getBasicBlock(DefaultMBB)); 10206 10207 DAG.setRoot(BrCond); 10208 return; 10209 } 10210 } 10211 } 10212 10213 if (TM.getOptLevel() != CodeGenOpt::None) { 10214 // Here, we order cases by probability so the most likely case will be 10215 // checked first. However, two clusters can have the same probability in 10216 // which case their relative ordering is non-deterministic. So we use Low 10217 // as a tie-breaker as clusters are guaranteed to never overlap. 10218 llvm::sort(W.FirstCluster, W.LastCluster + 1, 10219 [](const CaseCluster &a, const CaseCluster &b) { 10220 return a.Prob != b.Prob ? 10221 a.Prob > b.Prob : 10222 a.Low->getValue().slt(b.Low->getValue()); 10223 }); 10224 10225 // Rearrange the case blocks so that the last one falls through if possible 10226 // without changing the order of probabilities. 10227 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) { 10228 --I; 10229 if (I->Prob > W.LastCluster->Prob) 10230 break; 10231 if (I->Kind == CC_Range && I->MBB == NextMBB) { 10232 std::swap(*I, *W.LastCluster); 10233 break; 10234 } 10235 } 10236 } 10237 10238 // Compute total probability. 10239 BranchProbability DefaultProb = W.DefaultProb; 10240 BranchProbability UnhandledProbs = DefaultProb; 10241 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 10242 UnhandledProbs += I->Prob; 10243 10244 MachineBasicBlock *CurMBB = W.MBB; 10245 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 10246 bool FallthroughUnreachable = false; 10247 MachineBasicBlock *Fallthrough; 10248 if (I == W.LastCluster) { 10249 // For the last cluster, fall through to the default destination. 10250 Fallthrough = DefaultMBB; 10251 FallthroughUnreachable = isa<UnreachableInst>( 10252 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 10253 } else { 10254 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 10255 CurMF->insert(BBI, Fallthrough); 10256 // Put Cond in a virtual register to make it available from the new blocks. 10257 ExportFromCurrentBlock(Cond); 10258 } 10259 UnhandledProbs -= I->Prob; 10260 10261 switch (I->Kind) { 10262 case CC_JumpTable: { 10263 // FIXME: Optimize away range check based on pivot comparisons. 10264 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; 10265 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; 10266 10267 // The jump block hasn't been inserted yet; insert it here. 10268 MachineBasicBlock *JumpMBB = JT->MBB; 10269 CurMF->insert(BBI, JumpMBB); 10270 10271 auto JumpProb = I->Prob; 10272 auto FallthroughProb = UnhandledProbs; 10273 10274 // If the default statement is a target of the jump table, we evenly 10275 // distribute the default probability to successors of CurMBB. Also 10276 // update the probability on the edge from JumpMBB to Fallthrough. 10277 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), 10278 SE = JumpMBB->succ_end(); 10279 SI != SE; ++SI) { 10280 if (*SI == DefaultMBB) { 10281 JumpProb += DefaultProb / 2; 10282 FallthroughProb -= DefaultProb / 2; 10283 JumpMBB->setSuccProbability(SI, DefaultProb / 2); 10284 JumpMBB->normalizeSuccProbs(); 10285 break; 10286 } 10287 } 10288 10289 if (FallthroughUnreachable) { 10290 // Skip the range check if the fallthrough block is unreachable. 
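      // (The range check only guards against values that would miss the
      // table and fall through; if the fallthrough is unreachable, every
      // value that reaches this header is already covered by the table.)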
10291 JTH->OmitRangeCheck = true; 10292 } 10293 10294 if (!JTH->OmitRangeCheck) 10295 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb); 10296 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb); 10297 CurMBB->normalizeSuccProbs(); 10298 10299 // The jump table header will be inserted in our current block, do the 10300 // range check, and fall through to our fallthrough block. 10301 JTH->HeaderBB = CurMBB; 10302 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. 10303 10304 // If we're in the right place, emit the jump table header right now. 10305 if (CurMBB == SwitchMBB) { 10306 visitJumpTableHeader(*JT, *JTH, SwitchMBB); 10307 JTH->Emitted = true; 10308 } 10309 break; 10310 } 10311 case CC_BitTests: { 10312 // FIXME: Optimize away range check based on pivot comparisons. 10313 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex]; 10314 10315 // The bit test blocks haven't been inserted yet; insert them here. 10316 for (BitTestCase &BTC : BTB->Cases) 10317 CurMF->insert(BBI, BTC.ThisBB); 10318 10319 // Fill in fields of the BitTestBlock. 10320 BTB->Parent = CurMBB; 10321 BTB->Default = Fallthrough; 10322 10323 BTB->DefaultProb = UnhandledProbs; 10324 // If the cases in bit test don't form a contiguous range, we evenly 10325 // distribute the probability on the edge to Fallthrough to two 10326 // successors of CurMBB. 10327 if (!BTB->ContiguousRange) { 10328 BTB->Prob += DefaultProb / 2; 10329 BTB->DefaultProb -= DefaultProb / 2; 10330 } 10331 10332 if (FallthroughUnreachable) { 10333 // Skip the range check if the fallthrough block is unreachable. 10334 BTB->OmitRangeCheck = true; 10335 } 10336 10337 // If we're in the right place, emit the bit test header right now. 10338 if (CurMBB == SwitchMBB) { 10339 visitBitTestHeader(*BTB, SwitchMBB); 10340 BTB->Emitted = true; 10341 } 10342 break; 10343 } 10344 case CC_Range: { 10345 const Value *RHS, *LHS, *MHS; 10346 ISD::CondCode CC; 10347 if (I->Low == I->High) { 10348 // Check Cond == I->Low. 10349 CC = ISD::SETEQ; 10350 LHS = Cond; 10351 RHS=I->Low; 10352 MHS = nullptr; 10353 } else { 10354 // Check I->Low <= Cond <= I->High. 10355 CC = ISD::SETLE; 10356 LHS = I->Low; 10357 MHS = Cond; 10358 RHS = I->High; 10359 } 10360 10361 // If Fallthrough is unreachable, fold away the comparison. 10362 if (FallthroughUnreachable) 10363 CC = ISD::SETTRUE; 10364 10365 // The false probability is the sum of all unhandled cases. 10366 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, 10367 getCurSDLoc(), I->Prob, UnhandledProbs); 10368 10369 if (CurMBB == SwitchMBB) 10370 visitSwitchCase(CB, SwitchMBB); 10371 else 10372 SL->SwitchCases.push_back(CB); 10373 10374 break; 10375 } 10376 } 10377 CurMBB = Fallthrough; 10378 } 10379 } 10380 10381 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC, 10382 CaseClusterIt First, 10383 CaseClusterIt Last) { 10384 return std::count_if(First, Last + 1, [&](const CaseCluster &X) { 10385 if (X.Prob != CC.Prob) 10386 return X.Prob > CC.Prob; 10387 10388 // Ties are broken by comparing the case value. 
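    // (Taken together, the count_if makes caseClusterRank return how many
    // clusters in [First, Last] sort ahead of CC under this
    // probability-then-value ordering.)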
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}

void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
                                        const SwitchWorkListItem &W,
                                        Value *Cond,
                                        MachineBasicBlock *SwitchMBB) {
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");

  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  // Balance the tree based on branch probabilities to create a near-optimal
  // (in terms of search time given key frequency) binary search tree. See
  // e.g. Kurt Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions
  // to find a partitioning of the clusters which balances the probability on
  // both sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has fewer than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
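  // For instance (hypothetical clusters), splitting {[0,3], [5,5], [7,9],
  // [10,10]} between the second and third cluster makes Pivot = 7: the left
  // work item re-handles [0,3] and [5,5] under the bound Cond < 7, and the
  // right one handles the rest under Cond >= 7.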
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new
    // blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
      W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new
    // blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
               getCurSDLoc(), LeftProb, RightProb);

  if (W.MBB == SwitchMBB)
    visitSwitchCase(CB, SwitchMBB);
  else
    SL->SwitchCases.push_back(CB);
}

// Scale CaseProb after peeling a case with the probability of PeeledCaseProb
// from the switch statement.
static BranchProbability
scaleCaseProbability(BranchProbability CaseProb,
                     BranchProbability PeeledCaseProb) {
  if (PeeledCaseProb == BranchProbability::getOne())
    return BranchProbability::getZero();
  BranchProbability SwitchProb = PeeledCaseProb.getCompl();

  uint32_t Numerator = CaseProb.getNumerator();
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
}

// Try to peel the top probability case if it exceeds the threshold.
// Return current MachineBasicBlock for the switch statement if the peeling
// does not occur.
// If the peeling is performed, return the newly created MachineBasicBlock
// for the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
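//
// As a worked example (hypothetical numbers): peeling a case with
// probability 0.7 leaves the remaining switch with probability 0.3, so a
// remaining case that had probability 0.2 is rescaled by
// scaleCaseProbability to 0.2 / 0.3 ≈ 0.67 within the peeled switch block.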
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if peeling is disabled by the threshold, there is no profile
  // information, there are fewer than two clusters, we aren't optimizing, or
  // we are optimizing for minimum size.
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().hasMinSize())
    return SwitchMBB;

  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr, nullptr, TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
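  // This happens when the switch carries no non-default cases at all, e.g.
  // the hypothetical IR `switch i32 %x, label %dflt []`, which leaves
  // Clusters empty.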
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurred and
  // DefaultMBB was not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      // For optimized builds, lower large switch ranges as a balanced binary
      // tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}

void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
  // An aggregate may lower to several legal values; emit one FREEZE node per
  // constituent value and merge the results back together.
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  SmallVector<SDValue, 4> Values(NumValues);
  SDValue Op = getValue(I.getOperand(0));

  for (unsigned i = 0; i != NumValues; ++i)
    Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
                            SDValue(Op.getNode(), Op.getResNo() + i));

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
}