//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will disable this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
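///
/// An illustrative sketch of the integer path below (not tied to a particular
/// target): assembling an i96 value from three i32 parts first builds an i64
/// BUILD_PAIR from the two "round" parts, then any-extends the odd third part
/// to i96, shifts it left by 64, and ORs it into the zero-extended pair.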
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUNDs are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm()) {
      return Ctx.diagnose(DiagnosticInfoInlineAsm(
          *CI, ErrMsg + ", possible invalid constraint for vector type"));
    }

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
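///
/// As an illustrative sketch: a <4 x i32> value that was passed in two
/// <2 x i32> registers arrives with NumParts == NumIntermediates == 2; each
/// register is copied out with getCopyFromParts and the pieces are then
/// reassembled below with ISD::CONCAT_VECTORS.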
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
               Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // of the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer
      // and then promoted to a larger integer. If PartEVT is the larger
      // integer we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
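///
/// Sketch of the non-power-of-2 path below: splitting an i96 value into three
/// i32 parts first copies out the high "odd" i32 with a 64-bit SRL, then
/// truncates the remaining value to i64 and repeatedly bisects it with
/// ISD::EXTRACT_ELEMENT to produce the two remaining parts.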
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal =
        DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                    DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

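/// widenVectorToPartType - Try to widen the vector value Val to the wider
/// vector type PartVT by padding with undef elements (or, for scalable
/// vectors, by inserting Val into a larger undef vector). Returns an empty
/// SDValue when PartVT is not a vector, is not wider, differs in
/// fixed/scalable-ness, or has a mismatched element type (except for the
/// bf16 -> f16 ABI-sharing case handled below).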
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Handle bf16 specially, because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
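///
/// For instance, in the single-part case below, copying a <2 x float> value
/// into one <4 x float> register goes through widenVectorToPartType, which
/// pads the extra lanes with undef.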
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        // If we reach this condition and PartVT is FP, this means that
        // ValueVT is also FP and both have a different size, otherwise we
        // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
        // would be invalid since that would mean the smaller FP type has to
        // be extended to the larger one.
        if (PartVT.isFloatingPoint()) {
          Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
          Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
        } else
          Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                            DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

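// A rough illustration of the constructor below: for an i64 value on a target
// whose widest legal integer register is i32, getNumRegisters typically
// returns 2 with RegisterVT == MVT::i32, so Regs receives two consecutive
// register ids and RegCount records 2 for that value.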
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, Register Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg = Reg.id() + NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Regs[Part + i].isVirtual() || !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is zero. Express that explicitly, as it makes it
        // easier for later optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

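/// AddInlineAsmOperands - Append this group of registers to the inline-asm
/// node's operand list: first an InlineAsm::Flag word encoding the operand
/// kind, the register count, and (when known) the register class or matched
/// operand index, then one register operand per register.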
void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Regs.front().isVirtual()) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      Register TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<Register, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, BatchAAResults *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  BatchAA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
}

void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList())) {
        SmallVector<Value *, 4> Vals(It->Values.location_ops());
        addDanglingDebugInfo(Vals,
                             FnVarLocs->getDILocalVariable(It->VariableID),
                             It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
      }
    }
  }

  // We must skip DbgVariableRecords if they've already been processed above as
  // we have just emitted the debug values resulting from assignment tracking
  // analysis, making any existing DbgVariableRecords redundant (and probably
  // less correct). We still need to process DbgLabelRecords. This does sink
  // DbgLabelRecords to the bottom of the group of debug records. That
  // shouldn't be important as it does so deterministically and ordering
  // between DbgLabelRecords and DbgVariableRecords is immaterial (other than
  // for MIR/IR printing).
  bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
  // Handle any debug-info attached to this instruction in the form of
  // DbgRecord non-instruction debug-info records.
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      SDDbgLabel *SDV =
          DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
      DAG.AddDbgLabel(SDV);
      continue;
    }

    if (SkipDbgVariableRecords)
      continue;
    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
    DILocalVariable *Variable = DVR.getVariable();
    DIExpression *Expression = DVR.getExpression();
    dropDanglingDebugInfo(Variable, Expression);

    if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
                        << "\n");
      handleDebugDeclare(DVR.getVariableLocationOp(0), Variable, Expression,
                         DVR.getDebugLoc());
      continue;
    }

    // A DbgVariableRecord with no locations is a kill location.
    SmallVector<Value *, 4> Values(DVR.location_ops());
    if (Values.empty()) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    // A DbgVariableRecord with an undef or absent location is also a kill
    // location.
    if (llvm::any_of(Values,
                     [](Value *V) { return !V || isa<UndefValue>(V); })) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    bool IsVariadic = DVR.hasArgList();
    if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
                          SDNodeOrder, IsVariadic)) {
      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
                           DVR.getDebugLoc(), SDNodeOrder);
    }
  }
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  visitDbgInfo(I);

  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  ++SDNodeOrder;
  CurInst = &I;

  // Set inserted listener only if required.
  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
  if (PCSectionsMD || MMRA) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });
  }

  visit(I.getOpcode(), I);

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  // Handle metadata.
  if (PCSectionsMD || MMRA) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
      if (PCSectionsMD)
        DAG.addPCSections(It->second.getNode(), PCSectionsMD);
      if (MMRA)
        DAG.addMMRAMetadata(It->second.getNode(), MMRA);
    } else if (NodeInserted) {
      // This should not happen; if it does, don't let it go unnoticed so we
      // can fix it. The relevant visit*() function is probably missing a
      // setValue() call.
      errs() << "warning: losing !pcsections and/or !mmra metadata ["
             << I.getModule()->getName() << "]\n";
1355 LLVM_DEBUG(I.dump());
1356 assert(false);
1357 }
1358 }
1359
1360 CurInst = nullptr;
1361 }
1362
visitPHI(const PHINode &)1363 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1364 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1365 }
1366
visit(unsigned Opcode,const User & I)1367 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1368 // Note: this doesn't use InstVisitor, because it has to work with
1369 // ConstantExpr's in addition to instructions.
1370 switch (Opcode) {
1371 default: llvm_unreachable("Unknown instruction type encountered!");
1372 // Build the switch statement using the Instruction.def file.
1373 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1374 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1375 #include "llvm/IR/Instruction.def"
1376 }
1377 }
1378
handleDanglingVariadicDebugInfo(SelectionDAG & DAG,DILocalVariable * Variable,DebugLoc DL,unsigned Order,SmallVectorImpl<Value * > & Values,DIExpression * Expression)1379 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1380 DILocalVariable *Variable,
1381 DebugLoc DL, unsigned Order,
1382 SmallVectorImpl<Value *> &Values,
1383 DIExpression *Expression) {
1384 // For variadic dbg_values we will now insert poison.
1385 // FIXME: We can potentially recover these!
1386 SmallVector<SDDbgOperand, 2> Locs;
1387 for (const Value *V : Values) {
1388 auto *Poison = PoisonValue::get(V->getType());
1389 Locs.push_back(SDDbgOperand::fromConst(Poison));
1390 }
1391 SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1392 /*IsIndirect=*/false, DL, Order,
1393 /*IsVariadic=*/true);
1394 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1395 return true;
1396 }
1397
addDanglingDebugInfo(SmallVectorImpl<Value * > & Values,DILocalVariable * Var,DIExpression * Expr,bool IsVariadic,DebugLoc DL,unsigned Order)1398 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1399 DILocalVariable *Var,
1400 DIExpression *Expr,
1401 bool IsVariadic, DebugLoc DL,
1402 unsigned Order) {
1403 if (IsVariadic) {
1404 handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1405 return;
1406 }
1407 // TODO: Dangling debug info will eventually either be resolved or produce
1408 // a poison DBG_VALUE. However in the resolution case, a gap may appear
1409 // between the original dbg.value location and its resolved DBG_VALUE,
1410 // which we should ideally fill with an extra poison DBG_VALUE.
1411 assert(Values.size() == 1);
1412 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1413 }
1414
1415 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1416 const DIExpression *Expr) {
1417 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1418 DIVariable *DanglingVariable = DDI.getVariable();
1419 DIExpression *DanglingExpr = DDI.getExpression();
1420 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1421 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1422 << printDDI(nullptr, DDI) << "\n");
1423 return true;
1424 }
1425 return false;
1426 };
1427
1428 for (auto &DDIMI : DanglingDebugInfoMap) {
1429 DanglingDebugInfoVector &DDIV = DDIMI.second;
1430
1431 // If debug info is to be dropped, run it through final checks to see
1432 // whether it can be salvaged.
1433 for (auto &DDI : DDIV)
1434 if (isMatchingDbgValue(DDI))
1435 salvageUnresolvedDbgValue(DDIMI.first, DDI);
1436
1437 erase_if(DDIV, isMatchingDbgValue);
1438 }
1439 }
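// Example of the fragment-overlap test above (illustrative): a new dbg.value
// describing fragment bits [0, 32) of a variable drops a dangling dbg.value
// for bits [16, 48) (they overlap) but leaves one for bits [32, 64) in place.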
1440
1441 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1442 // generate the debug data structures now that we've seen its definition.
1443 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1444 SDValue Val) {
1445 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1446 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1447 return;
1448
1449 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1450 for (auto &DDI : DDIV) {
1451 DebugLoc DL = DDI.getDebugLoc();
1452 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1453 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1454 DILocalVariable *Variable = DDI.getVariable();
1455 DIExpression *Expr = DDI.getExpression();
1456 assert(Variable->isValidLocationForIntrinsic(DL) &&
1457 "Expected inlined-at fields to agree");
1458 SDDbgValue *SDV;
1459 if (Val.getNode()) {
1460 // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1461 // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1462 // we couldn't resolve it directly when examining the DbgValue intrinsic
1463 // in the first place we should not be more successful here). Unless we
1464 // have some test case that proves this to be correct, we should avoid
1465 // calling EmitFuncArgumentDbgValue here.
1466 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1467 FuncArgumentDbgValueKind::Value, Val)) {
1468 LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1469 << printDDI(V, DDI) << "\n");
1470 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1471 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1472 // inserted after the definition of Val when emitting the instructions
1473 // after ISel. An alternative could be to teach
1474 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1475 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1476 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1477 << ValSDNodeOrder << "\n");
1478 SDV = getDbgValue(Val, Variable, Expr, DL,
1479 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1480 DAG.AddDbgValue(SDV, false);
1481 } else
1482 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1483 << printDDI(V, DDI)
1484 << " in EmitFuncArgumentDbgValue\n");
1485 } else {
1486 LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1487 << "\n");
1488 auto Poison = PoisonValue::get(V->getType());
1489 auto SDV =
1490 DAG.getConstantDbgValue(Variable, Expr, Poison, DL, DbgSDNodeOrder);
1491 DAG.AddDbgValue(SDV, false);
1492 }
1493 }
1494 DDIV.clear();
1495 }
1496
1497 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1498 DanglingDebugInfo &DDI) {
1499 // TODO: For the variadic implementation, instead of only checking the fail
1500 // state of `handleDebugValue`, we need to know specifically which values were
1501 // invalid, so that we attempt to salvage only those values when processing
1502 // a DIArgList.
1503 const Value *OrigV = V;
1504 DILocalVariable *Var = DDI.getVariable();
1505 DIExpression *Expr = DDI.getExpression();
1506 DebugLoc DL = DDI.getDebugLoc();
1507 unsigned SDOrder = DDI.getSDNodeOrder();
1508
1509 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1510 // that DW_OP_stack_value is desired.
1511 bool StackValue = true;
1512
1513 // Can this Value be encoded without any further work?
1514 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1515 return;
1516
1517 // Attempt to salvage back through as many instructions as possible. Bail if
1518 // a non-instruction is seen, such as a constant expression or global
1519 // variable. FIXME: Further work could recover those too.
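// Illustrative round of the loop below (assumed): if V is "%v = add i32 %x, 1"
// and %v never got an SDNode, one salvage step rewrites the location to %x and
// appends the equivalent of "+1" (e.g. DW_OP_plus_uconst 1, as a
// DW_OP_stack_value) to Expr, then retries handleDebugValue with the new
// operand.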
1520 while (isa<Instruction>(V)) {
1521 const Instruction &VAsInst = *cast<const Instruction>(V);
1522 // Temporary "0", awaiting real implementation.
1523 SmallVector<uint64_t, 16> Ops;
1524 SmallVector<Value *, 4> AdditionalValues;
1525 V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1526 Expr->getNumLocationOperands(), Ops,
1527 AdditionalValues);
1528 // If we cannot salvage any further, and haven't yet found a suitable debug
1529 // expression, bail out.
1530 if (!V)
1531 break;
1532
1533 // TODO: If AdditionalValues isn't empty, then the salvage can only be
1534 // represented with a DBG_VALUE_LIST, so we give up. When we have support
1535 // here for variadic dbg_values, remove that condition.
1536 if (!AdditionalValues.empty())
1537 break;
1538
1539 // The new value and expr now represent this debug info.
1540 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1541
1542 // Some kind of simplification occurred: check whether the operand of the
1543 // salvaged debug expression can be encoded in this DAG.
1544 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1545 LLVM_DEBUG(
1546 dbgs() << "Salvaged debug location info for:\n " << *Var << "\n"
1547 << *OrigV << "\nBy stripping back to:\n " << *V << "\n");
1548 return;
1549 }
1550 }
1551
1552 // This was the final opportunity to salvage this debug information, and it
1553 // couldn't be done. Place a poison DBG_VALUE at this location to terminate
1554 // any earlier variable location.
1555 assert(OrigV && "V shouldn't be null");
1556 auto *Poison = PoisonValue::get(OrigV->getType());
1557 auto *SDV = DAG.getConstantDbgValue(Var, Expr, Poison, DL, SDNodeOrder);
1558 DAG.AddDbgValue(SDV, false);
1559 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n "
1560 << printDDI(OrigV, DDI) << "\n");
1561 }
1562
1563 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1564 DIExpression *Expr,
1565 DebugLoc DbgLoc,
1566 unsigned Order) {
1567 Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1568 DIExpression *NewExpr =
1569 const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1570 handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1571 /*IsVariadic*/ false);
1572 }
1573
1574 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1575 DILocalVariable *Var,
1576 DIExpression *Expr, DebugLoc DbgLoc,
1577 unsigned Order, bool IsVariadic) {
1578 if (Values.empty())
1579 return true;
1580
1581 // Filter EntryValue locations out early.
1582 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1583 return true;
1584
1585 SmallVector<SDDbgOperand> LocationOps;
1586 SmallVector<SDNode *> Dependencies;
1587 for (const Value *V : Values) {
1588 // Constant value.
1589 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1590 isa<ConstantPointerNull>(V)) {
1591 LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1592 continue;
1593 }
1594
1595 // Look through IntToPtr constants.
1596 if (auto *CE = dyn_cast<ConstantExpr>(V))
1597 if (CE->getOpcode() == Instruction::IntToPtr) {
1598 LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1599 continue;
1600 }
1601
1602 // If the Value is a frame index, we can create a FrameIndex debug value
1603 // without relying on the DAG at all.
1604 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1605 auto SI = FuncInfo.StaticAllocaMap.find(AI);
1606 if (SI != FuncInfo.StaticAllocaMap.end()) {
1607 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1608 continue;
1609 }
1610 }
1611
1612 // Do not use getValue() in here; we don't want to generate code at
1613 // this point if it hasn't been done yet.
1614 SDValue N = NodeMap[V];
1615 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1616 N = UnusedArgNodeMap[V];
1617
1618 if (N.getNode()) {
1619 // Only emit func arg dbg value for non-variadic dbg.values for now.
1620 if (!IsVariadic &&
1621 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1622 FuncArgumentDbgValueKind::Value, N))
1623 return true;
1624 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1625 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1626 // describe stack slot locations.
1627 //
1628 // Consider "int x = 0; int *px = &x;". There are two kinds of
1629 // interesting debug values here after optimization:
1630 //
1631 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
1632 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1633 //
1634 // Both describe the direct values of their associated variables.
1635 Dependencies.push_back(N.getNode());
1636 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1637 continue;
1638 }
1639 LocationOps.emplace_back(
1640 SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1641 continue;
1642 }
1643
1644 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1645 // Special rules apply for the first dbg.values of parameter variables in a
1646 // function. Identify them by the fact that they reference an Argument Value,
1647 // the variable is a parameter, and there is no inlined-at location (the
1648 // parameter belongs to this function). Let them dangle until they get an SDNode.
1649 bool IsParamOfFunc =
1650 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1651 if (IsParamOfFunc)
1652 return false;
1653
1654 // The value is not used in this block yet (or it would have an SDNode).
1655 // We still want the value to appear for the user if possible -- if it has
1656 // an associated VReg, we can refer to that instead.
1657 auto VMI = FuncInfo.ValueMap.find(V);
1658 if (VMI != FuncInfo.ValueMap.end()) {
1659 Register Reg = VMI->second;
1660 // If this is a PHI node, it may be split up into several MI PHI nodes
1661 // (in FunctionLoweringInfo::set).
1662 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1663 V->getType(), std::nullopt);
1664 if (RFV.occupiesMultipleRegs()) {
1665 // FIXME: We could potentially support variadic dbg_values here.
1666 if (IsVariadic)
1667 return false;
1668 unsigned Offset = 0;
1669 unsigned BitsToDescribe = 0;
1670 if (auto VarSize = Var->getSizeInBits())
1671 BitsToDescribe = *VarSize;
1672 if (auto Fragment = Expr->getFragmentInfo())
1673 BitsToDescribe = Fragment->SizeInBits;
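// Worked example (illustrative): an i64 variable spread over two i32
// registers becomes two DBG_VALUEs whose fragment expressions cover bits
// [0, 32) and [32, 64); the FragmentSize clamp below clips the last
// fragment when the variable is narrower than the register span.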
1674 for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1675 // Bail out if all bits are described already.
1676 if (Offset >= BitsToDescribe)
1677 break;
1678 // TODO: handle scalable vectors.
1679 unsigned RegisterSize = RegAndSize.second;
1680 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1681 ? BitsToDescribe - Offset
1682 : RegisterSize;
1683 auto FragmentExpr = DIExpression::createFragmentExpression(
1684 Expr, Offset, FragmentSize);
1685 if (!FragmentExpr)
1686 continue;
1687 SDDbgValue *SDV = DAG.getVRegDbgValue(
1688 Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1689 DAG.AddDbgValue(SDV, false);
1690 Offset += RegisterSize;
1691 }
1692 return true;
1693 }
1694 // We can use simple vreg locations for variadic dbg_values as well.
1695 LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1696 continue;
1697 }
1698 // We failed to create an SDDbgOperand for V.
1699 return false;
1700 }
1701
1702 // We have created an SDDbgOperand for each Value in Values.
1703 assert(!LocationOps.empty());
1704 SDDbgValue *SDV =
1705 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1706 /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1707 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1708 return true;
1709 }
1710
1711 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1712 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1713 for (auto &Pair : DanglingDebugInfoMap)
1714 for (auto &DDI : Pair.second)
1715 salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1716 clearDanglingDebugInfo();
1717 }
1718
1719 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1720 /// emit a CopyFromReg of the specified type Ty. Otherwise, return an empty SDValue().
1721 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1722 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1723 SDValue Result;
1724
1725 if (It != FuncInfo.ValueMap.end()) {
1726 Register InReg = It->second;
1727
1728 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1729 DAG.getDataLayout(), InReg, Ty,
1730 std::nullopt); // This is not an ABI copy.
1731 SDValue Chain = DAG.getEntryNode();
1732 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1733 V);
1734 resolveDanglingDebugInfo(V, Result);
1735 }
1736
1737 return Result;
1738 }
1739
1740 /// getValue - Return an SDValue for the given Value.
1741 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1742 // If we already have an SDValue for this value, use it. It's important
1743 // to do this first, so that we don't create a CopyFromReg if we already
1744 // have a regular SDValue.
1745 SDValue &N = NodeMap[V];
1746 if (N.getNode()) return N;
1747
1748 // If there's a virtual register allocated and initialized for this
1749 // value, use it.
1750 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1751 return copyFromReg;
1752
1753 // Otherwise create a new SDValue and remember it.
1754 SDValue Val = getValueImpl(V);
1755 NodeMap[V] = Val;
1756 resolveDanglingDebugInfo(V, Val);
1757 return Val;
1758 }
1759
1760 /// getNonRegisterValue - Return an SDValue for the given Value, but
1761 /// don't look in FuncInfo.ValueMap for a virtual register.
1762 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1763 // If we already have an SDValue for this value, use it.
1764 SDValue &N = NodeMap[V];
1765 if (N.getNode()) {
1766 if (isIntOrFPConstant(N)) {
1767 // Remove the debug location from the node as the node is about to be used
1768 // in a location which may differ from the original debug location. This
1769 // is relevant to Constant and ConstantFP nodes because they can appear
1770 // as constant expressions inside PHI nodes.
1771 N->setDebugLoc(DebugLoc());
1772 }
1773 return N;
1774 }
1775
1776 // Otherwise create a new SDValue and remember it.
1777 SDValue Val = getValueImpl(V);
1778 NodeMap[V] = Val;
1779 resolveDanglingDebugInfo(V, Val);
1780 return Val;
1781 }
1782
1783 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1784 /// Create an SDValue for the given value.
1785 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1786 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1787
1788 if (const Constant *C = dyn_cast<Constant>(V)) {
1789 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1790
1791 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
1792 SDLoc DL = getCurSDLoc();
1793
1794 // DAG.getConstant() may attempt to legalise the vector constant which can
1795 // significantly change the combines applied to the DAG. To reduce the
1796 // divergence when enabling ConstantInt based vectors we try to construct
1797 // the DAG in the same way as shufflevector based splats. TODO: The
1798 // divergence sometimes leads to better optimisations. Ideally we should
1799 // prevent DAG.getConstant() from legalising too early but there are some
1800 // degradations preventing this.
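// For example (illustrative): a splat of the constant 1 is built as
//   <vscale x 4 x i32> -> ISD::SPLAT_VECTOR of (i32 1)
//   <4 x i32>          -> BUILD_VECTOR of (i32 1, i32 1, i32 1, i32 1)
// mirroring what a shufflevector-based splat would produce.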
1801 if (VT.isScalableVector())
1802 return DAG.getNode(
1803 ISD::SPLAT_VECTOR, DL, VT,
1804 DAG.getConstant(CI->getValue(), DL, VT.getVectorElementType()));
1805 if (VT.isFixedLengthVector())
1806 return DAG.getSplatBuildVector(
1807 VT, DL,
1808 DAG.getConstant(CI->getValue(), DL, VT.getVectorElementType()));
1809 return DAG.getConstant(*CI, DL, VT);
1810 }
1811
1812 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1813 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1814
1815 if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1816 return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1817 getValue(CPA->getPointer()), getValue(CPA->getKey()),
1818 getValue(CPA->getAddrDiscriminator()),
1819 getValue(CPA->getDiscriminator()));
1820 }
1821
1822 if (isa<ConstantPointerNull>(C)) {
1823 unsigned AS = V->getType()->getPointerAddressSpace();
1824 return DAG.getConstant(0, getCurSDLoc(),
1825 TLI.getPointerTy(DAG.getDataLayout(), AS));
1826 }
1827
1828 if (match(C, m_VScale()))
1829 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1830
1831 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1832 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1833
1834 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1835 return isa<PoisonValue>(C) ? DAG.getPOISON(VT) : DAG.getUNDEF(VT);
1836
1837 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1838 visit(CE->getOpcode(), *CE);
1839 SDValue N1 = NodeMap[V];
1840 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1841 return N1;
1842 }
1843
1844 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1845 SmallVector<SDValue, 4> Constants;
1846 for (const Use &U : C->operands()) {
1847 SDNode *Val = getValue(U).getNode();
1848 // If the operand is an empty aggregate, there are no values.
1849 if (!Val) continue;
1850 // Add each leaf value from the operand to the Constants list
1851 // to form a flattened list of all the values.
1852 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1853 Constants.push_back(SDValue(Val, i));
1854 }
1855
1856 return DAG.getMergeValues(Constants, getCurSDLoc());
1857 }
1858
1859 if (const ConstantDataSequential *CDS =
1860 dyn_cast<ConstantDataSequential>(C)) {
1861 SmallVector<SDValue, 4> Ops;
1862 for (uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1863 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1864 // Add each leaf value from the operand to the Constants list
1865 // to form a flattened list of all the values.
1866 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1867 Ops.push_back(SDValue(Val, i));
1868 }
1869
1870 if (isa<ArrayType>(CDS->getType()))
1871 return DAG.getMergeValues(Ops, getCurSDLoc());
1872 return DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1873 }
1874
1875 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1876 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1877 "Unknown struct or array constant!");
1878
1879 SmallVector<EVT, 4> ValueVTs;
1880 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1881 unsigned NumElts = ValueVTs.size();
1882 if (NumElts == 0)
1883 return SDValue(); // empty struct
1884 SmallVector<SDValue, 4> Constants(NumElts);
1885 for (unsigned i = 0; i != NumElts; ++i) {
1886 EVT EltVT = ValueVTs[i];
1887 if (isa<UndefValue>(C))
1888 Constants[i] = DAG.getUNDEF(EltVT);
1889 else if (EltVT.isFloatingPoint())
1890 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1891 else
1892 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1893 }
1894
1895 return DAG.getMergeValues(Constants, getCurSDLoc());
1896 }
1897
1898 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1899 return DAG.getBlockAddress(BA, VT);
1900
1901 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1902 return getValue(Equiv->getGlobalValue());
1903
1904 if (const auto *NC = dyn_cast<NoCFIValue>(C))
1905 return getValue(NC->getGlobalValue());
1906
1907 if (VT == MVT::aarch64svcount) {
1908 assert(C->isNullValue() && "Can only zero this target type!");
1909 return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1910 DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1911 }
1912
1913 if (VT.isRISCVVectorTuple()) {
1914 assert(C->isNullValue() && "Can only zero this target type!");
1915 return DAG.getNode(
1916 ISD::BITCAST, getCurSDLoc(), VT,
1917 DAG.getNode(
1918 ISD::SPLAT_VECTOR, getCurSDLoc(),
1919 EVT::getVectorVT(*DAG.getContext(), MVT::i8,
1920 VT.getSizeInBits().getKnownMinValue() / 8, true),
1921 DAG.getConstant(0, getCurSDLoc(), MVT::getIntegerVT(8))));
1922 }
1923
1924 VectorType *VecTy = cast<VectorType>(V->getType());
1925
1926 // Now that we know the number and type of the elements, get that number of
1927 // elements into the Ops array based on what kind of constant it is.
1928 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1929 SmallVector<SDValue, 16> Ops;
1930 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1931 for (unsigned i = 0; i != NumElements; ++i)
1932 Ops.push_back(getValue(CV->getOperand(i)));
1933
1934 return DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1935 }
1936
1937 if (isa<ConstantAggregateZero>(C)) {
1938 EVT EltVT =
1939 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1940
1941 SDValue Op;
1942 if (EltVT.isFloatingPoint())
1943 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1944 else
1945 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1946
1947 return DAG.getSplat(VT, getCurSDLoc(), Op);
1948 }
1949
1950 llvm_unreachable("Unknown vector constant");
1951 }
1952
1953 // If this is a static alloca, generate it as the frame index instead of
1954 // recomputing the address.
1955 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1956 DenseMap<const AllocaInst*, int>::iterator SI =
1957 FuncInfo.StaticAllocaMap.find(AI);
1958 if (SI != FuncInfo.StaticAllocaMap.end())
1959 return DAG.getFrameIndex(
1960 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1961 }
1962
1963 // If this is an instruction which fast-isel has deferred, select it now.
1964 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1965 Register InReg = FuncInfo.InitializeRegForValue(Inst);
1966
1967 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1968 Inst->getType(), std::nullopt);
1969 SDValue Chain = DAG.getEntryNode();
1970 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1971 }
1972
1973 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1974 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1975
1976 if (const auto *BB = dyn_cast<BasicBlock>(V))
1977 return DAG.getBasicBlock(FuncInfo.getMBB(BB));
1978
1979 llvm_unreachable("Can't get register for value!");
1980 }
1981
1982 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1983 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1984 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1985 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1986 bool IsSEH = isAsynchronousEHPersonality(Pers);
1987 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1988 if (IsSEH) {
1989 // For SEH, EHCont Guard needs to know that this catchpad is a target.
1990 CatchPadMBB->setIsEHContTarget(true);
1991 DAG.getMachineFunction().setHasEHContTarget(true);
1992 } else
1993 CatchPadMBB->setIsEHScopeEntry();
1994 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1995 if (IsMSVCCXX || IsCoreCLR)
1996 CatchPadMBB->setIsEHFuncletEntry();
1997 }
1998
1999 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
2000 // Update machine-CFG edge.
2001 MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor());
2002 FuncInfo.MBB->addSuccessor(TargetMBB);
2003
2004 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2005 bool IsSEH = isAsynchronousEHPersonality(Pers);
2006 if (IsSEH) {
2007 // If this is not a fall-through branch or optimizations are switched off,
2008 // emit the branch.
2009 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
2010 TM.getOptLevel() == CodeGenOptLevel::None)
2011 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2012 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
2013 return;
2014 }
2015
2016 // For non-SEH, EHCont Guard needs to know that this catchret is a target.
2017 TargetMBB->setIsEHContTarget(true);
2018 DAG.getMachineFunction().setHasEHContTarget(true);
2019
2020 // Figure out the funclet membership for the catchret's successor.
2021 // This will be used by the FuncletLayout pass to determine how to order the
2022 // BB's.
2023 // A 'catchret' returns to the outer scope's color.
2024 Value *ParentPad = I.getCatchSwitchParentPad();
2025 const BasicBlock *SuccessorColor;
2026 if (isa<ConstantTokenNone>(ParentPad))
2027 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
2028 else
2029 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2030 assert(SuccessorColor && "No parent funclet for catchret!");
2031 MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor);
2032 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
2033
2034 // Create the terminator node.
2035 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
2036 getControlRoot(), DAG.getBasicBlock(TargetMBB),
2037 DAG.getBasicBlock(SuccessorColorMBB));
2038 DAG.setRoot(Ret);
2039 }
2040
2041 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
2042 // Don't emit any special code for the cleanuppad instruction. It just marks
2043 // the start of an EH scope/funclet.
2044 FuncInfo.MBB->setIsEHScopeEntry();
2045 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2046 if (Pers != EHPersonality::Wasm_CXX) {
2047 FuncInfo.MBB->setIsEHFuncletEntry();
2048 FuncInfo.MBB->setIsCleanupFuncletEntry();
2049 }
2050 }
2051
2052 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2053 /// many places it could ultimately go. In the IR, we have a single unwind
2054 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2055 /// This function skips over imaginary basic blocks that hold catchswitch
2056 /// instructions, and finds all the "real" machine
2057 /// basic block destinations. As those destinations may not be successors of
2058 /// EHPadBB, here we also calculate the edge probability to those destinations.
2059 /// The passed-in Prob is the edge probability to EHPadBB.
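/// Illustrative example (assumed): given
///   %cs = catchswitch within none [label %catch1, label %catch2]
///           unwind label %cleanup
/// at EHPadBB, both catchpad blocks are recorded as destinations with the
/// incoming probability, and the walk continues into %cleanup with the
/// probability scaled by that unwind edge.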
2060 static void findUnwindDestinations(
2061 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2062 BranchProbability Prob,
2063 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2064 &UnwindDests) {
2065 EHPersonality Personality =
2066 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2067 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2068 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2069 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2070 bool IsSEH = isAsynchronousEHPersonality(Personality);
2071
2072 while (EHPadBB) {
2073 BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt();
2074 BasicBlock *NewEHPadBB = nullptr;
2075 if (isa<LandingPadInst>(Pad)) {
2076 // Stop on landingpads. They are not funclets.
2077 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2078 break;
2079 } else if (isa<CleanupPadInst>(Pad)) {
2080 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2081 // personalities except Wasm. And in Wasm this becomes a catch_all(_ref),
2082 // which always catches an exception.
2083 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2084 UnwindDests.back().first->setIsEHScopeEntry();
2085 // In Wasm, EH scopes are not funclets.
2086 if (!IsWasmCXX)
2087 UnwindDests.back().first->setIsEHFuncletEntry();
2088 break;
2089 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2090 // Add the catchpad handlers to the possible destinations.
2091 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2092 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2093 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2094 if (IsMSVCCXX || IsCoreCLR)
2095 UnwindDests.back().first->setIsEHFuncletEntry();
2096 if (!IsSEH)
2097 UnwindDests.back().first->setIsEHScopeEntry();
2098 }
2099 NewEHPadBB = CatchSwitch->getUnwindDest();
2100 } else {
2101 continue;
2102 }
2103
2104 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2105 if (BPI && NewEHPadBB)
2106 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2107 EHPadBB = NewEHPadBB;
2108 }
2109 }
2110
2111 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2112 // Update successor info.
2113 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2114 auto UnwindDest = I.getUnwindDest();
2115 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2116 BranchProbability UnwindDestProb =
2117 (BPI && UnwindDest)
2118 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2119 : BranchProbability::getZero();
2120 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2121 for (auto &UnwindDest : UnwindDests) {
2122 UnwindDest.first->setIsEHPad();
2123 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2124 }
2125 FuncInfo.MBB->normalizeSuccProbs();
2126
2127 // Create the terminator node.
2128 MachineBasicBlock *CleanupPadMBB =
2129 FuncInfo.getMBB(I.getCleanupPad()->getParent());
2130 SDValue Ret = DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other,
2131 getControlRoot(), DAG.getBasicBlock(CleanupPadMBB));
2132 DAG.setRoot(Ret);
2133 }
2134
2135 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2136 report_fatal_error("visitCatchSwitch not yet implemented!");
2137 }
2138
2139 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2140 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2141 auto &DL = DAG.getDataLayout();
2142 SDValue Chain = getControlRoot();
2143 SmallVector<ISD::OutputArg, 8> Outs;
2144 SmallVector<SDValue, 8> OutVals;
2145
2146 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2147 // lower
2148 //
2149 // %val = call <ty> @llvm.experimental.deoptimize()
2150 // ret <ty> %val
2151 //
2152 // differently.
2153 if (I.getParent()->getTerminatingDeoptimizeCall()) {
2154 LowerDeoptimizingReturn();
2155 return;
2156 }
2157
2158 if (!FuncInfo.CanLowerReturn) {
2159 Register DemoteReg = FuncInfo.DemoteRegister;
2160
2161 // Emit a store of the return value through the virtual register.
2162 // Leave Outs empty so that LowerReturn won't try to load return
2163 // registers the usual way.
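// Illustrative case (assumed): a function returning a struct too large for
// registers, e.g. { i64, i64, i64, i64 }, takes this path: each scalar piece
// is stored through the hidden sret pointer held in DemoteReg, and
// LowerReturn sees an empty Outs list.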
2164 MVT PtrValueVT = TLI.getPointerTy(DL, DL.getAllocaAddrSpace());
2165 SDValue RetPtr =
2166 DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVT);
2167 SDValue RetOp = getValue(I.getOperand(0));
2168
2169 SmallVector<EVT, 4> ValueVTs, MemVTs;
2170 SmallVector<uint64_t, 4> Offsets;
2171 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2172 &Offsets, 0);
2173 unsigned NumValues = ValueVTs.size();
2174
2175 SmallVector<SDValue, 4> Chains(NumValues);
2176 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2177 for (unsigned i = 0; i != NumValues; ++i) {
2178 // An aggregate return value cannot wrap around the address space, so
2179 // offsets to its parts don't wrap either.
2180 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2181 TypeSize::getFixed(Offsets[i]));
2182
2183 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2184 if (MemVTs[i] != ValueVTs[i])
2185 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2186 Chains[i] = DAG.getStore(
2187 Chain, getCurSDLoc(), Val,
2188 // FIXME: better loc info would be nice.
2189 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2190 commonAlignment(BaseAlign, Offsets[i]));
2191 }
2192
2193 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2194 MVT::Other, Chains);
2195 } else if (I.getNumOperands() != 0) {
2196 SmallVector<EVT, 4> ValueVTs;
2197 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2198 unsigned NumValues = ValueVTs.size();
2199 if (NumValues) {
2200 SDValue RetOp = getValue(I.getOperand(0));
2201
2202 const Function *F = I.getParent()->getParent();
2203
2204 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2205 I.getOperand(0)->getType(), F->getCallingConv(),
2206 /*IsVarArg*/ false, DL);
2207
2208 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2209 if (F->getAttributes().hasRetAttr(Attribute::SExt))
2210 ExtendKind = ISD::SIGN_EXTEND;
2211 else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2212 ExtendKind = ISD::ZERO_EXTEND;
2213
2214 LLVMContext &Context = F->getContext();
2215 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2216
2217 for (unsigned j = 0; j != NumValues; ++j) {
2218 EVT VT = ValueVTs[j];
2219
2220 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2221 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2222
2223 CallingConv::ID CC = F->getCallingConv();
2224
2225 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2226 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2227 SmallVector<SDValue, 4> Parts(NumParts);
2228 getCopyToParts(DAG, getCurSDLoc(),
2229 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2230 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2231
2232 // 'inreg' on function refers to return value
2233 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2234 if (RetInReg)
2235 Flags.setInReg();
2236
2237 if (I.getOperand(0)->getType()->isPointerTy()) {
2238 Flags.setPointer();
2239 Flags.setPointerAddrSpace(
2240 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2241 }
2242
2243 if (NeedsRegBlock) {
2244 Flags.setInConsecutiveRegs();
2245 if (j == NumValues - 1)
2246 Flags.setInConsecutiveRegsLast();
2247 }
2248
2249 // Propagate extension type if any
2250 if (ExtendKind == ISD::SIGN_EXTEND)
2251 Flags.setSExt();
2252 else if (ExtendKind == ISD::ZERO_EXTEND)
2253 Flags.setZExt();
2254 else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
2255 Flags.setNoExt();
2256
2257 for (unsigned i = 0; i < NumParts; ++i) {
2258 Outs.push_back(ISD::OutputArg(Flags,
2259 Parts[i].getValueType().getSimpleVT(),
2260 VT, /*isfixed=*/true, 0, 0));
2261 OutVals.push_back(Parts[i]);
2262 }
2263 }
2264 }
2265 }
2266
2267 // Push in swifterror virtual register as the last element of Outs. This makes
2268 // sure swifterror virtual register will be returned in the swifterror
2269 // physical register.
2270 const Function *F = I.getParent()->getParent();
2271 if (TLI.supportSwiftError() &&
2272 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2273 assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2274 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2275 Flags.setSwiftError();
2276 Outs.push_back(ISD::OutputArg(
2277 Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2278 /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2279 // Create SDNode for the swifterror virtual register.
2280 OutVals.push_back(
2281 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2282 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2283 EVT(TLI.getPointerTy(DL))));
2284 }
2285
2286 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2287 CallingConv::ID CallConv =
2288 DAG.getMachineFunction().getFunction().getCallingConv();
2289 Chain = DAG.getTargetLoweringInfo().LowerReturn(
2290 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2291
2292 // Verify that the target's LowerReturn behaved as expected.
2293 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2294 "LowerReturn didn't return a valid chain!");
2295
2296 // Update the DAG with the new chain value resulting from return lowering.
2297 DAG.setRoot(Chain);
2298 }
2299
2300 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2301 /// created for it, emit nodes to copy the value into the virtual
2302 /// registers.
2303 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2304 // Skip empty types
2305 if (V->getType()->isEmptyTy())
2306 return;
2307
2308 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2309 if (VMI != FuncInfo.ValueMap.end()) {
2310 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2311 "Unused value assigned virtual registers!");
2312 CopyValueToVirtualRegister(V, VMI->second);
2313 }
2314 }
2315
2316 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2317 /// the current basic block, add it to ValueMap now so that we'll get a
2318 /// CopyTo/FromReg.
2319 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2320 // No need to export constants.
2321 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2322
2323 // Already exported?
2324 if (FuncInfo.isExportedInst(V)) return;
2325
2326 Register Reg = FuncInfo.InitializeRegForValue(V);
2327 CopyValueToVirtualRegister(V, Reg);
2328 }
2329
2330 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2331 const BasicBlock *FromBB) {
2332 // The operands of the setcc have to be in this block. We don't know
2333 // how to export them from some other block.
2334 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2335 // Can export from current BB.
2336 if (VI->getParent() == FromBB)
2337 return true;
2338
2339 // Is already exported, noop.
2340 return FuncInfo.isExportedInst(V);
2341 }
2342
2343 // If this is an argument, we can export it if the BB is the entry block or
2344 // if it is already exported.
2345 if (isa<Argument>(V)) {
2346 if (FromBB->isEntryBlock())
2347 return true;
2348
2349 // Otherwise, can only export this if it is already exported.
2350 return FuncInfo.isExportedInst(V);
2351 }
2352
2353 // Otherwise, constants can always be exported.
2354 return true;
2355 }
2356
2357 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2358 BranchProbability
2359 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2360 const MachineBasicBlock *Dst) const {
2361 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2362 const BasicBlock *SrcBB = Src->getBasicBlock();
2363 const BasicBlock *DstBB = Dst->getBasicBlock();
2364 if (!BPI) {
2365 // If BPI is not available, set the default probability as 1 / N, where N is
2366 // the number of successors.
2367 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2368 return BranchProbability(1, SuccSize);
2369 }
2370 return BPI->getEdgeProbability(SrcBB, DstBB);
2371 }
2372
2373 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2374 MachineBasicBlock *Dst,
2375 BranchProbability Prob) {
2376 if (!FuncInfo.BPI)
2377 Src->addSuccessorWithoutProb(Dst);
2378 else {
2379 if (Prob.isUnknown())
2380 Prob = getEdgeProbability(Src, Dst);
2381 Src->addSuccessor(Dst, Prob);
2382 }
2383 }
2384
2385 static bool InBlock(const Value *V, const BasicBlock *BB) {
2386 if (const Instruction *I = dyn_cast<Instruction>(V))
2387 return I->getParent() == BB;
2388 return true;
2389 }
2390
2391 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2392 /// This function emits a branch and is used at the leaves of an OR or an
2393 /// AND operator tree.
2394 void
2395 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2396 MachineBasicBlock *TBB,
2397 MachineBasicBlock *FBB,
2398 MachineBasicBlock *CurBB,
2399 MachineBasicBlock *SwitchBB,
2400 BranchProbability TProb,
2401 BranchProbability FProb,
2402 bool InvertCond) {
2403 const BasicBlock *BB = CurBB->getBasicBlock();
2404
2405 // If the leaf of the tree is a comparison, merge the condition into
2406 // the caseblock.
2407 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2408 // The operands of the cmp have to be in this block. We don't know
2409 // how to export them from some other block. If this is the first block
2410 // of the sequence, no exporting is needed.
2411 if (CurBB == SwitchBB ||
2412 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2413 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2414 ISD::CondCode Condition;
2415 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2416 ICmpInst::Predicate Pred =
2417 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2418 Condition = getICmpCondCode(Pred);
2419 } else {
2420 const FCmpInst *FC = cast<FCmpInst>(Cond);
2421 FCmpInst::Predicate Pred =
2422 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2423 Condition = getFCmpCondCode(Pred);
2424 if (TM.Options.NoNaNsFPMath)
2425 Condition = getFCmpCodeWithoutNaN(Condition);
2426 }
2427
2428 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2429 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2430 SL->SwitchCases.push_back(CB);
2431 return;
2432 }
2433 }
2434
2435 // Create a CaseBlock record representing this branch.
2436 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2437 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2438 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2439 SL->SwitchCases.push_back(CB);
2440 }
2441
2442 // Collect dependencies on V recursively. This is used for the cost analysis in
2443 // `shouldKeepJumpConditionsTogether`.
2444 static bool collectInstructionDeps(
2445 SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V,
2446 SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr,
2447 unsigned Depth = 0) {
2448 // Return false if we have an incomplete count.
2449 if (Depth >= SelectionDAG::MaxRecursionDepth)
2450 return false;
2451
2452 auto *I = dyn_cast<Instruction>(V);
2453 if (I == nullptr)
2454 return true;
2455
2456 if (Necessary != nullptr) {
2457 // This instruction is necessary for the other side of the condition so
2458 // don't count it.
2459 if (Necessary->contains(I))
2460 return true;
2461 }
2462
2463 // Already added this dep.
2464 if (!Deps->try_emplace(I, false).second)
2465 return true;
2466
2467 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2468 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2469 Depth + 1))
2470 return false;
2471 return true;
2472 }
2473
2474 bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
2475 const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
2476 Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2477 TargetLoweringBase::CondMergingParams Params) const {
2478 if (I.getNumSuccessors() != 2)
2479 return false;
2480
2481 if (!I.isConditional())
2482 return false;
2483
2484 if (Params.BaseCost < 0)
2485 return false;
2486
2487 // Baseline cost.
2488 InstructionCost CostThresh = Params.BaseCost;
2489
2490 BranchProbabilityInfo *BPI = nullptr;
2491 if (Params.LikelyBias || Params.UnlikelyBias)
2492 BPI = FuncInfo.BPI;
2493 if (BPI != nullptr) {
2494 // See if we are either likely to get an early out or compute both lhs/rhs
2495 // of the condition.
2496 BasicBlock *IfFalse = I.getSuccessor(0);
2497 BasicBlock *IfTrue = I.getSuccessor(1);
2498
2499 std::optional<bool> Likely;
2500 if (BPI->isEdgeHot(I.getParent(), IfTrue))
2501 Likely = true;
2502 else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2503 Likely = false;
2504
2505 if (Likely) {
2506 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2507 // It's likely we will have to compute both the lhs and rhs of the condition.
2508 CostThresh += Params.LikelyBias;
2509 else {
2510 if (Params.UnlikelyBias < 0)
2511 return false;
2512 // It's likely we will get an early out.
2513 CostThresh -= Params.UnlikelyBias;
2514 }
2515 }
2516 }
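// Illustrative numbers (assumed, not real target parameters): with
// BaseCost = 2, LikelyBias = 1 and a hot IfTrue edge on an And, both sides
// are likely computed anyway, so the threshold rises to 3; with
// UnlikelyBias = 1 and a hot early-out edge it falls to 1, making a split
// more attractive.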
2517
2518 if (CostThresh <= 0)
2519 return false;
2520
2521 // Collect "all" instructions that the lhs condition is dependent on.
2522 // Use a map for stable iteration (to avoid the non-determinism of iterating
2523 // a SmallPtrSet). The `bool` value is just a dummy.
2524 SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
2525 collectInstructionDeps(&LhsDeps, Lhs);
2526 // Collect "all" instructions that the rhs condition is dependent on AND that
2527 // are not already dependencies of the lhs. This gives us an estimate of which
2528 // instructions we stand to save by splitting the condition.
2529 if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2530 return false;
2531 // Add the compare instruction itself unless it's a dependency of the LHS.
2532 if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2533 if (!LhsDeps.contains(RhsI))
2534 RhsDeps.try_emplace(RhsI, false);
2535
2536 const auto &TLI = DAG.getTargetLoweringInfo();
2537 const auto &TTI =
2538 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
2539
2540 InstructionCost CostOfIncluding = 0;
2541 // See if this instruction will need to be computed independently of whether
2542 // the RHS is.
2543 Value *BrCond = I.getCondition();
2544 auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2545 for (const auto *U : Ins->users()) {
2546 // If user is independent of RHS calculation we don't need to count it.
2547 if (auto *UIns = dyn_cast<Instruction>(U))
2548 if (UIns != BrCond && !RhsDeps.contains(UIns))
2549 return false;
2550 }
2551 return true;
2552 };
2553
2554 // Prune instructions from RHS Deps that are dependencies of unrelated
2555 // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
2556 // arbitrary and just meant to cap how much time we spend in the pruning
2557 // loop. It's highly unlikely to come into effect.
2558 const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
2559 // Stop after a certain point. No incorrectness from including too many
2560 // instructions.
2561 for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2562 const Instruction *ToDrop = nullptr;
2563 for (const auto &InsPair : RhsDeps) {
2564 if (!ShouldCountInsn(InsPair.first)) {
2565 ToDrop = InsPair.first;
2566 break;
2567 }
2568 }
2569 if (ToDrop == nullptr)
2570 break;
2571 RhsDeps.erase(ToDrop);
2572 }
2573
2574 for (const auto &InsPair : RhsDeps) {
2575 // Finally accumulate latency that we can only attribute to computing the
2576 // RHS condition. Use latency because we are essentially trying to calculate
2577 // the cost of the dependency chain.
2578 // Possible TODO: We could try to estimate ILP and make this more precise.
2579 CostOfIncluding +=
2580 TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);
2581
2582 if (CostOfIncluding > CostThresh)
2583 return false;
2584 }
2585 return true;
2586 }
2587
2588 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2589 MachineBasicBlock *TBB,
2590 MachineBasicBlock *FBB,
2591 MachineBasicBlock *CurBB,
2592 MachineBasicBlock *SwitchBB,
2593 Instruction::BinaryOps Opc,
2594 BranchProbability TProb,
2595 BranchProbability FProb,
2596 bool InvertCond) {
2597 // Skip over nodes that are not part of the tree and remember to invert the
2598 // op and operands at the next level.
2599 Value *NotCond;
2600 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2601 InBlock(NotCond, CurBB->getBasicBlock())) {
2602 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2603 !InvertCond);
2604 return;
2605 }
2606
2607 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2608 const Value *BOpOp0, *BOpOp1;
2609 // Compute the effective opcode for Cond, taking into account whether it needs
2610 // to be inverted, e.g.
2611 // and (not (or A, B)), C
2612 // gets lowered as
2613 // and (and (not A, not B), C)
2614 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2615 if (BOp) {
2616 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2617 ? Instruction::And
2618 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2619 ? Instruction::Or
2620 : (Instruction::BinaryOps)0);
2621 if (InvertCond) {
2622 if (BOpc == Instruction::And)
2623 BOpc = Instruction::Or;
2624 else if (BOpc == Instruction::Or)
2625 BOpc = Instruction::And;
2626 }
2627 }
2628
2629 // If this node is not part of the or/and tree, emit it as a branch.
2630 // Note that all nodes in the tree should have the same opcode.
2631 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2632 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2633 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2634 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2635 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2636 TProb, FProb, InvertCond);
2637 return;
2638 }
2639
2640 // Create TmpBB after CurBB.
2641 MachineFunction::iterator BBI(CurBB);
2642 MachineFunction &MF = DAG.getMachineFunction();
2643 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2644 CurBB->getParent()->insert(++BBI, TmpBB);
2645
2646 if (Opc == Instruction::Or) {
2647 // Codegen X | Y as:
2648 // BB1:
2649 // jmp_if_X TBB
2650 // jmp TmpBB
2651 // TmpBB:
2652 // jmp_if_Y TBB
2653 // jmp FBB
2654 //
2655
2656 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2657 // The requirement is that
2658 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2659 // = TrueProb for original BB.
2660 // Assuming the original probabilities are A and B, one choice is to set
2661 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2662 // A/(1+B) and 2B/(1+B). This choice assumes that
2663 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2664 // Another choice is to assume TrueProb for BB1 equals TrueProb for
2665 // TmpBB, but the math is more complicated.
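// Worked instance of the first choice (illustrative): with TProb = 3/4 and
// FProb = 1/4, BB1 gets {3/8, 5/8} and TmpBB gets the normalised pair
// {3/5, 2/5}; checking: 3/8 + 5/8 * 3/5 = 3/4 recovers the original TrueProb.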
2666
2667 auto NewTrueProb = TProb / 2;
2668 auto NewFalseProb = TProb / 2 + FProb;
2669 // Emit the LHS condition.
2670 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2671 NewFalseProb, InvertCond);
2672
2673 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2674 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2675 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2676 // Emit the RHS condition into TmpBB.
2677 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2678 Probs[1], InvertCond);
2679 } else {
2680 assert(Opc == Instruction::And && "Unknown merge op!");
2681 // Codegen X & Y as:
2682 // BB1:
2683 // jmp_if_X TmpBB
2684 // jmp FBB
2685 // TmpBB:
2686 // jmp_if_Y TBB
2687 // jmp FBB
2688 //
2689 // This requires creation of TmpBB after CurBB.
2690
2691 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2692 // The requirement is that
2693 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2694 // = FalseProb for original BB.
2695 // Assuming the original probabilities are A and B, one choice is to set
2696 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2697 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2698 // TrueProb for BB1 * FalseProb for TmpBB.
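// Worked instance (illustrative): with TProb = 3/4 and FProb = 1/4, BB1 gets
// {7/8, 1/8} and TmpBB gets the normalised pair {6/7, 1/7}; checking:
// 1/8 + 7/8 * 1/7 = 1/4 recovers the original FalseProb.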
2699
2700 auto NewTrueProb = TProb + FProb / 2;
2701 auto NewFalseProb = FProb / 2;
2702 // Emit the LHS condition.
2703 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2704 NewFalseProb, InvertCond);
2705
2706 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2707 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2708 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2709 // Emit the RHS condition into TmpBB.
2710 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2711 Probs[1], InvertCond);
2712 }
2713 }
2714
2715 /// If the set of cases should be emitted as a series of branches, return true.
2716 /// If we should emit this as a bunch of and/or'd together conditions, return
2717 /// false.
2718 bool
2719 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2720 if (Cases.size() != 2) return true;
2721
2722 // If this is two comparisons of the same values or'd or and'd together, they
2723 // will get folded into a single comparison, so don't emit two blocks.
2724 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2725 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2726 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2727 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2728 return false;
2729 }
2730
2731 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2732 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2733 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2734 Cases[0].CC == Cases[1].CC &&
2735 isa<Constant>(Cases[0].CmpRHS) &&
2736 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2737 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2738 return false;
2739 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2740 return false;
2741 }
2742
2743 return true;
2744 }
2745
2746 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2747 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2748
2749 // Update machine-CFG edges.
2750 MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));
2751
2752 if (I.isUnconditional()) {
2753 // Update machine-CFG edges.
2754 BrMBB->addSuccessor(Succ0MBB);
2755
2756 // If this is not a fall-through branch or optimizations are switched off,
2757 // emit the branch.
2758 if (Succ0MBB != NextBlock(BrMBB) ||
2759 TM.getOptLevel() == CodeGenOptLevel::None) {
2760 auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2761 getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2762 setValue(&I, Br);
2763 DAG.setRoot(Br);
2764 }
2765
2766 return;
2767 }
2768
2769 // If this condition is one of the special cases we handle, do special stuff
2770 // now.
2771 const Value *CondVal = I.getCondition();
2772 MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));
2773
2774 // If this is a series of conditions that are or'd or and'd together, emit
2775 // this as a sequence of branches instead of setcc's with and/or operations.
2776 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2777 // unpredictable branches, and vector extracts because those jumps are likely
2778 // expensive for any target), this should improve performance.
2779 // For example, instead of something like:
2780 // cmp A, B
2781 // C = seteq
2782 // cmp D, E
2783 // F = setle
2784 // or C, F
2785 // jnz foo
2786 // Emit:
2787 // cmp A, B
2788 // je foo
2789 // cmp D, E
2790 // jle foo
2791 bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
2792 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2793 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2794 BOp->hasOneUse() && !IsUnpredictable) {
2795 Value *Vec;
2796 const Value *BOp0, *BOp1;
2797 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2798 if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2799 Opcode = Instruction::And;
2800 else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2801 Opcode = Instruction::Or;
2802
2803 if (Opcode &&
2804 !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2805 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
2806 !shouldKeepJumpConditionsTogether(
2807 FuncInfo, I, Opcode, BOp0, BOp1,
2808 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2809 Opcode, BOp0, BOp1))) {
2810 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2811 getEdgeProbability(BrMBB, Succ0MBB),
2812 getEdgeProbability(BrMBB, Succ1MBB),
2813 /*InvertCond=*/false);
2814 // If the compares in later blocks need to use values not currently
2815 // exported from this block, export them now. This block should always
2816 // be the first entry.
2817 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2818
2819 // Allow some cases to be rejected.
2820 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2821 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2822 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2823 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2824 }
2825
2826 // Emit the branch for this block.
2827 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2828 SL->SwitchCases.erase(SL->SwitchCases.begin());
2829 return;
2830 }
2831
2832 // Okay, we decided not to do this, remove any inserted MBB's and clear
2833 // SwitchCases.
2834 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2835 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2836
2837 SL->SwitchCases.clear();
2838 }
2839 }
2840
2841 // Create a CaseBlock record representing this branch.
2842 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2843 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
2844 BranchProbability::getUnknown(), BranchProbability::getUnknown(),
2845 IsUnpredictable);
2846
2847 // Use visitSwitchCase to actually insert the fast branch sequence for this
2848 // cond branch.
2849 visitSwitchCase(CB, BrMBB);
2850 }
2851
2852 /// visitSwitchCase - Emits the necessary code to represent a single node in
2853 /// the binary search tree resulting from lowering a switch instruction.
2854 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2855 MachineBasicBlock *SwitchBB) {
2856 SDValue Cond;
2857 SDValue CondLHS = getValue(CB.CmpLHS);
2858 SDLoc dl = CB.DL;
2859
2860 if (CB.CC == ISD::SETTRUE) {
2861 // Branch or fall through to TrueBB.
2862 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2863 SwitchBB->normalizeSuccProbs();
2864 if (CB.TrueBB != NextBlock(SwitchBB)) {
2865 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2866 DAG.getBasicBlock(CB.TrueBB)));
2867 }
2868 return;
2869 }
2870
2871 auto &TLI = DAG.getTargetLoweringInfo();
2872 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2873
2874 // Build the setcc now.
2875 if (!CB.CmpMHS) {
2876 // Fold "(X == true)" to X and "(X == false)" to !X to
2877 // handle common cases produced by branch lowering.
2878 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2879 CB.CC == ISD::SETEQ)
2880 Cond = CondLHS;
2881 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2882 CB.CC == ISD::SETEQ) {
2883 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2884 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2885 } else {
2886 SDValue CondRHS = getValue(CB.CmpRHS);
2887
2888 // If a pointer's DAG type is larger than its memory type then the DAG
2889 // values are zero-extended. This breaks signed comparisons so truncate
2890 // back to the underlying type before doing the compare.
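      // Illustrative (hypothetical target): if pointers live in 64-bit
      // registers but are 32 bits in memory, the upper 32 bits are zero, so a
      // signed compare is only meaningful after truncating both operands back
      // to the 32-bit memory type.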
2891 if (CondLHS.getValueType() != MemVT) {
2892 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2893 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2894 }
2895 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2896 }
2897 } else {
2898 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
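    // The lowering below uses the standard unsigned range-check trick
    // (illustrative): X is in [Low, High] iff (X - Low) u<= (High - Low).
    // E.g. for Low = 10, High = 14 the test is (X - 10) u<= 4, since any
    // X < 10 wraps around to a large unsigned value.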
2899
2900 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2901 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2902
2903 SDValue CmpOp = getValue(CB.CmpMHS);
2904 EVT VT = CmpOp.getValueType();
2905
2906 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2907 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2908 ISD::SETLE);
2909 } else {
2910 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2911 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2912 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2913 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2914 }
2915 }
2916
2917 // Update successor info
2918 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2919 // TrueBB and FalseBB are always different unless the incoming IR is
2920 // degenerate. This only happens when running llc on weird IR.
2921 if (CB.TrueBB != CB.FalseBB)
2922 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2923 SwitchBB->normalizeSuccProbs();
2924
2925 // If the lhs block is the next block, invert the condition so that we can
2926 // fall through to the lhs instead of the rhs block.
2927 if (CB.TrueBB == NextBlock(SwitchBB)) {
2928 std::swap(CB.TrueBB, CB.FalseBB);
2929 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2930 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2931 }
2932
2933 SDNodeFlags Flags;
2934 Flags.setUnpredictable(CB.IsUnpredictable);
2935 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
2936 Cond, DAG.getBasicBlock(CB.TrueBB), Flags);
2937
2938 setValue(CurInst, BrCond);
2939
2940   // Insert the false branch. Do this even if it's a fall through branch;
2941   // this makes it easier to do DAG optimizations which require inverting
2942   // the branch condition.
2943 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2944 DAG.getBasicBlock(CB.FalseBB));
2945
2946 DAG.setRoot(BrCond);
2947 }
2948
2949 /// visitJumpTable - Emit JumpTable node in the current MBB
2950 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2951 // Emit the code for the jump table
2952 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2953 assert(JT.Reg && "Should lower JT Header first!");
2954 EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
2955 SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2956 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2957 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2958 Index.getValue(1), Table, Index);
2959 DAG.setRoot(BrJumpTable);
2960 }
2961
2962 /// visitJumpTableHeader - This function emits the necessary code to produce
2963 /// the jump table index from the switch case value.
2964 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2965 JumpTableHeader &JTH,
2966 MachineBasicBlock *SwitchBB) {
2967 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2968 const SDLoc &dl = *JT.SL;
2969
2970 // Subtract the lowest switch case value from the value being switched on.
2971 SDValue SwitchOp = getValue(JTH.SValue);
2972 EVT VT = SwitchOp.getValueType();
2973 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2974 DAG.getConstant(JTH.First, dl, VT));
2975
2976 // The SDNode we just created, which holds the value being switched on minus
2977 // the smallest case value, needs to be copied to a virtual register so it
2978 // can be used as an index into the jump table in a subsequent basic block.
2979   // This value may be smaller or larger than the target's pointer type, and
2980   // may therefore require extension or truncation.
2981 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2982 SwitchOp =
2983 DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));
2984
2985 Register JumpTableReg =
2986 FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
2987 SDValue CopyTo =
2988 DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
2989 JT.Reg = JumpTableReg;
2990
2991 if (!JTH.FallthroughUnreachable) {
2992 // Emit the range check for the jump table, and branch to the default block
2993 // for the switch statement if the value being switched on exceeds the
2994 // largest case in the switch.
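    // Illustrative: for a switch whose cases span [First, Last] = [10, 14],
    // Sub = SValue - 10 and the emitted check is (Sub u> 4), branching to
    // JT.Default when the value is out of range.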
2995 SDValue CMP = DAG.getSetCC(
2996 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2997 Sub.getValueType()),
2998 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2999
3000 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3001 MVT::Other, CopyTo, CMP,
3002 DAG.getBasicBlock(JT.Default));
3003
3004 // Avoid emitting unnecessary branches to the next block.
3005 if (JT.MBB != NextBlock(SwitchBB))
3006 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3007 DAG.getBasicBlock(JT.MBB));
3008
3009 DAG.setRoot(BrCond);
3010 } else {
3011 // Avoid emitting unnecessary branches to the next block.
3012 if (JT.MBB != NextBlock(SwitchBB))
3013 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3014 DAG.getBasicBlock(JT.MBB)));
3015 else
3016 DAG.setRoot(CopyTo);
3017 }
3018 }
3019
3020 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
3021 /// variable if there exists one.
3022 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
3023 SDValue &Chain) {
3024 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3025 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3026 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3027 MachineFunction &MF = DAG.getMachineFunction();
3028 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
3029 MachineSDNode *Node =
3030 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
3031 if (Global) {
3032 MachinePointerInfo MPInfo(Global);
3033 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
3034 MachineMemOperand::MODereferenceable;
3035 MachineMemOperand *MemRef = MF.getMachineMemOperand(
3036 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
3037 DAG.setNodeMemRefs(Node, {MemRef});
3038 }
3039 if (PtrTy != PtrMemTy)
3040 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
3041 return SDValue(Node, 0);
3042 }
3043
3044 /// Codegen a new tail for a stack protector check ParentMBB which has had its
3045 /// tail spliced into a stack protector check success bb.
3046 ///
3047 /// For a high level explanation of how this fits into the stack protector
3048 /// generation see the comment on the declaration of class
3049 /// StackProtectorDescriptor.
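/// Illustrative shape of the emitted tail (a sketch, not verbatim):
///   GuardVal = volatile load of the stack protector slot
///   Guard    = load of the reference guard value
///   brcond (setne Guard, GuardVal) -> FailureMBB
///   br SuccessMBB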
3050 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
3051 MachineBasicBlock *ParentBB) {
3052
3053 // First create the loads to the guard/stack slot for the comparison.
3054 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3055 auto &DL = DAG.getDataLayout();
3056 EVT PtrTy = TLI.getFrameIndexTy(DL);
3057 EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());
3058
3059 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3060 int FI = MFI.getStackProtectorIndex();
3061
3062 SDValue Guard;
3063 SDLoc dl = getCurSDLoc();
3064 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
3065 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3066 Align Align = DL.getPrefTypeAlign(
3067 PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));
3068
3069 // Generate code to load the content of the guard slot.
3070 SDValue GuardVal = DAG.getLoad(
3071 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
3072 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
3073 MachineMemOperand::MOVolatile);
3074
3075 if (TLI.useStackGuardXorFP())
3076 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
3077
3078 // If we're using function-based instrumentation, call the guard check
3079 // function
3080 if (SPD.shouldEmitFunctionBasedCheckStackProtector()) {
3081 // Get the guard check function from the target and verify it exists since
3082 // we're using function-based instrumentation
3083 const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
3084 assert(GuardCheckFn && "Guard check function is null");
3085
3086 // The target provides a guard check function to validate the guard value.
3087 // Generate a call to that function with the content of the guard slot as
3088 // argument.
3089 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3090 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3091
3092 TargetLowering::ArgListTy Args;
3093 TargetLowering::ArgListEntry Entry;
3094 Entry.Node = GuardVal;
3095 Entry.Ty = FnTy->getParamType(0);
3096 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3097 Entry.IsInReg = true;
3098 Args.push_back(Entry);
3099
3100 TargetLowering::CallLoweringInfo CLI(DAG);
3101 CLI.setDebugLoc(getCurSDLoc())
3102 .setChain(DAG.getEntryNode())
3103 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3104 getValue(GuardCheckFn), std::move(Args));
3105
3106 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
3107 DAG.setRoot(Result.second);
3108 return;
3109 }
3110
3111 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3112 // Otherwise, emit a volatile load to retrieve the stack guard value.
3113 SDValue Chain = DAG.getEntryNode();
3114 if (TLI.useLoadStackGuardNode(M)) {
3115 Guard = getLoadStackGuard(DAG, dl, Chain);
3116 } else {
3117 const Value *IRGuard = TLI.getSDagStackGuard(M);
3118 SDValue GuardPtr = getValue(IRGuard);
3119
3120 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3121 MachinePointerInfo(IRGuard, 0), Align,
3122 MachineMemOperand::MOVolatile);
3123 }
3124
3125   // Perform the comparison via a setcc node.
3126 SDValue Cmp = DAG.getSetCC(
3127 dl, TLI.getSetCCResultType(DL, *DAG.getContext(), Guard.getValueType()),
3128 Guard, GuardVal, ISD::SETNE);
3129
3130   // If the guard and stack slot values are not equal, branch to the failure MBB.
3131 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3132 MVT::Other, GuardVal.getOperand(0),
3133 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
3134 // Otherwise branch to success MBB.
3135 SDValue Br = DAG.getNode(ISD::BR, dl,
3136 MVT::Other, BrCond,
3137 DAG.getBasicBlock(SPD.getSuccessMBB()));
3138
3139 DAG.setRoot(Br);
3140 }
3141
3142 /// Codegen the failure basic block for a stack protector check.
3143 ///
3144 /// A failure stack protector machine basic block consists simply of a call to
3145 /// __stack_chk_fail().
3146 ///
3147 /// For a high level explanation of how this fits into the stack protector
3148 /// generation see the comment on the declaration of class
3149 /// StackProtectorDescriptor.
3150 void SelectionDAGBuilder::visitSPDescriptorFailure(
3151 StackProtectorDescriptor &SPD) {
3152
3153 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3154 MachineBasicBlock *ParentBB = SPD.getParentMBB();
3155 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3156 SDValue Chain;
3157
3158 // For -Oz builds with a guard check function, we use function-based
3159 // instrumentation. Otherwise, if we have a guard check function, we call it
3160 // in the failure block.
3161 auto *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
3162 if (GuardCheckFn && !SPD.shouldEmitFunctionBasedCheckStackProtector()) {
3163 // First create the loads to the guard/stack slot for the comparison.
3164 auto &DL = DAG.getDataLayout();
3165 EVT PtrTy = TLI.getFrameIndexTy(DL);
3166 EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());
3167
3168 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3169 int FI = MFI.getStackProtectorIndex();
3170
3171 SDLoc dl = getCurSDLoc();
3172 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
3173 Align Align = DL.getPrefTypeAlign(
3174 PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));
3175
3176 // Generate code to load the content of the guard slot.
3177 SDValue GuardVal = DAG.getLoad(
3178 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
3179 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
3180 MachineMemOperand::MOVolatile);
3181
3182 if (TLI.useStackGuardXorFP())
3183 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
3184
3185 // The target provides a guard check function to validate the guard value.
3186 // Generate a call to that function with the content of the guard slot as
3187 // argument.
3188 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3189 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3190
3191 TargetLowering::ArgListTy Args;
3192 TargetLowering::ArgListEntry Entry;
3193 Entry.Node = GuardVal;
3194 Entry.Ty = FnTy->getParamType(0);
3195 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3196 Entry.IsInReg = true;
3197 Args.push_back(Entry);
3198
3199 TargetLowering::CallLoweringInfo CLI(DAG);
3200 CLI.setDebugLoc(getCurSDLoc())
3201 .setChain(DAG.getEntryNode())
3202 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3203 getValue(GuardCheckFn), std::move(Args));
3204
3205 Chain = TLI.LowerCallTo(CLI).second;
3206 } else {
3207 TargetLowering::MakeLibCallOptions CallOptions;
3208 CallOptions.setDiscardResult(true);
3209 Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3210 {}, CallOptions, getCurSDLoc())
3211 .second;
3212 }
3213
3214 // Emit a trap instruction if we are required to do so.
3215 const TargetOptions &TargetOpts = DAG.getTarget().Options;
3216 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3217 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3218
3219 DAG.setRoot(Chain);
3220 }
3221
3222 /// visitBitTestHeader - This function emits the necessary code to produce a
3223 /// value suitable for "bit tests".
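/// Illustrative example (hypothetical switch): for cases {1, 3, 5} sharing a
/// destination, the minimum value 1 is subtracted to give the range [0, 4];
/// the cases become the mask 0b10101, and the eventual test has the form
///   ((1 << (X - 1)) & 0b10101) != 0.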
3224 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3225 MachineBasicBlock *SwitchBB) {
3226 SDLoc dl = getCurSDLoc();
3227
3228 // Subtract the minimum value.
3229 SDValue SwitchOp = getValue(B.SValue);
3230 EVT VT = SwitchOp.getValueType();
3231 SDValue RangeSub =
3232 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3233
3234 // Determine the type of the test operands.
3235 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3236 bool UsePtrType = false;
3237 if (!TLI.isTypeLegal(VT)) {
3238 UsePtrType = true;
3239 } else {
3240 for (const BitTestCase &Case : B.Cases)
3241 if (!isUIntN(VT.getSizeInBits(), Case.Mask)) {
3242         // Switch case ranges are encoded into a series of masks.
3243         // Just use the pointer type; it's guaranteed to fit.
3244 UsePtrType = true;
3245 break;
3246 }
3247 }
3248 SDValue Sub = RangeSub;
3249 if (UsePtrType) {
3250 VT = TLI.getPointerTy(DAG.getDataLayout());
3251 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3252 }
3253
3254 B.RegVT = VT.getSimpleVT();
3255 B.Reg = FuncInfo.CreateReg(B.RegVT);
3256 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3257
3258 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3259
3260 if (!B.FallthroughUnreachable)
3261 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3262 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3263 SwitchBB->normalizeSuccProbs();
3264
3265 SDValue Root = CopyTo;
3266 if (!B.FallthroughUnreachable) {
3267 // Conditional branch to the default block.
3268 SDValue RangeCmp = DAG.getSetCC(dl,
3269 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3270 RangeSub.getValueType()),
3271 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3272 ISD::SETUGT);
3273
3274 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3275 DAG.getBasicBlock(B.Default));
3276 }
3277
3278 // Avoid emitting unnecessary branches to the next block.
3279 if (MBB != NextBlock(SwitchBB))
3280 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3281
3282 DAG.setRoot(Root);
3283 }
3284
3285 /// visitBitTestCase - this function produces one "bit test"
3286 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3287 MachineBasicBlock *NextMBB,
3288 BranchProbability BranchProbToNext,
3289 Register Reg, BitTestCase &B,
3290 MachineBasicBlock *SwitchBB) {
3291 SDLoc dl = getCurSDLoc();
3292 MVT VT = BB.RegVT;
3293 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3294 SDValue Cmp;
3295 unsigned PopCount = llvm::popcount(B.Mask);
3296 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3297 if (PopCount == 1) {
3298 // Testing for a single bit; just compare the shift count with what it
3299 // would need to be to shift a 1 bit in that position.
3300 Cmp = DAG.getSetCC(
3301 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3302 ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3303 ISD::SETEQ);
3304 } else if (PopCount == BB.Range) {
3305 // There is only one zero bit in the range, test for it directly.
3306 Cmp = DAG.getSetCC(
3307 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3308 ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3309 } else {
3310 // Make desired shift
3311 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3312 DAG.getConstant(1, dl, VT), ShiftOp);
3313
3314 // Emit bit tests and jumps
3315 SDValue AndOp = DAG.getNode(ISD::AND, dl,
3316 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3317 Cmp = DAG.getSetCC(
3318 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3319 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3320 }
3321
3322 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3323 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3324 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3325 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3326 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3327 // one as they are relative probabilities (and thus work more like weights),
3328 // and hence we need to normalize them to let the sum of them become one.
3329 SwitchBB->normalizeSuccProbs();
3330
3331 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3332 MVT::Other, getControlRoot(),
3333 Cmp, DAG.getBasicBlock(B.TargetBB));
3334
3335 // Avoid emitting unnecessary branches to the next block.
3336 if (NextMBB != NextBlock(SwitchBB))
3337 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3338 DAG.getBasicBlock(NextMBB));
3339
3340 DAG.setRoot(BrAnd);
3341 }
3342
3343 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3344 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3345
3346 // Retrieve successors. Look through artificial IR level blocks like
3347 // catchswitch for successors.
3348 MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
3349 const BasicBlock *EHPadBB = I.getSuccessor(1);
3350 MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);
3351
3352 // Deopt and ptrauth bundles are lowered in helper functions, and we don't
3353 // have to do anything here to lower funclet bundles.
3354 constexpr uint32_t kAllowedBundles[] = {
3355 LLVMContext::OB_deopt,
3356 LLVMContext::OB_gc_transition,
3357 LLVMContext::OB_gc_live,
3358 LLVMContext::OB_funclet,
3359 LLVMContext::OB_cfguardtarget,
3360 LLVMContext::OB_ptrauth,
3361 LLVMContext::OB_clang_arc_attachedcall,
3362 LLVMContext::OB_kcfi};
3363 if (I.hasOperandBundlesOtherThan(kAllowedBundles)) {
3364     std::string Error;
3365     bool First = true;
3366     for (unsigned i = 0, e = I.getNumOperandBundles(); i != e; ++i) {
3367       OperandBundleUse U = I.getOperandBundleAt(i);
3368 if (is_contained(kAllowedBundles, U.getTagID()))
3369 continue;
3370 if (!First)
3371 Error += ", ";
3372 First = false;
3373 Error += U.getTagName();
3374 }
3375 reportFatalUsageError(
3376 Twine("cannot lower invokes with arbitrary operand bundles: ", Error));
3377 }
3378
3379 const Value *Callee(I.getCalledOperand());
3380 const Function *Fn = dyn_cast<Function>(Callee);
3381 if (isa<InlineAsm>(Callee))
3382 visitInlineAsm(I, EHPadBB);
3383 else if (Fn && Fn->isIntrinsic()) {
3384 switch (Fn->getIntrinsicID()) {
3385 default:
3386 llvm_unreachable("Cannot invoke this intrinsic");
3387 case Intrinsic::donothing:
3388 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3389 case Intrinsic::seh_try_begin:
3390 case Intrinsic::seh_scope_begin:
3391 case Intrinsic::seh_try_end:
3392 case Intrinsic::seh_scope_end:
3393 if (EHPadMBB)
3394         // This block is referenced by the EH table, so mark its address as
3395         // taken to keep the dtor funclet from being removed by optimizations.
3396 EHPadMBB->setMachineBlockAddressTaken();
3397 break;
3398 case Intrinsic::experimental_patchpoint_void:
3399 case Intrinsic::experimental_patchpoint:
3400 visitPatchpoint(I, EHPadBB);
3401 break;
3402 case Intrinsic::experimental_gc_statepoint:
3403 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3404 break;
3405     // wasm_throw, wasm_rethrow: This is usually done in visitTargetIntrinsic,
3406     // but these intrinsics are special because they can be invoked, so we
3407     // manually lower them to DAG nodes here.
3408 case Intrinsic::wasm_throw: {
3409 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3410 std::array<SDValue, 4> Ops = {
3411 getControlRoot(), // inchain for the terminator node
3412 DAG.getTargetConstant(Intrinsic::wasm_throw, getCurSDLoc(),
3413 TLI.getPointerTy(DAG.getDataLayout())),
3414 getValue(I.getArgOperand(0)), // tag
3415 getValue(I.getArgOperand(1)) // thrown value
3416 };
3417 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3418 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3419 break;
3420 }
3421 case Intrinsic::wasm_rethrow: {
3422 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3423 std::array<SDValue, 2> Ops = {
3424 getControlRoot(), // inchain for the terminator node
3425 DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3426 TLI.getPointerTy(DAG.getDataLayout()))};
3427 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3428 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3429 break;
3430 }
3431 }
3432 } else if (I.hasDeoptState()) {
3433 // Currently we do not lower any intrinsic calls with deopt operand bundles.
3434 // Eventually we will support lowering the @llvm.experimental.deoptimize
3435 // intrinsic, and right now there are no plans to support other intrinsics
3436 // with deopt state.
3437 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3438 } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
3439 LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB);
3440 } else {
3441 LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3442 }
3443
3444 // If the value of the invoke is used outside of its defining block, make it
3445 // available as a virtual register.
3446 // We already took care of the exported value for the statepoint instruction
3447 // during call to the LowerStatepoint.
3448 if (!isa<GCStatepointInst>(I)) {
3449 CopyToExportRegsIfNeeded(&I);
3450 }
3451
3452 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3453 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3454 BranchProbability EHPadBBProb =
3455 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3456 : BranchProbability::getZero();
3457 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3458
3459 // Update successor info.
3460 addSuccessorWithProb(InvokeMBB, Return);
3461 for (auto &UnwindDest : UnwindDests) {
3462 UnwindDest.first->setIsEHPad();
3463 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3464 }
3465 InvokeMBB->normalizeSuccProbs();
3466
3467 // Drop into normal successor.
3468 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3469 DAG.getBasicBlock(Return)));
3470 }
3471
3472 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3473 MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3474
3475 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3476 // have to do anything here to lower funclet bundles.
3477 if (I.hasOperandBundlesOtherThan(
3478 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}))
3479 reportFatalUsageError(
3480 "cannot lower callbrs with arbitrary operand bundles!");
3481
3482 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3483 visitInlineAsm(I);
3484 CopyToExportRegsIfNeeded(&I);
3485
3486 // Retrieve successors.
3487 SmallPtrSet<BasicBlock *, 8> Dests;
3488 Dests.insert(I.getDefaultDest());
3489 MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());
3490
3491 // Update successor info.
3492 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3493 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3494 BasicBlock *Dest = I.getIndirectDest(i);
3495 MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
3496 Target->setIsInlineAsmBrIndirectTarget();
3497 // If we introduce a type of asm goto statement that is permitted to use an
3498 // indirect call instruction to jump to its labels, then we should add a
3499 // call to Target->setMachineBlockAddressTaken() here, to mark the target
3500 // block as requiring a BTI.
3501
3502 Target->setLabelMustBeEmitted();
3503 // Don't add duplicate machine successors.
3504 if (Dests.insert(Dest).second)
3505 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3506 }
3507 CallBrMBB->normalizeSuccProbs();
3508
3509 // Drop into default successor.
3510 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3511 MVT::Other, getControlRoot(),
3512 DAG.getBasicBlock(Return)));
3513 }
3514
3515 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3516 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3517 }
3518
3519 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3520 assert(FuncInfo.MBB->isEHPad() &&
3521 "Call to landingpad not in landing pad!");
3522
3523 // If there aren't registers to copy the values into (e.g., during SjLj
3524 // exceptions), then don't bother to create these DAG nodes.
3525 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3526 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3527 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3528 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3529 return;
3530
3531   // If the landingpad's return type is token type, we don't create DAG nodes
3532 // for its exception pointer and selector value. The extraction of exception
3533 // pointer or selector value from token type landingpads is not currently
3534 // supported.
3535 if (LP.getType()->isTokenTy())
3536 return;
3537
3538 SmallVector<EVT, 2> ValueVTs;
3539 SDLoc dl = getCurSDLoc();
3540 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3541 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3542
3543 // Get the two live-in registers as SDValues. The physregs have already been
3544 // copied into virtual registers.
3545 SDValue Ops[2];
3546 if (FuncInfo.ExceptionPointerVirtReg) {
3547 Ops[0] = DAG.getZExtOrTrunc(
3548 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3549 FuncInfo.ExceptionPointerVirtReg,
3550 TLI.getPointerTy(DAG.getDataLayout())),
3551 dl, ValueVTs[0]);
3552 } else {
3553 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3554 }
3555 Ops[1] = DAG.getZExtOrTrunc(
3556 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3557 FuncInfo.ExceptionSelectorVirtReg,
3558 TLI.getPointerTy(DAG.getDataLayout())),
3559 dl, ValueVTs[1]);
3560
3561 // Merge into one.
3562 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3563 DAG.getVTList(ValueVTs), Ops);
3564 setValue(&LP, Res);
3565 }
3566
3567 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3568 MachineBasicBlock *Last) {
3569 // Update JTCases.
3570 for (JumpTableBlock &JTB : SL->JTCases)
3571 if (JTB.first.HeaderBB == First)
3572 JTB.first.HeaderBB = Last;
3573
3574 // Update BitTestCases.
3575 for (BitTestBlock &BTB : SL->BitTestCases)
3576 if (BTB.Parent == First)
3577 BTB.Parent = Last;
3578 }
3579
3580 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3581 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3582
3583 // Update machine-CFG edges with unique successors.
3584 SmallSet<BasicBlock*, 32> Done;
3585 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3586 BasicBlock *BB = I.getSuccessor(i);
3587 bool Inserted = Done.insert(BB).second;
3588 if (!Inserted)
3589 continue;
3590
3591 MachineBasicBlock *Succ = FuncInfo.getMBB(BB);
3592 addSuccessorWithProb(IndirectBrMBB, Succ);
3593 }
3594 IndirectBrMBB->normalizeSuccProbs();
3595
3596 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3597 MVT::Other, getControlRoot(),
3598 getValue(I.getAddress())));
3599 }
3600
3601 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3602 if (!I.shouldLowerToTrap(DAG.getTarget().Options.TrapUnreachable,
3603 DAG.getTarget().Options.NoTrapAfterNoreturn))
3604 return;
3605
3606 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3607 }
3608
3609 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3610 SDNodeFlags Flags;
3611 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3612 Flags.copyFMF(*FPOp);
3613
3614 SDValue Op = getValue(I.getOperand(0));
3615 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3616 Op, Flags);
3617 setValue(&I, UnNodeValue);
3618 }
3619
3620 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3621 SDNodeFlags Flags;
3622 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3623 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3624 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3625 }
3626 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3627 Flags.setExact(ExactOp->isExact());
3628 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3629 Flags.setDisjoint(DisjointOp->isDisjoint());
3630 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3631 Flags.copyFMF(*FPOp);
3632
3633 SDValue Op1 = getValue(I.getOperand(0));
3634 SDValue Op2 = getValue(I.getOperand(1));
3635 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3636 Op1, Op2, Flags);
3637 setValue(&I, BinNodeValue);
3638 }
3639
3640 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3641 SDValue Op1 = getValue(I.getOperand(0));
3642 SDValue Op2 = getValue(I.getOperand(1));
3643
3644 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3645 Op1.getValueType(), DAG.getDataLayout());
3646
3647 // Coerce the shift amount to the right type if we can. This exposes the
3648 // truncate or zext to optimization early.
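  // Illustrative: an i8 shift amount for an i64 shift is zero-extended to the
  // target's shift type; the assert below only requires that this type can
  // represent every in-range amount, i.e. at least Log2_32_Ceil(64) = 6 bits.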
3649 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3650 assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3651 "Unexpected shift type");
3652 Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3653 }
3654
3655 bool nuw = false;
3656 bool nsw = false;
3657 bool exact = false;
3658
3659 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3660
3661 if (const OverflowingBinaryOperator *OFBinOp =
3662 dyn_cast<const OverflowingBinaryOperator>(&I)) {
3663 nuw = OFBinOp->hasNoUnsignedWrap();
3664 nsw = OFBinOp->hasNoSignedWrap();
3665 }
3666 if (const PossiblyExactOperator *ExactOp =
3667 dyn_cast<const PossiblyExactOperator>(&I))
3668 exact = ExactOp->isExact();
3669 }
3670 SDNodeFlags Flags;
3671 Flags.setExact(exact);
3672 Flags.setNoSignedWrap(nsw);
3673 Flags.setNoUnsignedWrap(nuw);
3674 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3675 Flags);
3676 setValue(&I, Res);
3677 }
3678
3679 void SelectionDAGBuilder::visitSDiv(const User &I) {
3680 SDValue Op1 = getValue(I.getOperand(0));
3681 SDValue Op2 = getValue(I.getOperand(1));
3682
3683 SDNodeFlags Flags;
3684 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3685 cast<PossiblyExactOperator>(&I)->isExact());
3686 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3687 Op2, Flags));
3688 }
3689
3690 void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
3691 ICmpInst::Predicate predicate = I.getPredicate();
3692 SDValue Op1 = getValue(I.getOperand(0));
3693 SDValue Op2 = getValue(I.getOperand(1));
3694 ISD::CondCode Opcode = getICmpCondCode(predicate);
3695
3696 auto &TLI = DAG.getTargetLoweringInfo();
3697 EVT MemVT =
3698 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3699
3700 // If a pointer's DAG type is larger than its memory type then the DAG values
3701 // are zero-extended. This breaks signed comparisons so truncate back to the
3702 // underlying type before doing the compare.
3703 if (Op1.getValueType() != MemVT) {
3704 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3705 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3706 }
3707
3708 SDNodeFlags Flags;
3709 Flags.setSameSign(I.hasSameSign());
3710 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3711
3712 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3713 I.getType());
3714 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3715 }
3716
3717 void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
3718 FCmpInst::Predicate predicate = I.getPredicate();
3719 SDValue Op1 = getValue(I.getOperand(0));
3720 SDValue Op2 = getValue(I.getOperand(1));
3721
3722 ISD::CondCode Condition = getFCmpCondCode(predicate);
3723 auto *FPMO = cast<FPMathOperator>(&I);
3724 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3725 Condition = getFCmpCodeWithoutNaN(Condition);
3726
3727 SDNodeFlags Flags;
3728 Flags.copyFMF(*FPMO);
3729 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3730
3731 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3732 I.getType());
3733 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3734 }
3735
3736 // Check that the condition of the select is used only by select instructions,
3737 // so folding the compare into a min/max leaves no other users behind.
3738 static bool hasOnlySelectUsers(const Value *Cond) {
3739 return llvm::all_of(Cond->users(), [](const Value *V) {
3740 return isa<SelectInst>(V);
3741 });
3742 }
3743
3744 void SelectionDAGBuilder::visitSelect(const User &I) {
3745 SmallVector<EVT, 4> ValueVTs;
3746 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3747 ValueVTs);
3748 unsigned NumValues = ValueVTs.size();
3749 if (NumValues == 0) return;
3750
3751 SmallVector<SDValue, 4> Values(NumValues);
3752 SDValue Cond = getValue(I.getOperand(0));
3753 SDValue LHSVal = getValue(I.getOperand(1));
3754 SDValue RHSVal = getValue(I.getOperand(2));
3755 SmallVector<SDValue, 1> BaseOps(1, Cond);
3756 ISD::NodeType OpCode =
3757 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3758
3759 bool IsUnaryAbs = false;
3760 bool Negate = false;
3761
3762 SDNodeFlags Flags;
3763 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3764 Flags.copyFMF(*FPOp);
3765
3766 Flags.setUnpredictable(
3767 cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3768
3769 // Min/max matching is only viable if all output VTs are the same.
3770 if (all_equal(ValueVTs)) {
3771 EVT VT = ValueVTs[0];
3772 LLVMContext &Ctx = *DAG.getContext();
3773 auto &TLI = DAG.getTargetLoweringInfo();
3774
3775 // We care about the legality of the operation after it has been type
3776 // legalized.
3777 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3778 VT = TLI.getTypeToTransformTo(Ctx, VT);
3779
3780 // If the vselect is legal, assume we want to leave this as a vector setcc +
3781 // vselect. Otherwise, if this is going to be scalarized, we want to see if
3782 // min/max is legal on the scalar type.
3783 bool UseScalarMinMax = VT.isVector() &&
3784 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3785
3786 // ValueTracking's select pattern matching does not account for -0.0,
3787 // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3788 // -0.0 is less than +0.0.
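    // Illustrative: IR such as  %r = select (icmp slt %a, %b), %a, %b  is
    // matched as SPF_SMIN and emitted directly as ISD::SMIN when that
    // operation is legal or custom for VT.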
3789 const Value *LHS, *RHS;
3790 auto SPR = matchSelectPattern(&I, LHS, RHS);
3791 ISD::NodeType Opc = ISD::DELETED_NODE;
3792 switch (SPR.Flavor) {
3793 case SPF_UMAX: Opc = ISD::UMAX; break;
3794 case SPF_UMIN: Opc = ISD::UMIN; break;
3795 case SPF_SMAX: Opc = ISD::SMAX; break;
3796 case SPF_SMIN: Opc = ISD::SMIN; break;
3797 case SPF_FMINNUM:
3798 switch (SPR.NaNBehavior) {
3799 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3800 case SPNB_RETURNS_NAN: break;
3801 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3802 case SPNB_RETURNS_ANY:
3803 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3804 (UseScalarMinMax &&
3805 TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3806 Opc = ISD::FMINNUM;
3807 break;
3808 }
3809 break;
3810 case SPF_FMAXNUM:
3811 switch (SPR.NaNBehavior) {
3812 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3813 case SPNB_RETURNS_NAN: break;
3814 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3815 case SPNB_RETURNS_ANY:
3816 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3817 (UseScalarMinMax &&
3818 TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3819 Opc = ISD::FMAXNUM;
3820 break;
3821 }
3822 break;
3823 case SPF_NABS:
3824 Negate = true;
3825 [[fallthrough]];
3826 case SPF_ABS:
3827 IsUnaryAbs = true;
3828 Opc = ISD::ABS;
3829 break;
3830 default: break;
3831 }
3832
3833 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3834 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3835 (UseScalarMinMax &&
3836 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3837 // If the underlying comparison instruction is used by any other
3838 // instruction, the consumed instructions won't be destroyed, so it is
3839 // not profitable to convert to a min/max.
3840 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3841 OpCode = Opc;
3842 LHSVal = getValue(LHS);
3843 RHSVal = getValue(RHS);
3844 BaseOps.clear();
3845 }
3846
3847 if (IsUnaryAbs) {
3848 OpCode = Opc;
3849 LHSVal = getValue(LHS);
3850 BaseOps.clear();
3851 }
3852 }
3853
3854 if (IsUnaryAbs) {
3855 for (unsigned i = 0; i != NumValues; ++i) {
3856 SDLoc dl = getCurSDLoc();
3857 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3858 Values[i] =
3859 DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3860 if (Negate)
3861 Values[i] = DAG.getNegative(Values[i], dl, VT);
3862 }
3863 } else {
3864 for (unsigned i = 0; i != NumValues; ++i) {
3865 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3866 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3867 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3868 Values[i] = DAG.getNode(
3869 OpCode, getCurSDLoc(),
3870 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3871 }
3872 }
3873
3874 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3875 DAG.getVTList(ValueVTs), Values));
3876 }
3877
3878 void SelectionDAGBuilder::visitTrunc(const User &I) {
3879 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3880 SDValue N = getValue(I.getOperand(0));
3881 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3882 I.getType());
3883 SDNodeFlags Flags;
3884 if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
3885 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3886 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3887 }
3888
3889 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N, Flags));
3890 }
3891
3892 void SelectionDAGBuilder::visitZExt(const User &I) {
3893 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3894   // ZExt also can't be a cast to bool, for the same reason; nothing much to do.
3895 SDValue N = getValue(I.getOperand(0));
3896 auto &TLI = DAG.getTargetLoweringInfo();
3897 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3898
3899 SDNodeFlags Flags;
3900 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3901 Flags.setNonNeg(PNI->hasNonNeg());
3902
3903 // Eagerly use nonneg information to canonicalize towards sign_extend if
3904 // that is the target's preference.
3905 // TODO: Let the target do this later.
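  // Illustrative: on a target where sign extension is the cheaper widening
  // (say, i32 to i64 where it is free), `zext nneg %x` is emitted as
  // SIGN_EXTEND; the two agree because the value is known non-negative.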
3906 if (Flags.hasNonNeg() &&
3907 TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3908 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3909 return;
3910 }
3911
3912 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3913 }
3914
3915 void SelectionDAGBuilder::visitSExt(const User &I) {
3916 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3917   // SExt also can't be a cast to bool, for the same reason; nothing much to do.
3918 SDValue N = getValue(I.getOperand(0));
3919 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3920 I.getType());
3921 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3922 }
3923
3924 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3925 // FPTrunc is never a no-op cast, no need to check
3926 SDValue N = getValue(I.getOperand(0));
3927 SDLoc dl = getCurSDLoc();
3928 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3929 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3930 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3931 DAG.getTargetConstant(
3932 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3933 }
3934
3935 void SelectionDAGBuilder::visitFPExt(const User &I) {
3936 // FPExt is never a no-op cast, no need to check
3937 SDValue N = getValue(I.getOperand(0));
3938 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3939 I.getType());
3940 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3941 }
3942
3943 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3944 // FPToUI is never a no-op cast, no need to check
3945 SDValue N = getValue(I.getOperand(0));
3946 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3947 I.getType());
3948 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3949 }
3950
3951 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3952 // FPToSI is never a no-op cast, no need to check
3953 SDValue N = getValue(I.getOperand(0));
3954 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3955 I.getType());
3956 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3957 }
3958
3959 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3960 // UIToFP is never a no-op cast, no need to check
3961 SDValue N = getValue(I.getOperand(0));
3962 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3963 I.getType());
3964 SDNodeFlags Flags;
3965 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3966 Flags.setNonNeg(PNI->hasNonNeg());
3967
3968 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags));
3969 }
3970
3971 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3972 // SIToFP is never a no-op cast, no need to check
3973 SDValue N = getValue(I.getOperand(0));
3974 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3975 I.getType());
3976 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3977 }
3978
3979 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3980 // What to do depends on the size of the integer and the size of the pointer.
3981 // We can either truncate, zero extend, or no-op, accordingly.
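  // Illustrative: ptrtoint of a 64-bit pointer to i32 truncates, to i128 it
  // zero-extends, and to i64 it is a no-op.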
3982 SDValue N = getValue(I.getOperand(0));
3983 auto &TLI = DAG.getTargetLoweringInfo();
3984 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3985 I.getType());
3986 EVT PtrMemVT =
3987 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3988 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3989 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3990 setValue(&I, N);
3991 }
3992
3993 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3994 // What to do depends on the size of the integer and the size of the pointer.
3995 // We can either truncate, zero extend, or no-op, accordingly.
3996 SDValue N = getValue(I.getOperand(0));
3997 auto &TLI = DAG.getTargetLoweringInfo();
3998 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3999 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4000 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
4001 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
4002 setValue(&I, N);
4003 }
4004
4005 void SelectionDAGBuilder::visitBitCast(const User &I) {
4006 SDValue N = getValue(I.getOperand(0));
4007 SDLoc dl = getCurSDLoc();
4008 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
4009 I.getType());
4010
4011 // BitCast assures us that source and destination are the same size so this is
4012 // either a BITCAST or a no-op.
4013 if (DestVT != N.getValueType())
4014 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
4015 DestVT, N)); // convert types.
4016 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
4017 // might fold any kind of constant expression to an integer constant and that
4018 // is not what we are looking for. Only recognize a bitcast of a genuine
4019 // constant integer as an opaque constant.
4020 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
4021 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
4022 /*isOpaque*/true));
4023 else
4024 setValue(&I, N); // noop cast.
4025 }
4026
4027 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
4028 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4029 const Value *SV = I.getOperand(0);
4030 SDValue N = getValue(SV);
4031 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4032
4033 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
4034 unsigned DestAS = I.getType()->getPointerAddressSpace();
4035
4036 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4037 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
4038
4039 setValue(&I, N);
4040 }
4041
4042 void SelectionDAGBuilder::visitInsertElement(const User &I) {
4043 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4044 SDValue InVec = getValue(I.getOperand(0));
4045 SDValue InVal = getValue(I.getOperand(1));
4046 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
4047 TLI.getVectorIdxTy(DAG.getDataLayout()));
4048 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
4049 TLI.getValueType(DAG.getDataLayout(), I.getType()),
4050 InVec, InVal, InIdx));
4051 }
4052
4053 void SelectionDAGBuilder::visitExtractElement(const User &I) {
4054 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4055 SDValue InVec = getValue(I.getOperand(0));
4056 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
4057 TLI.getVectorIdxTy(DAG.getDataLayout()));
4058 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
4059 TLI.getValueType(DAG.getDataLayout(), I.getType()),
4060 InVec, InIdx));
4061 }
4062
4063 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
4064 SDValue Src1 = getValue(I.getOperand(0));
4065 SDValue Src2 = getValue(I.getOperand(1));
4066 ArrayRef<int> Mask;
4067 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
4068 Mask = SVI->getShuffleMask();
4069 else
4070 Mask = cast<ConstantExpr>(I).getShuffleMask();
4071 SDLoc DL = getCurSDLoc();
4072 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4073 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4074 EVT SrcVT = Src1.getValueType();
4075
4076 if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
4077 VT.isScalableVector()) {
4078 // Canonical splat form of first element of first input vector.
4079 SDValue FirstElt =
4080 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
4081 DAG.getVectorIdxConstant(0, DL));
4082 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
4083 return;
4084 }
4085
4086 // For now, we only handle splats for scalable vectors.
4087 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4088 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4089 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
4090
4091 unsigned SrcNumElts = SrcVT.getVectorNumElements();
4092 unsigned MaskNumElts = Mask.size();
4093
4094 if (SrcNumElts == MaskNumElts) {
4095 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
4096 return;
4097 }
4098
4099 // Normalize the shuffle vector since mask and vector length don't match.
4100 if (SrcNumElts < MaskNumElts) {
4101     // Mask is longer than the source vectors. We can concatenate vectors to
4102     // make the mask and vector lengths match.
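    // Illustrative: with two v2i32 sources, mask <0,1,2,3> is exactly
    // concat(Src1, Src2), and mask <0,1,0,1> becomes concat(Src1, Src1).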
4103
4104 if (MaskNumElts % SrcNumElts == 0) {
4105 // Mask length is a multiple of the source vector length.
4106 // Check if the shuffle is some kind of concatenation of the input
4107 // vectors.
4108 unsigned NumConcat = MaskNumElts / SrcNumElts;
4109 bool IsConcat = true;
4110 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4111 for (unsigned i = 0; i != MaskNumElts; ++i) {
4112 int Idx = Mask[i];
4113 if (Idx < 0)
4114 continue;
4115 // Ensure the indices in each SrcVT sized piece are sequential and that
4116 // the same source is used for the whole piece.
4117 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4118 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4119 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
4120 IsConcat = false;
4121 break;
4122 }
4123 // Remember which source this index came from.
4124 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4125 }
4126
4127 // The shuffle is concatenating multiple vectors together. Just emit
4128 // a CONCAT_VECTORS operation.
4129 if (IsConcat) {
4130 SmallVector<SDValue, 8> ConcatOps;
4131 for (auto Src : ConcatSrcs) {
4132 if (Src < 0)
4133 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
4134 else if (Src == 0)
4135 ConcatOps.push_back(Src1);
4136 else
4137 ConcatOps.push_back(Src2);
4138 }
4139 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
4140 return;
4141 }
4142 }
4143
4144 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
4145 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4146 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
4147 PaddedMaskNumElts);
4148
4149 // Pad both vectors with undefs to make them the same length as the mask.
4150 SDValue UndefVal = DAG.getUNDEF(SrcVT);
4151
4152 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
4153 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
4154 MOps1[0] = Src1;
4155 MOps2[0] = Src2;
4156
4157 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
4158 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
4159
4160 // Readjust mask for new input vector length.
4161 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4162 for (unsigned i = 0; i != MaskNumElts; ++i) {
4163 int Idx = Mask[i];
4164 if (Idx >= (int)SrcNumElts)
4165 Idx -= SrcNumElts - PaddedMaskNumElts;
4166 MappedOps[i] = Idx;
4167 }
4168
4169 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
4170
4171 // If the concatenated vector was padded, extract a subvector with the
4172 // correct number of elements.
4173 if (MaskNumElts != PaddedMaskNumElts)
4174 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
4175 DAG.getVectorIdxConstant(0, DL));
4176
4177 setValue(&I, Result);
4178 return;
4179 }
4180
4181 assert(SrcNumElts > MaskNumElts);
4182
4183 // Analyze the access pattern of the vector to see if we can extract
4184 // two subvectors and do the shuffle.
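// For example (illustrative), with 8-element sources the 4-element mask
// <4,5,6,7> gives StartIdx[0] == 4, so the shuffle becomes a single
// EXTRACT_SUBVECTOR of Src1 at index 4 with an identity mask.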
4185 int StartIdx[2] = {-1, -1}; // StartIdx to extract from
4186 bool CanExtract = true;
4187 for (int Idx : Mask) {
4188 unsigned Input = 0;
4189 if (Idx < 0)
4190 continue;
4191
4192 if (Idx >= (int)SrcNumElts) {
4193 Input = 1;
4194 Idx -= SrcNumElts;
4195 }
4196
4197 // If all the indices come from the same MaskNumElts sized portion of
4198 // the sources we can use extract. Also make sure the extract wouldn't
4199 // extract past the end of the source.
4200 int NewStartIdx = alignDown(Idx, MaskNumElts);
4201 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4202 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4203 CanExtract = false;
4204 // Make sure we always update StartIdx as we use it to track if all
4205 // elements are undef.
4206 StartIdx[Input] = NewStartIdx;
4207 }
4208
4209 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4210 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4211 return;
4212 }
4213 if (CanExtract) {
4214 // Extract appropriate subvector and generate a vector shuffle
4215 for (unsigned Input = 0; Input < 2; ++Input) {
4216 SDValue &Src = Input == 0 ? Src1 : Src2;
4217 if (StartIdx[Input] < 0)
4218 Src = DAG.getUNDEF(VT);
4219 else {
4220 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4221 DAG.getVectorIdxConstant(StartIdx[Input], DL));
4222 }
4223 }
4224
4225 // Calculate new mask.
4226 SmallVector<int, 8> MappedOps(Mask);
4227 for (int &Idx : MappedOps) {
4228 if (Idx >= (int)SrcNumElts)
4229 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4230 else if (Idx >= 0)
4231 Idx -= StartIdx[0];
4232 }
4233
4234 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4235 return;
4236 }
4237
4238 // We can't use either concat vectors or extract subvectors, so fall back
4239 // to replacing the shuffle with per-element extracts feeding a build
4240 // vector.
4241 EVT EltVT = VT.getVectorElementType();
4242 SmallVector<SDValue,8> Ops;
4243 for (int Idx : Mask) {
4244 SDValue Res;
4245
4246 if (Idx < 0) {
4247 Res = DAG.getUNDEF(EltVT);
4248 } else {
4249 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4250 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4251
4252 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4253 DAG.getVectorIdxConstant(Idx, DL));
4254 }
4255
4256 Ops.push_back(Res);
4257 }
4258
4259 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4260 }
4261
4262 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4263 ArrayRef<unsigned> Indices = I.getIndices();
4264 const Value *Op0 = I.getOperand(0);
4265 const Value *Op1 = I.getOperand(1);
4266 Type *AggTy = I.getType();
4267 Type *ValTy = Op1->getType();
4268 bool IntoUndef = isa<UndefValue>(Op0);
4269 bool FromUndef = isa<UndefValue>(Op1);
4270
4271 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
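// For example (illustrative), inserting at indices {1, 1} into the
// aggregate type {i32, {i64, float}} gives LinearIndex == 2, because the
// float is the third scalar leaf in flattened order.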
4272
4273 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4274 SmallVector<EVT, 4> AggValueVTs;
4275 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4276 SmallVector<EVT, 4> ValValueVTs;
4277 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4278
4279 unsigned NumAggValues = AggValueVTs.size();
4280 unsigned NumValValues = ValValueVTs.size();
4281 SmallVector<SDValue, 4> Values(NumAggValues);
4282
4283 // Ignore an insertvalue that produces an empty object
4284 if (!NumAggValues) {
4285 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4286 return;
4287 }
4288
4289 SDValue Agg = getValue(Op0);
4290 unsigned i = 0;
4291 // Copy the beginning value(s) from the original aggregate.
4292 for (; i != LinearIndex; ++i)
4293 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4294 SDValue(Agg.getNode(), Agg.getResNo() + i);
4295 // Copy values from the inserted value(s).
4296 if (NumValValues) {
4297 SDValue Val = getValue(Op1);
4298 for (; i != LinearIndex + NumValValues; ++i)
4299 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4300 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4301 }
4302 // Copy remaining value(s) from the original aggregate.
4303 for (; i != NumAggValues; ++i)
4304 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4305 SDValue(Agg.getNode(), Agg.getResNo() + i);
4306
4307 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4308 DAG.getVTList(AggValueVTs), Values));
4309 }
4310
4311 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4312 ArrayRef<unsigned> Indices = I.getIndices();
4313 const Value *Op0 = I.getOperand(0);
4314 Type *AggTy = Op0->getType();
4315 Type *ValTy = I.getType();
4316 bool OutOfUndef = isa<UndefValue>(Op0);
4317
4318 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4319
4320 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4321 SmallVector<EVT, 4> ValValueVTs;
4322 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4323
4324 unsigned NumValValues = ValValueVTs.size();
4325
4326 // Ignore an extractvalue that produces an empty object
4327 if (!NumValValues) {
4328 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4329 return;
4330 }
4331
4332 SmallVector<SDValue, 4> Values(NumValValues);
4333
4334 SDValue Agg = getValue(Op0);
4335 // Copy out the selected value(s).
4336 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4337 Values[i - LinearIndex] =
4338 OutOfUndef ?
4339 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4340 SDValue(Agg.getNode(), Agg.getResNo() + i);
4341
4342 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4343 DAG.getVTList(ValValueVTs), Values));
4344 }
4345
4346 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4347 Value *Op0 = I.getOperand(0);
4348 // Note that the pointer operand may be a vector of pointers. Take the scalar
4349 // element which holds a pointer.
4350 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4351 SDValue N = getValue(Op0);
4352 SDLoc dl = getCurSDLoc();
4353 auto &TLI = DAG.getTargetLoweringInfo();
4354 GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4355
4356 // For a vector GEP, keep the prefix scalar as long as possible, then
4357 // convert any scalars encountered after the first vector operand to vectors.
4358 bool IsVectorGEP = I.getType()->isVectorTy();
4359 ElementCount VectorElementCount =
4360 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4361 : ElementCount::getFixed(0);
4362
4363 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4364 GTI != E; ++GTI) {
4365 const Value *Idx = GTI.getOperand();
4366 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4367 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4368 if (Field) {
4369 // N = N + Offset
4370 uint64_t Offset =
4371 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4372
4373 // In an inbounds GEP with an offset that is nonnegative even when
4374 // interpreted as signed, assume there is no unsigned overflow.
4375 SDNodeFlags Flags;
4376 if (NW.hasNoUnsignedWrap() ||
4377 (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4378 Flags |= SDNodeFlags::NoUnsignedWrap;
4379
4380 N = DAG.getMemBasePlusOffset(
4381 N, DAG.getConstant(Offset, dl, N.getValueType()), dl, Flags);
4382 }
4383 } else {
4384 // IdxSize is the width of the arithmetic according to IR semantics.
4385 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4386 // (and fix up the result later).
4387 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4388 MVT IdxTy = MVT::getIntegerVT(IdxSize);
4389 TypeSize ElementSize =
4390 GTI.getSequentialElementStride(DAG.getDataLayout());
4391 // We intentionally mask away the high bits here; ElementSize may not
4392 // fit in IdxTy.
4393 APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
4394 /*isSigned=*/false, /*implicitTrunc=*/true);
4395 bool ElementScalable = ElementSize.isScalable();
4396
4397 // If this is a scalar constant or a splat vector of constants,
4398 // handle it quickly.
4399 const auto *C = dyn_cast<Constant>(Idx);
4400 if (C && isa<VectorType>(C->getType()))
4401 C = C->getSplatValue();
4402
4403 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4404 if (CI && CI->isZero())
4405 continue;
4406 if (CI && !ElementScalable) {
4407 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4408 LLVMContext &Context = *DAG.getContext();
4409 SDValue OffsVal;
4410 if (N.getValueType().isVector())
4411 OffsVal = DAG.getConstant(
4412 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4413 else
4414 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4415
4416 // In an inbounds GEP with an offset that is nonnegative even when
4417 // interpreted as signed, assume there is no unsigned overflow.
4418 SDNodeFlags Flags;
4419 if (NW.hasNoUnsignedWrap() ||
4420 (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4421 Flags.setNoUnsignedWrap(true);
4422
4423 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4424
4425 N = DAG.getMemBasePlusOffset(N, OffsVal, dl, Flags);
4426 continue;
4427 }
4428
4429 // N = N + Idx * ElementMul;
4430 SDValue IdxN = getValue(Idx);
4431
4432 if (IdxN.getValueType().isVector() != N.getValueType().isVector()) {
4433 if (N.getValueType().isVector()) {
4434 EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4435 VectorElementCount);
4436 IdxN = DAG.getSplat(VT, dl, IdxN);
4437 } else {
4438 EVT VT =
4439 EVT::getVectorVT(*DAG.getContext(), N.getValueType(), VectorElementCount);
4440 N = DAG.getSplat(VT, dl, N);
4441 }
4442 }
4443
4444 // If the index is smaller or larger than intptr_t, truncate or extend
4445 // it.
4446 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4447
4448 SDNodeFlags ScaleFlags;
4449 // The multiplication of an index by the type size does not wrap the
4450 // pointer index type in a signed sense (mul nsw).
4451 ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap());
4452
4453 // The multiplication of an index by the type size does not wrap the
4454 // pointer index type in an unsigned sense (mul nuw).
4455 ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4456
4457 if (ElementScalable) {
4458 EVT VScaleTy = N.getValueType().getScalarType();
4459 SDValue VScale = DAG.getNode(
4460 ISD::VSCALE, dl, VScaleTy,
4461 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4462 if (N.getValueType().isVector())
4463 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4464 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4465 ScaleFlags);
4466 } else {
4467 // If this is a multiply by a power of two, turn it into a shl
4468 // immediately. This is a very common case.
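// For example (illustrative), indexing an array of i32 scales the index
// by 4, which is emitted below as IdxN << 2.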
4469 if (ElementMul != 1) {
4470 if (ElementMul.isPowerOf2()) {
4471 unsigned Amt = ElementMul.logBase2();
4472 IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN,
4473 DAG.getConstant(Amt, dl, IdxN.getValueType()),
4474 ScaleFlags);
4475 } else {
4476 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4477 IdxN.getValueType());
4478 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale,
4479 ScaleFlags);
4480 }
4481 }
4482 }
4483
4484 // The successive addition of the current address, truncated to the
4485 // pointer index type and interpreted as an unsigned number, and each
4486 // offset, also interpreted as an unsigned number, does not wrap the
4487 // pointer index type (add nuw).
4488 SDNodeFlags AddFlags;
4489 AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4490
4491 N = DAG.getMemBasePlusOffset(N, IdxN, dl, AddFlags);
4492 }
4493 }
4494
4495 if (IsVectorGEP && !N.getValueType().isVector()) {
4496 EVT VT = EVT::getVectorVT(*DAG.getContext(), N.getValueType(), VectorElementCount);
4497 N = DAG.getSplat(VT, dl, N);
4498 }
4499
4500 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4501 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4502 if (IsVectorGEP) {
4503 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4504 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4505 }
4506
4507 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4508 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4509
4510 setValue(&I, N);
4511 }
4512
4513 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4514 // If this is a fixed sized alloca in the entry block of the function,
4515 // allocate it statically on the stack.
4516 if (FuncInfo.StaticAllocaMap.count(&I))
4517 return; // getValue will auto-populate this.
4518
4519 SDLoc dl = getCurSDLoc();
4520 Type *Ty = I.getAllocatedType();
4521 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4522 auto &DL = DAG.getDataLayout();
4523 TypeSize TySize = DL.getTypeAllocSize(Ty);
4524 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4525
4526 SDValue AllocSize = getValue(I.getArraySize());
4527
4528 EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4529 if (AllocSize.getValueType() != IntPtr)
4530 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4531
4532 if (TySize.isScalable())
4533 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4534 DAG.getVScale(dl, IntPtr,
4535 APInt(IntPtr.getScalarSizeInBits(),
4536 TySize.getKnownMinValue())));
4537 else {
4538 SDValue TySizeValue =
4539 DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4540 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4541 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4542 }
4543
4544 // Handle alignment. If the requested alignment is less than or equal to
4545 // the stack alignment, ignore it. If it is greater than the stack
4546 // alignment, we record it on the DYNAMIC_STACKALLOC node.
4547 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4548 if (*Alignment <= StackAlign)
4549 Alignment = std::nullopt;
4550
4551 const uint64_t StackAlignMask = StackAlign.value() - 1U;
4552 // Round the size of the allocation up to the stack alignment size
4553 // by adding SA-1 to the size. This doesn't overflow because we're computing
4554 // an address inside an alloca.
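// In other words (illustrative formula, combined with the mask below):
//   AllocSize = (AllocSize + StackAlign - 1) & ~(StackAlign - 1)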
4555 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4556 DAG.getConstant(StackAlignMask, dl, IntPtr),
4557 SDNodeFlags::NoUnsignedWrap);
4558
4559 // Mask out the low bits for alignment purposes.
4560 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4561 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4562
4563 SDValue Ops[] = {
4564 getRoot(), AllocSize,
4565 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4566 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4567 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4568 setValue(&I, DSA);
4569 DAG.setRoot(DSA.getValue(1));
4570
4571 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4572 }
4573
4574 static const MDNode *getRangeMetadata(const Instruction &I) {
4575 return I.getMetadata(LLVMContext::MD_range);
4576 }
4577
4578 static std::optional<ConstantRange> getRange(const Instruction &I) {
4579 if (const auto *CB = dyn_cast<CallBase>(&I))
4580 if (std::optional<ConstantRange> CR = CB->getRange())
4581 return CR;
4582 if (const MDNode *Range = getRangeMetadata(I))
4583 return getConstantRangeFromMetadata(*Range);
4584 return std::nullopt;
4585 }
4586
4587 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4588 if (I.isAtomic())
4589 return visitAtomicLoad(I);
4590
4591 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4592 const Value *SV = I.getOperand(0);
4593 if (TLI.supportSwiftError()) {
4594 // Swifterror values can come from either a function parameter with
4595 // swifterror attribute or an alloca with swifterror attribute.
4596 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4597 if (Arg->hasSwiftErrorAttr())
4598 return visitLoadFromSwiftError(I);
4599 }
4600
4601 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4602 if (Alloca->isSwiftError())
4603 return visitLoadFromSwiftError(I);
4604 }
4605 }
4606
4607 SDValue Ptr = getValue(SV);
4608
4609 Type *Ty = I.getType();
4610 SmallVector<EVT, 4> ValueVTs, MemVTs;
4611 SmallVector<TypeSize, 4> Offsets;
4612 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4613 unsigned NumValues = ValueVTs.size();
4614 if (NumValues == 0)
4615 return;
4616
4617 Align Alignment = I.getAlign();
4618 AAMDNodes AAInfo = I.getAAMetadata();
4619 const MDNode *Ranges = getRangeMetadata(I);
4620 bool isVolatile = I.isVolatile();
4621 MachineMemOperand::Flags MMOFlags =
4622 TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4623
4624 SDValue Root;
4625 bool ConstantMemory = false;
4626 if (isVolatile)
4627 // Serialize volatile loads with other side effects.
4628 Root = getRoot();
4629 else if (NumValues > MaxParallelChains)
4630 Root = getMemoryRoot();
4631 else if (BatchAA &&
4632 BatchAA->pointsToConstantMemory(MemoryLocation(
4633 SV,
4634 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4635 AAInfo))) {
4636 // Do not serialize (non-volatile) loads of constant memory with anything.
4637 Root = DAG.getEntryNode();
4638 ConstantMemory = true;
4639 MMOFlags |= MachineMemOperand::MOInvariant;
4640 } else {
4641 // Do not serialize non-volatile loads against each other.
4642 Root = DAG.getRoot();
4643 }
4644
4645 SDLoc dl = getCurSDLoc();
4646
4647 if (isVolatile)
4648 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4649
4650 SmallVector<SDValue, 4> Values(NumValues);
4651 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4652
4653 unsigned ChainI = 0;
4654 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4655 // Serializing loads here may result in excessive register pressure, and
4656 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4657 // could recover a bit by hoisting nodes upward in the chain by recognizing
4658 // they are side-effect free or do not alias. The optimizer should really
4659 // avoid this case by converting large object/array copies to llvm.memcpy
4660 // (MaxParallelChains should always remain as a failsafe).
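// For example (illustrative), with MaxParallelChains equal to, say, 64, a
// 100-value load emits 64 parallel loads, joins their chains with one
// TokenFactor, and chains the remaining 36 loads off that TokenFactor.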
4661 if (ChainI == MaxParallelChains) {
4662 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4663 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4664 ArrayRef(Chains.data(), ChainI));
4665 Root = Chain;
4666 ChainI = 0;
4667 }
4668
4669 // TODO: MachinePointerInfo only supports a fixed length offset.
4670 MachinePointerInfo PtrInfo =
4671 !Offsets[i].isScalable() || Offsets[i].isZero()
4672 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4673 : MachinePointerInfo();
4674
4675 SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4676 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4677 MMOFlags, AAInfo, Ranges);
4678 Chains[ChainI] = L.getValue(1);
4679
4680 if (MemVTs[i] != ValueVTs[i])
4681 L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4682
4683 Values[i] = L;
4684 }
4685
4686 if (!ConstantMemory) {
4687 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4688 ArrayRef(Chains.data(), ChainI));
4689 if (isVolatile)
4690 DAG.setRoot(Chain);
4691 else
4692 PendingLoads.push_back(Chain);
4693 }
4694
4695 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4696 DAG.getVTList(ValueVTs), Values));
4697 }
4698
4699 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4700 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4701 "call visitStoreToSwiftError when backend supports swifterror");
4702
4703 SmallVector<EVT, 4> ValueVTs;
4704 SmallVector<uint64_t, 4> Offsets;
4705 const Value *SrcV = I.getOperand(0);
4706 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4707 SrcV->getType(), ValueVTs, &Offsets, 0);
4708 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4709 "expect a single EVT for swifterror");
4710
4711 SDValue Src = getValue(SrcV);
4712 // Create a virtual register, then update the virtual register.
4713 Register VReg =
4714 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4715 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4716 // Chain can be getRoot or getControlRoot.
4717 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4718 SDValue(Src.getNode(), Src.getResNo()));
4719 DAG.setRoot(CopyNode);
4720 }
4721
4722 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4723 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4724 "call visitLoadFromSwiftError when backend supports swifterror");
4725
4726 assert(!I.isVolatile() &&
4727 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4728 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4729 "Support volatile, non temporal, invariant for load_from_swift_error");
4730
4731 const Value *SV = I.getOperand(0);
4732 Type *Ty = I.getType();
4733 assert(
4734 (!BatchAA ||
4735 !BatchAA->pointsToConstantMemory(MemoryLocation(
4736 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4737 I.getAAMetadata()))) &&
4738 "load_from_swift_error should not be constant memory");
4739
4740 SmallVector<EVT, 4> ValueVTs;
4741 SmallVector<uint64_t, 4> Offsets;
4742 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4743 ValueVTs, &Offsets, 0);
4744 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4745 "expect a single EVT for swifterror");
4746
4747 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4748 SDValue L = DAG.getCopyFromReg(
4749 getRoot(), getCurSDLoc(),
4750 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4751
4752 setValue(&I, L);
4753 }
4754
4755 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4756 if (I.isAtomic())
4757 return visitAtomicStore(I);
4758
4759 const Value *SrcV = I.getOperand(0);
4760 const Value *PtrV = I.getOperand(1);
4761
4762 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4763 if (TLI.supportSwiftError()) {
4764 // Swifterror values can come from either a function parameter with
4765 // swifterror attribute or an alloca with swifterror attribute.
4766 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4767 if (Arg->hasSwiftErrorAttr())
4768 return visitStoreToSwiftError(I);
4769 }
4770
4771 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4772 if (Alloca->isSwiftError())
4773 return visitStoreToSwiftError(I);
4774 }
4775 }
4776
4777 SmallVector<EVT, 4> ValueVTs, MemVTs;
4778 SmallVector<TypeSize, 4> Offsets;
4779 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4780 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4781 unsigned NumValues = ValueVTs.size();
4782 if (NumValues == 0)
4783 return;
4784
4785 // Get the lowered operands. Note that we do this after
4786 // checking if NumResults is zero, because with zero results
4787 // the operands won't have values in the map.
4788 SDValue Src = getValue(SrcV);
4789 SDValue Ptr = getValue(PtrV);
4790
4791 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4792 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4793 SDLoc dl = getCurSDLoc();
4794 Align Alignment = I.getAlign();
4795 AAMDNodes AAInfo = I.getAAMetadata();
4796
4797 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4798
4799 unsigned ChainI = 0;
4800 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4801 // See visitLoad comments.
4802 if (ChainI == MaxParallelChains) {
4803 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4804 ArrayRef(Chains.data(), ChainI));
4805 Root = Chain;
4806 ChainI = 0;
4807 }
4808
4809 // TODO: MachinePointerInfo only supports a fixed length offset.
4810 MachinePointerInfo PtrInfo =
4811 !Offsets[i].isScalable() || Offsets[i].isZero()
4812 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4813 : MachinePointerInfo();
4814
4815 SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4816 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4817 if (MemVTs[i] != ValueVTs[i])
4818 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4819 SDValue St =
4820 DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4821 Chains[ChainI] = St;
4822 }
4823
4824 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4825 ArrayRef(Chains.data(), ChainI));
4826 setValue(&I, StoreNode);
4827 DAG.setRoot(StoreNode);
4828 }
4829
4830 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4831 bool IsCompressing) {
4832 SDLoc sdl = getCurSDLoc();
4833
4834 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4835 Align &Alignment) {
4836 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4837 Src0 = I.getArgOperand(0);
4838 Ptr = I.getArgOperand(1);
4839 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue();
4840 Mask = I.getArgOperand(3);
4841 };
4842 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4843 Align &Alignment) {
4844 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4845 Src0 = I.getArgOperand(0);
4846 Ptr = I.getArgOperand(1);
4847 Mask = I.getArgOperand(2);
4848 Alignment = I.getParamAlign(1).valueOrOne();
4849 };
4850
4851 Value *PtrOperand, *MaskOperand, *Src0Operand;
4852 Align Alignment;
4853 if (IsCompressing)
4854 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4855 else
4856 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4857
4858 SDValue Ptr = getValue(PtrOperand);
4859 SDValue Src0 = getValue(Src0Operand);
4860 SDValue Mask = getValue(MaskOperand);
4861 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4862
4863 EVT VT = Src0.getValueType();
4864
4865 auto MMOFlags = MachineMemOperand::MOStore;
4866 if (I.hasMetadata(LLVMContext::MD_nontemporal))
4867 MMOFlags |= MachineMemOperand::MONonTemporal;
4868
4869 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4870 MachinePointerInfo(PtrOperand), MMOFlags,
4871 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4872
4873 const auto &TLI = DAG.getTargetLoweringInfo();
4874 const auto &TTI =
4875 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
4876 SDValue StoreNode =
4877 !IsCompressing && TTI.hasConditionalLoadStoreForType(
4878 I.getArgOperand(0)->getType(), /*IsStore=*/true)
4879 ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0,
4880 Mask)
4881 : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask,
4882 VT, MMO, ISD::UNINDEXED, /*Truncating=*/false,
4883 IsCompressing);
4884 DAG.setRoot(StoreNode);
4885 setValue(&I, StoreNode);
4886 }
4887
4888 // Get a uniform base for the Gather/Scatter intrinsic.
4889 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4890 // We try to represent it as a base pointer + vector of indices.
4891 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4892 // The first operand of the GEP may be a single pointer or a vector of pointers.
4893 // Example:
4894 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4895 // or
4896 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4897 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4898 //
4899 // When the first GEP operand is a single pointer, it is the uniform base we
4900 // are looking for. If the first operand of the GEP is a splat vector, we
4901 // extract the splat value and use it as the uniform base.
4902 // In all other cases the function returns 'false'.
4903 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4904 ISD::MemIndexType &IndexType, SDValue &Scale,
4905 SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4906 uint64_t ElemSize) {
4907 SelectionDAG& DAG = SDB->DAG;
4908 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4909 const DataLayout &DL = DAG.getDataLayout();
4910
4911 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4912
4913 // Handle splat constant pointer.
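// For example (illustrative), a <4 x ptr> constant splat of a global @g
// yields Base == @g with an all-zero index vector and a scale of 1.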
4914 if (auto *C = dyn_cast<Constant>(Ptr)) {
4915 C = C->getSplatValue();
4916 if (!C)
4917 return false;
4918
4919 Base = SDB->getValue(C);
4920
4921 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4922 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4923 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4924 IndexType = ISD::SIGNED_SCALED;
4925 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4926 return true;
4927 }
4928
4929 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4930 if (!GEP || GEP->getParent() != CurBB)
4931 return false;
4932
4933 if (GEP->getNumOperands() != 2)
4934 return false;
4935
4936 const Value *BasePtr = GEP->getPointerOperand();
4937 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4938
4939 // Make sure the base is scalar and the index is a vector.
4940 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4941 return false;
4942
4943 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4944 if (ScaleVal.isScalable())
4945 return false;
4946
4947 // Target may not support the required addressing mode.
4948 if (ScaleVal != 1 &&
4949 !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4950 return false;
4951
4952 Base = SDB->getValue(BasePtr);
4953 Index = SDB->getValue(IndexVal);
4954 IndexType = ISD::SIGNED_SCALED;
4955
4956 Scale =
4957 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4958 return true;
4959 }
4960
4961 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4962 SDLoc sdl = getCurSDLoc();
4963
4964 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4965 const Value *Ptr = I.getArgOperand(1);
4966 SDValue Src0 = getValue(I.getArgOperand(0));
4967 SDValue Mask = getValue(I.getArgOperand(3));
4968 EVT VT = Src0.getValueType();
4969 Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4970 ->getMaybeAlignValue()
4971 .value_or(DAG.getEVTAlign(VT.getScalarType()));
4972 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4973
4974 SDValue Base;
4975 SDValue Index;
4976 ISD::MemIndexType IndexType;
4977 SDValue Scale;
4978 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4979 I.getParent(), VT.getScalarStoreSize());
4980
4981 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4982 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4983 MachinePointerInfo(AS), MachineMemOperand::MOStore,
4984 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4985 if (!UniformBase) {
4986 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4987 Index = getValue(Ptr);
4988 IndexType = ISD::SIGNED_SCALED;
4989 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4990 }
4991
4992 EVT IdxVT = Index.getValueType();
4993 EVT EltTy = IdxVT.getVectorElementType();
4994 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4995 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4996 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4997 }
4998
4999 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
5000 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
5001 Ops, MMO, IndexType, false);
5002 DAG.setRoot(Scatter);
5003 setValue(&I, Scatter);
5004 }
5005
5006 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
5007 SDLoc sdl = getCurSDLoc();
5008
5009 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
5010 Align &Alignment) {
5011 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
5012 Ptr = I.getArgOperand(0);
5013 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue();
5014 Mask = I.getArgOperand(2);
5015 Src0 = I.getArgOperand(3);
5016 };
5017 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
5018 Align &Alignment) {
5019 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
5020 Ptr = I.getArgOperand(0);
5021 Alignment = I.getParamAlign(0).valueOrOne();
5022 Mask = I.getArgOperand(1);
5023 Src0 = I.getArgOperand(2);
5024 };
5025
5026 Value *PtrOperand, *MaskOperand, *Src0Operand;
5027 Align Alignment;
5028 if (IsExpanding)
5029 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5030 else
5031 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5032
5033 SDValue Ptr = getValue(PtrOperand);
5034 SDValue Src0 = getValue(Src0Operand);
5035 SDValue Mask = getValue(MaskOperand);
5036 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
5037
5038 EVT VT = Src0.getValueType();
5039 AAMDNodes AAInfo = I.getAAMetadata();
5040 const MDNode *Ranges = getRangeMetadata(I);
5041
5042 // Do not serialize masked loads of constant memory with anything.
5043 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
5044 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
5045
5046 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
5047
5048 auto MMOFlags = MachineMemOperand::MOLoad;
5049 if (I.hasMetadata(LLVMContext::MD_nontemporal))
5050 MMOFlags |= MachineMemOperand::MONonTemporal;
5051
5052 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5053 MachinePointerInfo(PtrOperand), MMOFlags,
5054 LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
5055
5056 const auto &TLI = DAG.getTargetLoweringInfo();
5057 const auto &TTI =
5058 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
5059 // Load and Res may be different nodes: Res is the value of the masked load,
5060 // while Load is the memory node whose chain result is consumed below.
5061 SDValue Load;
5062 SDValue Res;
5063 if (!IsExpanding && TTI.hasConditionalLoadStoreForType(Src0Operand->getType(),
5064 /*IsStore=*/false))
5065 Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask);
5066 else
5067 Res = Load =
5068 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
5069 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
5070 if (AddToChain)
5071 PendingLoads.push_back(Load.getValue(1));
5072 setValue(&I, Res);
5073 }
5074
5075 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
5076 SDLoc sdl = getCurSDLoc();
5077
5078 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
5079 const Value *Ptr = I.getArgOperand(0);
5080 SDValue Src0 = getValue(I.getArgOperand(3));
5081 SDValue Mask = getValue(I.getArgOperand(2));
5082
5083 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5084 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5085 Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
5086 ->getMaybeAlignValue()
5087 .value_or(DAG.getEVTAlign(VT.getScalarType()));
5088
5089 const MDNode *Ranges = getRangeMetadata(I);
5090
5091 SDValue Root = DAG.getRoot();
5092 SDValue Base;
5093 SDValue Index;
5094 ISD::MemIndexType IndexType;
5095 SDValue Scale;
5096 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
5097 I.getParent(), VT.getScalarStoreSize());
5098 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5099 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5100 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
5101 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
5102 Ranges);
5103
5104 if (!UniformBase) {
5105 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5106 Index = getValue(Ptr);
5107 IndexType = ISD::SIGNED_SCALED;
5108 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5109 }
5110
5111 EVT IdxVT = Index.getValueType();
5112 EVT EltTy = IdxVT.getVectorElementType();
5113 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5114 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
5115 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5116 }
5117
5118 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
5119 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
5120 Ops, MMO, IndexType, ISD::NON_EXTLOAD);
5121
5122 PendingLoads.push_back(Gather.getValue(1));
5123 setValue(&I, Gather);
5124 }
5125
5126 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
5127 SDLoc dl = getCurSDLoc();
5128 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
5129 AtomicOrdering FailureOrdering = I.getFailureOrdering();
5130 SyncScope::ID SSID = I.getSyncScopeID();
5131
5132 SDValue InChain = getRoot();
5133
5134 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
5135 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5136
5137 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5138 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5139
5140 MachineFunction &MF = DAG.getMachineFunction();
5141 MachineMemOperand *MMO = MF.getMachineMemOperand(
5142 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5143 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
5144 FailureOrdering);
5145
5146 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5147 dl, MemVT, VTs, InChain,
5148 getValue(I.getPointerOperand()),
5149 getValue(I.getCompareOperand()),
5150 getValue(I.getNewValOperand()), MMO);
5151
5152 SDValue OutChain = L.getValue(2);
5153
5154 setValue(&I, L);
5155 DAG.setRoot(OutChain);
5156 }
5157
5158 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
5159 SDLoc dl = getCurSDLoc();
5160 ISD::NodeType NT;
5161 switch (I.getOperation()) {
5162 default: llvm_unreachable("Unknown atomicrmw operation");
5163 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
5164 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
5165 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
5166 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
5167 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
5168 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
5169 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
5170 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
5171 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
5172 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
5173 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
5174 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
5175 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
5176 case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
5177 case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
5178 case AtomicRMWInst::FMaximum:
5179 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5180 break;
5181 case AtomicRMWInst::FMinimum:
5182 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5183 break;
5184 case AtomicRMWInst::UIncWrap:
5185 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5186 break;
5187 case AtomicRMWInst::UDecWrap:
5188 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5189 break;
5190 case AtomicRMWInst::USubCond:
5191 NT = ISD::ATOMIC_LOAD_USUB_COND;
5192 break;
5193 case AtomicRMWInst::USubSat:
5194 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5195 break;
5196 }
5197 AtomicOrdering Ordering = I.getOrdering();
5198 SyncScope::ID SSID = I.getSyncScopeID();
5199
5200 SDValue InChain = getRoot();
5201
5202 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
5203 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5204 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5205
5206 MachineFunction &MF = DAG.getMachineFunction();
5207 MachineMemOperand *MMO = MF.getMachineMemOperand(
5208 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5209 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
5210
5211 SDValue L =
5212 DAG.getAtomic(NT, dl, MemVT, InChain,
5213 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
5214 MMO);
5215
5216 SDValue OutChain = L.getValue(1);
5217
5218 setValue(&I, L);
5219 DAG.setRoot(OutChain);
5220 }
5221
5222 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
5223 SDLoc dl = getCurSDLoc();
5224 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5225 SDValue Ops[3];
5226 Ops[0] = getRoot();
5227 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
5228 TLI.getFenceOperandTy(DAG.getDataLayout()));
5229 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
5230 TLI.getFenceOperandTy(DAG.getDataLayout()));
5231 SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
5232 setValue(&I, N);
5233 DAG.setRoot(N);
5234 }
5235
5236 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
5237 SDLoc dl = getCurSDLoc();
5238 AtomicOrdering Order = I.getOrdering();
5239 SyncScope::ID SSID = I.getSyncScopeID();
5240
5241 SDValue InChain = getRoot();
5242
5243 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5244 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5245 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
5246
5247 if (!TLI.supportsUnalignedAtomics() &&
5248 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5249 report_fatal_error("Cannot generate unaligned atomic load");
5250
5251 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
5252
5253 const MDNode *Ranges = getRangeMetadata(I);
5254 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5255 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5256 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5257
5258 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
5259
5260 SDValue Ptr = getValue(I.getPointerOperand());
5261 SDValue L =
5262 DAG.getAtomicLoad(ISD::NON_EXTLOAD, dl, MemVT, MemVT, InChain, Ptr, MMO);
5263
5264 SDValue OutChain = L.getValue(1);
5265 if (MemVT != VT)
5266 L = DAG.getPtrExtOrTrunc(L, dl, VT);
5267
5268 setValue(&I, L);
5269 DAG.setRoot(OutChain);
5270 }
5271
5272 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
5273 SDLoc dl = getCurSDLoc();
5274
5275 AtomicOrdering Ordering = I.getOrdering();
5276 SyncScope::ID SSID = I.getSyncScopeID();
5277
5278 SDValue InChain = getRoot();
5279
5280 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5281 EVT MemVT =
5282 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5283
5284 if (!TLI.supportsUnalignedAtomics() &&
5285 I.getAlign().value() < MemVT.getSizeInBits() / 8)
5286 report_fatal_error("Cannot generate unaligned atomic store");
5287
5288 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
5289
5290 MachineFunction &MF = DAG.getMachineFunction();
5291 MachineMemOperand *MMO = MF.getMachineMemOperand(
5292 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
5293 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
5294
5295 SDValue Val = getValue(I.getValueOperand());
5296 if (Val.getValueType() != MemVT)
5297 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5298 SDValue Ptr = getValue(I.getPointerOperand());
5299
5300 SDValue OutChain =
5301 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5302
5303 setValue(&I, OutChain);
5304 DAG.setRoot(OutChain);
5305 }
5306
5307 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5308 /// node.
5309 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
5310 unsigned Intrinsic) {
5311 // Ignore the callsite's attributes. A specific call site may be marked with
5312 // readnone, but the lowering code will expect the chain based on the
5313 // definition.
5314 const Function *F = I.getCalledFunction();
5315 bool HasChain = !F->doesNotAccessMemory();
5316 bool OnlyLoad =
5317 HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow();
5318
5319 // Build the operand list.
5320 SmallVector<SDValue, 8> Ops;
5321 if (HasChain) { // If this intrinsic has side-effects, chainify it.
5322 if (OnlyLoad) {
5323 // We don't need to serialize loads against other loads.
5324 Ops.push_back(DAG.getRoot());
5325 } else {
5326 Ops.push_back(getRoot());
5327 }
5328 }
5329
5330 // Info is set by getTgtMemIntrinsic
5331 TargetLowering::IntrinsicInfo Info;
5332 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5333 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5334 DAG.getMachineFunction(),
5335 Intrinsic);
5336
5337 // Add the intrinsic ID as an integer operand if it's not a target intrinsic, or if it is still lowered via the generic INTRINSIC_VOID/INTRINSIC_W_CHAIN opcodes.
5338 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5339 Info.opc == ISD::INTRINSIC_W_CHAIN)
5340 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5341 TLI.getPointerTy(DAG.getDataLayout())));
5342
5343 // Add all operands of the call to the operand list.
5344 for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5345 const Value *Arg = I.getArgOperand(i);
5346 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5347 Ops.push_back(getValue(Arg));
5348 continue;
5349 }
5350
5351 // Use TargetConstant instead of a regular constant for immarg.
5352 EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5353 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5354 assert(CI->getBitWidth() <= 64 &&
5355 "large intrinsic immediates not handled");
5356 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5357 } else {
5358 Ops.push_back(
5359 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5360 }
5361 }
5362
5363 SmallVector<EVT, 4> ValueVTs;
5364 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5365
5366 if (HasChain)
5367 ValueVTs.push_back(MVT::Other);
5368
5369 SDVTList VTs = DAG.getVTList(ValueVTs);
5370
5371 // Propagate fast-math-flags from IR to node(s).
5372 SDNodeFlags Flags;
5373 if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5374 Flags.copyFMF(*FPMO);
5375 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5376
5377 // Create the node.
5378 SDValue Result;
5379
5380 if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
5381 auto *Token = Bundle->Inputs[0].get();
5382 SDValue ConvControlToken = getValue(Token);
5383 assert(Ops.back().getValueType() != MVT::Glue &&
5384 "Did not expected another glue node here.");
5385 ConvControlToken =
5386 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5387 Ops.push_back(ConvControlToken);
5388 }
5389
5390 // In some cases, custom collection of operands from CallInst I may be needed.
5391 TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5392 if (IsTgtIntrinsic) {
5393 // This is target intrinsic that touches memory
5394 //
5395 // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
5396 // didn't yield anything useful.
5397 MachinePointerInfo MPI;
5398 if (Info.ptrVal)
5399 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5400 else if (Info.fallbackAddressSpace)
5401 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5402 EVT MemVT = Info.memVT;
5403 LocationSize Size = LocationSize::precise(Info.size);
5404 if (Size.hasValue() && !Size.getValue())
5405 Size = LocationSize::precise(MemVT.getStoreSize());
5406 Align Alignment = Info.align.value_or(DAG.getEVTAlign(MemVT));
5407 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5408 MPI, Info.flags, Size, Alignment, I.getAAMetadata(), /*Ranges=*/nullptr,
5409 Info.ssid, Info.order, Info.failureOrder);
5410 Result =
5411 DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, MemVT, MMO);
5412 } else if (!HasChain) {
5413 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5414 } else if (!I.getType()->isVoidTy()) {
5415 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5416 } else {
5417 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5418 }
5419
5420 if (HasChain) {
5421 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5422 if (OnlyLoad)
5423 PendingLoads.push_back(Chain);
5424 else
5425 DAG.setRoot(Chain);
5426 }
5427
5428 if (!I.getType()->isVoidTy()) {
5429 if (!isa<VectorType>(I.getType()))
5430 Result = lowerRangeToAssertZExt(DAG, I, Result);
5431
5432 MaybeAlign Alignment = I.getRetAlign();
5433
5434 // Insert `assertalign` node if there's an alignment.
5435 if (InsertAssertAlign && Alignment) {
5436 Result =
5437 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5438 }
5439 }
5440
5441 setValue(&I, Result);
5442 }
5443
5444 /// GetSignificand - Get the significand and build it into a floating-point
5445 /// number with exponent of 1:
5446 ///
5447 /// Op = (Op & 0x007fffff) | 0x3f800000;
5448 ///
5449 /// where Op is the i32 bit pattern of the floating-point value.
5450 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5451 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5452 DAG.getConstant(0x007fffff, dl, MVT::i32));
5453 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5454 DAG.getConstant(0x3f800000, dl, MVT::i32));
5455 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5456 }
5457
5458 /// GetExponent - Get the exponent:
5459 ///
5460 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5461 ///
5462 /// where Op is the i32 bit pattern of the floating-point value.
5463 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5464 const TargetLowering &TLI, const SDLoc &dl) {
5465 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5466 DAG.getConstant(0x7f800000, dl, MVT::i32));
5467 SDValue t1 = DAG.getNode(
5468 ISD::SRL, dl, MVT::i32, t0,
5469 DAG.getConstant(23, dl,
5470 TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5471 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5472 DAG.getConstant(127, dl, MVT::i32));
5473 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5474 }
5475
5476 /// getF32Constant - Get 32-bit floating point constant.
5477 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5478 const SDLoc &dl) {
5479 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5480 MVT::f32);
5481 }
5482
5483 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5484 SelectionDAG &DAG) {
5485 // TODO: What fast-math-flags should be set on the floating-point nodes?
5486
5487 // IntegerPartOfX = (int32_t)t0;
5488 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5489
5490 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
5491 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5492 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5493
5494 // IntegerPartOfX <<= 23;
5495 IntegerPartOfX =
5496 DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5497 DAG.getConstant(23, dl,
5498 DAG.getTargetLoweringInfo().getShiftAmountTy(
5499 MVT::i32, DAG.getDataLayout())));
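// Shifting the integer part into the IEEE-754 single-precision exponent
// field means the integer-domain ADD at the end effectively multiplies
// the polynomial result by 2^IntegerPartOfX (assuming no exponent
// overflow).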
5500
5501 SDValue TwoToFractionalPartOfX;
5502 if (LimitFloatPrecision <= 6) {
5503 // For floating-point precision of 6:
5504 //
5505 // TwoToFractionalPartOfX =
5506 // 0.997535578f +
5507 // (0.735607626f + 0.252464424f * x) * x;
5508 //
5509 // error 0.0144103317, which is 6 bits
5510 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5511 getF32Constant(DAG, 0x3e814304, dl));
5512 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5513 getF32Constant(DAG, 0x3f3c50c8, dl));
5514 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5515 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5516 getF32Constant(DAG, 0x3f7f5e7e, dl));
5517 } else if (LimitFloatPrecision <= 12) {
5518 // For floating-point precision of 12:
5519 //
5520 // TwoToFractionalPartOfX =
5521 // 0.999892986f +
5522 // (0.696457318f +
5523 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
5524 //
5525 // error 0.000107046256, which is 13 to 14 bits
5526 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5527 getF32Constant(DAG, 0x3da235e3, dl));
5528 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5529 getF32Constant(DAG, 0x3e65b8f3, dl));
5530 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5531 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5532 getF32Constant(DAG, 0x3f324b07, dl));
5533 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5534 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5535 getF32Constant(DAG, 0x3f7ff8fd, dl));
5536 } else { // LimitFloatPrecision <= 18
5537 // For floating-point precision of 18:
5538 //
5539 // TwoToFractionalPartOfX =
5540 // 0.999999982f +
5541 // (0.693148872f +
5542 // (0.240227044f +
5543 // (0.554906021e-1f +
5544 // (0.961591928e-2f +
5545 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5546 // error 2.47208000*10^(-7), which is better than 18 bits
5547 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5548 getF32Constant(DAG, 0x3924b03e, dl));
5549 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5550 getF32Constant(DAG, 0x3ab24b87, dl));
5551 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5552 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5553 getF32Constant(DAG, 0x3c1d8c17, dl));
5554 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5555 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5556 getF32Constant(DAG, 0x3d634a1d, dl));
5557 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5558 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5559 getF32Constant(DAG, 0x3e75fe14, dl));
5560 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5561 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5562 getF32Constant(DAG, 0x3f317234, dl));
5563 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5564 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5565 getF32Constant(DAG, 0x3f800000, dl));
5566 }
5567
5568 // Add the exponent into the result in integer domain.
5569 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5570 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5571 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5572 }
5573
5574 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5575 /// limited-precision mode.
5576 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5577 const TargetLowering &TLI, SDNodeFlags Flags) {
5578 if (Op.getValueType() == MVT::f32 &&
5579 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5580
5581 // Put the exponent in the right bit position for later addition to the
5582 // final result:
5583 //
5584 // t0 = Op * log2(e)
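//
// so that exp(Op) == 2^t0; getLimitedPrecisionExp2 then evaluates 2^t0 as
// 2^IntegerPartOfX * 2^FractionalPartOfX.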
5585
5586 // TODO: What fast-math-flags should be set here?
5587 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5588 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5589 return getLimitedPrecisionExp2(t0, dl, DAG);
5590 }
5591
5592 // No special expansion.
5593 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5594 }
5595
5596 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5597 /// limited-precision mode.
5598 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5599 const TargetLowering &TLI, SDNodeFlags Flags) {
5600 // TODO: What fast-math-flags should be set on the floating-point nodes?
5601
5602 if (Op.getValueType() == MVT::f32 &&
5603 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5604 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5605
5606 // Scale the exponent by log(2).
5607 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5608 SDValue LogOfExponent =
5609 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5610 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
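    // Sketch of the decomposition used here: writing x = 2^n * m with
    // m in [1,2) gives log(x) = n * log(2) + log(m); LogOfExponent is the
    // first term, and the minimax polynomial below supplies log(m).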
5611
5612 // Get the significand and build it into a floating-point number with
5613 // exponent of 1.
5614 SDValue X = GetSignificand(DAG, Op1, dl);
5615
5616 SDValue LogOfMantissa;
5617 if (LimitFloatPrecision <= 6) {
5618 // For floating-point precision of 6:
5619 //
5620     // LogOfMantissa =
5621 // -1.1609546f +
5622 // (1.4034025f - 0.23903021f * x) * x;
5623 //
5624 // error 0.0034276066, which is better than 8 bits
5625 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5626 getF32Constant(DAG, 0xbe74c456, dl));
5627 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5628 getF32Constant(DAG, 0x3fb3a2b1, dl));
5629 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5630 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5631 getF32Constant(DAG, 0x3f949a29, dl));
5632 } else if (LimitFloatPrecision <= 12) {
5633 // For floating-point precision of 12:
5634 //
5635 // LogOfMantissa =
5636 // -1.7417939f +
5637 // (2.8212026f +
5638 // (-1.4699568f +
5639 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5640 //
5641 // error 0.000061011436, which is 14 bits
5642 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5643 getF32Constant(DAG, 0xbd67b6d6, dl));
5644 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5645 getF32Constant(DAG, 0x3ee4f4b8, dl));
5646 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5647 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5648 getF32Constant(DAG, 0x3fbc278b, dl));
5649 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5650 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5651 getF32Constant(DAG, 0x40348e95, dl));
5652 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5653 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5654 getF32Constant(DAG, 0x3fdef31a, dl));
5655 } else { // LimitFloatPrecision <= 18
5656 // For floating-point precision of 18:
5657 //
5658 // LogOfMantissa =
5659 // -2.1072184f +
5660 // (4.2372794f +
5661 // (-3.7029485f +
5662 // (2.2781945f +
5663 // (-0.87823314f +
5664 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5665 //
5666 // error 0.0000023660568, which is better than 18 bits
5667 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5668 getF32Constant(DAG, 0xbc91e5ac, dl));
5669 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5670 getF32Constant(DAG, 0x3e4350aa, dl));
5671 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5672 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5673 getF32Constant(DAG, 0x3f60d3e3, dl));
5674 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5675 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5676 getF32Constant(DAG, 0x4011cdf0, dl));
5677 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5678 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5679 getF32Constant(DAG, 0x406cfd1c, dl));
5680 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5681 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5682 getF32Constant(DAG, 0x408797cb, dl));
5683 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5684 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5685 getF32Constant(DAG, 0x4006dcab, dl));
5686 }
5687
5688 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5689 }
5690
5691 // No special expansion.
5692 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5693 }
5694
5695 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5696 /// limited-precision mode.
5697 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5698 const TargetLowering &TLI, SDNodeFlags Flags) {
5699 // TODO: What fast-math-flags should be set on the floating-point nodes?
5700
5701 if (Op.getValueType() == MVT::f32 &&
5702 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5703 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5704
5705 // Get the exponent.
5706 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
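    // With x = 2^n * m and m in [1,2), log2(x) = n + log2(m), so unlike
    // expandLog the exponent needs no scaling here.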
5707
5708 // Get the significand and build it into a floating-point number with
5709 // exponent of 1.
5710 SDValue X = GetSignificand(DAG, Op1, dl);
5711
5712     // Different possible minimax approximations of the significand in
5713     // floating-point, for various degrees of accuracy over [1,2].
5714 SDValue Log2ofMantissa;
5715 if (LimitFloatPrecision <= 6) {
5716 // For floating-point precision of 6:
5717 //
5718 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5719 //
5720 // error 0.0049451742, which is more than 7 bits
5721 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5722 getF32Constant(DAG, 0xbeb08fe0, dl));
5723 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5724 getF32Constant(DAG, 0x40019463, dl));
5725 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5726 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5727 getF32Constant(DAG, 0x3fd6633d, dl));
5728 } else if (LimitFloatPrecision <= 12) {
5729 // For floating-point precision of 12:
5730 //
5731 // Log2ofMantissa =
5732 // -2.51285454f +
5733 // (4.07009056f +
5734 // (-2.12067489f +
5735 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5736 //
5737 // error 0.0000876136000, which is better than 13 bits
5738 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5739 getF32Constant(DAG, 0xbda7262e, dl));
5740 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5741 getF32Constant(DAG, 0x3f25280b, dl));
5742 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5743 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5744 getF32Constant(DAG, 0x4007b923, dl));
5745 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5746 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5747 getF32Constant(DAG, 0x40823e2f, dl));
5748 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5749 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5750 getF32Constant(DAG, 0x4020d29c, dl));
5751 } else { // LimitFloatPrecision <= 18
5752 // For floating-point precision of 18:
5753 //
5754 // Log2ofMantissa =
5755 // -3.0400495f +
5756 // (6.1129976f +
5757 // (-5.3420409f +
5758 // (3.2865683f +
5759 // (-1.2669343f +
5760 // (0.27515199f -
5761 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5762 //
5763 // error 0.0000018516, which is better than 18 bits
5764 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5765 getF32Constant(DAG, 0xbcd2769e, dl));
5766 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5767 getF32Constant(DAG, 0x3e8ce0b9, dl));
5768 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5769 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5770 getF32Constant(DAG, 0x3fa22ae7, dl));
5771 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5772 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5773 getF32Constant(DAG, 0x40525723, dl));
5774 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5775 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5776 getF32Constant(DAG, 0x40aaf200, dl));
5777 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5778 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5779 getF32Constant(DAG, 0x40c39dad, dl));
5780 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5781 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5782 getF32Constant(DAG, 0x4042902c, dl));
5783 }
5784
5785 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5786 }
5787
5788 // No special expansion.
5789 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5790 }
5791
5792 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5793 /// limited-precision mode.
5794 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5795 const TargetLowering &TLI, SDNodeFlags Flags) {
5796 // TODO: What fast-math-flags should be set on the floating-point nodes?
5797
5798 if (Op.getValueType() == MVT::f32 &&
5799 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5800 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5801
5802 // Scale the exponent by log10(2) [0.30102999f].
5803 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5804 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5805 getF32Constant(DAG, 0x3e9a209a, dl));
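    // As in expandLog: with x = 2^n * m and m in [1,2), log10(x) =
    // n * log10(2) + log10(m); the polynomial below approximates log10(m).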
5806
5807 // Get the significand and build it into a floating-point number with
5808 // exponent of 1.
5809 SDValue X = GetSignificand(DAG, Op1, dl);
5810
5811 SDValue Log10ofMantissa;
5812 if (LimitFloatPrecision <= 6) {
5813 // For floating-point precision of 6:
5814 //
5815 // Log10ofMantissa =
5816 // -0.50419619f +
5817 // (0.60948995f - 0.10380950f * x) * x;
5818 //
5819 // error 0.0014886165, which is 6 bits
5820 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5821 getF32Constant(DAG, 0xbdd49a13, dl));
5822 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5823 getF32Constant(DAG, 0x3f1c0789, dl));
5824 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5825 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5826 getF32Constant(DAG, 0x3f011300, dl));
5827 } else if (LimitFloatPrecision <= 12) {
5828 // For floating-point precision of 12:
5829 //
5830 // Log10ofMantissa =
5831 // -0.64831180f +
5832 // (0.91751397f +
5833 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5834 //
5835 // error 0.00019228036, which is better than 12 bits
5836 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5837 getF32Constant(DAG, 0x3d431f31, dl));
5838 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5839 getF32Constant(DAG, 0x3ea21fb2, dl));
5840 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5841 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5842 getF32Constant(DAG, 0x3f6ae232, dl));
5843 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5844 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5845 getF32Constant(DAG, 0x3f25f7c3, dl));
5846 } else { // LimitFloatPrecision <= 18
5847 // For floating-point precision of 18:
5848 //
5849 // Log10ofMantissa =
5850 // -0.84299375f +
5851 // (1.5327582f +
5852 // (-1.0688956f +
5853 // (0.49102474f +
5854 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5855 //
5856 // error 0.0000037995730, which is better than 18 bits
5857 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5858 getF32Constant(DAG, 0x3c5d51ce, dl));
5859 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5860 getF32Constant(DAG, 0x3e00685a, dl));
5861 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5862 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5863 getF32Constant(DAG, 0x3efb6798, dl));
5864 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5865 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5866 getF32Constant(DAG, 0x3f88d192, dl));
5867 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5868 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5869 getF32Constant(DAG, 0x3fc4316c, dl));
5870 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5871 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5872 getF32Constant(DAG, 0x3f57ce70, dl));
5873 }
5874
5875 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5876 }
5877
5878 // No special expansion.
5879 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5880 }
5881
5882 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5883 /// limited-precision mode.
5884 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5885 const TargetLowering &TLI, SDNodeFlags Flags) {
5886 if (Op.getValueType() == MVT::f32 &&
5887 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5888 return getLimitedPrecisionExp2(Op, dl, DAG);
5889
5890 // No special expansion.
5891 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5892 }
5893
5894 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5895 /// limited-precision mode with x == 10.0f.
5896 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5897 SelectionDAG &DAG, const TargetLowering &TLI,
5898 SDNodeFlags Flags) {
5899 bool IsExp10 = false;
5900 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5901 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5902 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5903 APFloat Ten(10.0f);
5904 IsExp10 = LHSC->isExactlyValue(Ten);
5905 }
5906 }
5907
5908 // TODO: What fast-math-flags should be set on the FMUL node?
5909 if (IsExp10) {
5910 // Put the exponent in the right bit position for later addition to the
5911 // final result:
5912 //
5913 // #define LOG2OF10 3.3219281f
5914 // t0 = Op * LOG2OF10;
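    //
    // (10^x = 2^(x * log2(10)); 0x40549a78 below is log2(10) ~= 3.3219281
    // as an f32 bit pattern.)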
5915 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5916 getF32Constant(DAG, 0x40549a78, dl));
5917 return getLimitedPrecisionExp2(t0, dl, DAG);
5918 }
5919
5920 // No special expansion.
5921 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5922 }
5923
5924 /// ExpandPowI - Expand a llvm.powi intrinsic.
5925 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5926 SelectionDAG &DAG) {
5927 // If RHS is a constant, we can expand this out to a multiplication tree if
5928 // it's beneficial on the target, otherwise we end up lowering to a call to
5929 // __powidf2 (for example).
5930 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5931 unsigned Val = RHSC->getSExtValue();
5932
5933 // powi(x, 0) -> 1.0
5934 if (Val == 0)
5935 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5936
5937 if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5938 Val, DAG.shouldOptForSize())) {
5939 // Get the exponent as a positive value.
5940 if ((int)Val < 0)
5941 Val = -Val;
5942 // We use the simple binary decomposition method to generate the multiply
5943 // sequence. There are more optimal ways to do this (for example,
5944 // powi(x,15) generates one more multiply than it should), but this has
5945 // the benefit of being both really simple and much better than a libcall.
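      // For example, Val = 5 (binary 101) runs as:
      //   bit 0 set:   Res = x             (CurSquare -> x^2)
      //   bit 1 clear:                     (CurSquare -> x^4)
      //   bit 2 set:   Res = x * x^4 = x^5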
5946 SDValue Res; // Logically starts equal to 1.0
5947 SDValue CurSquare = LHS;
5948 // TODO: Intrinsics should have fast-math-flags that propagate to these
5949 // nodes.
5950 while (Val) {
5951 if (Val & 1) {
5952 if (Res.getNode())
5953 Res =
5954 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5955 else
5956 Res = CurSquare; // 1.0*CurSquare.
5957 }
5958
5959 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5960 CurSquare, CurSquare);
5961 Val >>= 1;
5962 }
5963
5964 // If the original was negative, invert the result, producing 1/(x*x*x).
5965 if (RHSC->getSExtValue() < 0)
5966 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5967 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5968 return Res;
5969 }
5970 }
5971
5972 // Otherwise, expand to a libcall.
5973 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5974 }
5975
5976 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5977 SDValue LHS, SDValue RHS, SDValue Scale,
5978 SelectionDAG &DAG, const TargetLowering &TLI) {
5979 EVT VT = LHS.getValueType();
5980 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5981 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5982 LLVMContext &Ctx = *DAG.getContext();
5983
5984 // If the type is legal but the operation isn't, this node might survive all
5985 // the way to operation legalization. If we end up there and we do not have
5986 // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5987 // node.
5988
5989 // Coax the legalizer into expanding the node during type legalization instead
5990 // by bumping the size by one bit. This will force it to Promote, enabling the
5991 // early expansion and avoiding the need to expand later.
5992
5993 // We don't have to do this if Scale is 0; that can always be expanded, unless
5994 // it's a saturating signed operation. Those can experience true integer
5995 // division overflow, a case which we must avoid.
5996
5997 // FIXME: We wouldn't have to do this (or any of the early
5998 // expansion/promotion) if it was possible to expand a libcall of an
5999 // illegal type during operation legalization. But it's not, so things
6000 // get a bit hacky.
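  // As an illustration (assuming such a target exists): an i32 sdiv.fix.sat
  // with a nonzero scale, where i32 is legal but the operation is neither
  // Legal nor Custom, is promoted to i33 below; i33 is illegal, so type
  // legalization Promotes it and the operation gets expanded early.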
6001 unsigned ScaleInt = Scale->getAsZExtVal();
6002 if ((ScaleInt > 0 || (Saturating && Signed)) &&
6003 (TLI.isTypeLegal(VT) ||
6004 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
6005 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
6006 Opcode, VT, ScaleInt);
6007 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
6008 EVT PromVT;
6009 if (VT.isScalarInteger())
6010 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
6011 else if (VT.isVector()) {
6012 PromVT = VT.getVectorElementType();
6013 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
6014 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
6015 } else
6016 llvm_unreachable("Wrong VT for DIVFIX?");
6017 LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
6018 RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
6019 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
6020 // For saturating operations, we need to shift up the LHS to get the
6021 // proper saturation width, and then shift down again afterwards.
6022 if (Saturating)
6023 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
6024 DAG.getConstant(1, DL, ShiftTy));
6025 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
6026 if (Saturating)
6027 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
6028 DAG.getConstant(1, DL, ShiftTy));
6029 return DAG.getZExtOrTrunc(Res, DL, VT);
6030 }
6031 }
6032
6033 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
6034 }
6035
6036 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
6037 // bitcasted, or split argument. Returns a list of <Register, size in bits> pairs.
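// For example, an i64 argument split into two i32 copies is seen here as
// (BUILD_PAIR (CopyFromReg r1), (CopyFromReg r2)) and yields
// {<r1, 32>, <r2, 32>}.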
6038 static void
6039 getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
6040 const SDValue &N) {
6041 switch (N.getOpcode()) {
6042 case ISD::CopyFromReg: {
6043 SDValue Op = N.getOperand(1);
6044 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
6045 Op.getValueType().getSizeInBits());
6046 return;
6047 }
6048 case ISD::BITCAST:
6049 case ISD::AssertZext:
6050 case ISD::AssertSext:
6051 case ISD::TRUNCATE:
6052 getUnderlyingArgRegs(Regs, N.getOperand(0));
6053 return;
6054 case ISD::BUILD_PAIR:
6055 case ISD::BUILD_VECTOR:
6056 case ISD::CONCAT_VECTORS:
6057 for (SDValue Op : N->op_values())
6058 getUnderlyingArgRegs(Regs, Op);
6059 return;
6060 default:
6061 return;
6062 }
6063 }
6064
6065 /// If the DbgValueInst is a dbg_value of a function argument, create the
6066 /// corresponding DBG_VALUE machine instruction for it now. At the end of
6067 /// instruction selection, they will be inserted to the entry BB.
6068 /// We don't currently support this for variadic dbg_values, as they shouldn't
6069 /// appear for function arguments or in the prologue.
6070 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6071 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
6072 DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
6073 const Argument *Arg = dyn_cast<Argument>(V);
6074 if (!Arg)
6075 return false;
6076
6077 MachineFunction &MF = DAG.getMachineFunction();
6078 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6079
6080 // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
6081 // we've been asked to pursue.
6082 auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
6083 bool Indirect) {
6084 if (Reg.isVirtual() && MF.useDebugInstrRef()) {
6085 // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
6086 // pointing at the VReg, which will be patched up later.
6087 auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
6088 SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
6089 /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
6090 /* isKill */ false, /* isDead */ false,
6091 /* isUndef */ false, /* isEarlyClobber */ false,
6092 /* SubReg */ 0, /* isDebug */ true)});
6093
6094 auto *NewDIExpr = FragExpr;
6095 // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
6096 // the DIExpression.
6097 if (Indirect)
6098 NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
6099 SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
6100 NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
6101 return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
6102 } else {
6103 // Create a completely standard DBG_VALUE.
6104 auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
6105 return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
6106 }
6107 };
6108
6109 if (Kind == FuncArgumentDbgValueKind::Value) {
6110 // ArgDbgValues are hoisted to the beginning of the entry block. So we
6111 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
6112 // the entry block.
6113 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
6114 if (!IsInEntryBlock)
6115 return false;
6116
6117 // ArgDbgValues are hoisted to the beginning of the entry block. So we
6118 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
6119 // variable that also is a param.
6120 //
6121 // Although, if we are at the top of the entry block already, we can still
6122 // emit using ArgDbgValue. This might catch some situations when the
6123 // dbg.value refers to an argument that isn't used in the entry block, so
6124 // any CopyToReg node would be optimized out and the only way to express
6125 // this DBG_VALUE is by using the physical reg (or FI) as done in this
6126 // method. ArgDbgValues are hoisted to the beginning of the entry block. So
6127 // we should only emit as ArgDbgValue if the Variable is an argument to the
6128 // current function, and the dbg.value intrinsic is found in the entry
6129 // block.
6130 bool VariableIsFunctionInputArg = Variable->isParameter() &&
6131 !DL->getInlinedAt();
6132 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
6133 if (!IsInPrologue && !VariableIsFunctionInputArg)
6134 return false;
6135
6136     // Here we assume that a function argument at the IR level can only be used
6137     // to describe one input parameter at the source level. If we for example have
6138 // source code like this
6139 //
6140 // struct A { long x, y; };
6141 // void foo(struct A a, long b) {
6142 // ...
6143 // b = a.x;
6144 // ...
6145 // }
6146 //
6147 // and IR like this
6148 //
6149 // define void @foo(i32 %a1, i32 %a2, i32 %b) {
6150 // entry:
6151 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
6152 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
6153 // call void @llvm.dbg.value(metadata i32 %b, "b",
6154 // ...
6155 // call void @llvm.dbg.value(metadata i32 %a1, "b"
6156 // ...
6157 //
6158 // then the last dbg.value is describing a parameter "b" using a value that
6159     // is an argument. But since we have already used %a1 to describe a parameter,
6160 // we should not handle that last dbg.value here (that would result in an
6161 // incorrect hoisting of the DBG_VALUE to the function entry).
6162     // Notice that we allow one dbg.value per IR-level argument, to accommodate
6163     // the situation with fragments above.
6164 // If there is no node for the value being handled, we return true to skip
6165 // the normal generation of debug info, as it would kill existing debug
6166 // info for the parameter in case of duplicates.
6167 if (VariableIsFunctionInputArg) {
6168 unsigned ArgNo = Arg->getArgNo();
6169 if (ArgNo >= FuncInfo.DescribedArgs.size())
6170 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
6171 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
6172 return !NodeMap[V].getNode();
6173 FuncInfo.DescribedArgs.set(ArgNo);
6174 }
6175 }
6176
6177 bool IsIndirect = false;
6178 std::optional<MachineOperand> Op;
6179 // Some arguments' frame index is recorded during argument lowering.
6180 int FI = FuncInfo.getArgumentFrameIndex(Arg);
6181 if (FI != std::numeric_limits<int>::max())
6182 Op = MachineOperand::CreateFI(FI);
6183
6184 SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes;
6185 if (!Op && N.getNode()) {
6186 getUnderlyingArgRegs(ArgRegsAndSizes, N);
6187 Register Reg;
6188 if (ArgRegsAndSizes.size() == 1)
6189 Reg = ArgRegsAndSizes.front().first;
6190
6191 if (Reg && Reg.isVirtual()) {
6192 MachineRegisterInfo &RegInfo = MF.getRegInfo();
6193 Register PR = RegInfo.getLiveInPhysReg(Reg);
6194 if (PR)
6195 Reg = PR;
6196 }
6197 if (Reg) {
6198 Op = MachineOperand::CreateReg(Reg, false);
6199 IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6200 }
6201 }
6202
6203 if (!Op && N.getNode()) {
6204 // Check if frame index is available.
6205 SDValue LCandidate = peekThroughBitcasts(N);
6206 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
6207 if (FrameIndexSDNode *FINode =
6208 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6209 Op = MachineOperand::CreateFI(FINode->getIndex());
6210 }
6211
6212 if (!Op) {
6213 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
6214 auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>>
6215 SplitRegs) {
6216 unsigned Offset = 0;
6217 for (const auto &RegAndSize : SplitRegs) {
6218 // If the expression is already a fragment, the current register
6219 // offset+size might extend beyond the fragment. In this case, only
6220 // the register bits that are inside the fragment are relevant.
6221 int RegFragmentSizeInBits = RegAndSize.second;
6222 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
6223 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6224 // The register is entirely outside the expression fragment,
6225 // so is irrelevant for debug info.
6226 if (Offset >= ExprFragmentSizeInBits)
6227 break;
6228 // The register is partially outside the expression fragment, only
6229 // the low bits within the fragment are relevant for debug info.
6230 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6231 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
6232 }
6233 }
6234
6235 auto FragmentExpr = DIExpression::createFragmentExpression(
6236 Expr, Offset, RegFragmentSizeInBits);
6237 Offset += RegAndSize.second;
6238 // If a valid fragment expression cannot be created, the variable's
6239 // correct value cannot be determined and so it is set as poison.
6240 if (!FragmentExpr) {
6241 SDDbgValue *SDV = DAG.getConstantDbgValue(
6242 Variable, Expr, PoisonValue::get(V->getType()), DL, SDNodeOrder);
6243 DAG.AddDbgValue(SDV, false);
6244 continue;
6245 }
6246 MachineInstr *NewMI =
6247 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6248 Kind != FuncArgumentDbgValueKind::Value);
6249 FuncInfo.ArgDbgValues.push_back(NewMI);
6250 }
6251 };
6252
6253 // Check if ValueMap has reg number.
6254 DenseMap<const Value *, Register>::const_iterator
6255 VMI = FuncInfo.ValueMap.find(V);
6256 if (VMI != FuncInfo.ValueMap.end()) {
6257 const auto &TLI = DAG.getTargetLoweringInfo();
6258 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
6259 V->getType(), std::nullopt);
6260 if (RFV.occupiesMultipleRegs()) {
6261 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6262 return true;
6263 }
6264
6265 Op = MachineOperand::CreateReg(VMI->second, false);
6266 IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6267 } else if (ArgRegsAndSizes.size() > 1) {
6268 // This was split due to the calling convention, and no virtual register
6269 // mapping exists for the value.
6270 splitMultiRegDbgValue(ArgRegsAndSizes);
6271 return true;
6272 }
6273 }
6274
6275 if (!Op)
6276 return false;
6277
6278 assert(Variable->isValidLocationForIntrinsic(DL) &&
6279 "Expected inlined-at fields to agree");
6280 MachineInstr *NewMI = nullptr;
6281
6282 if (Op->isReg())
6283 NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
6284 else
6285 NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
6286 Variable, Expr);
6287
6288   // Record the DBG_VALUE so it can be inserted at the start of the entry block.
6289 FuncInfo.ArgDbgValues.push_back(NewMI);
6290 return true;
6291 }
6292
6293 /// Return the appropriate SDDbgValue based on N.
6294 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
6295 DILocalVariable *Variable,
6296 DIExpression *Expr,
6297 const DebugLoc &dl,
6298 unsigned DbgSDNodeOrder) {
6299 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
6300 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
6301 // stack slot locations.
6302 //
6303 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
6304 // debug values here after optimization:
6305 //
6306 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
6307 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
6308 //
6309 // Both describe the direct values of their associated variables.
6310 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6311 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6312 }
6313 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
6314 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6315 }
6316
6317 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
6318 switch (Intrinsic) {
6319 case Intrinsic::smul_fix:
6320 return ISD::SMULFIX;
6321 case Intrinsic::umul_fix:
6322 return ISD::UMULFIX;
6323 case Intrinsic::smul_fix_sat:
6324 return ISD::SMULFIXSAT;
6325 case Intrinsic::umul_fix_sat:
6326 return ISD::UMULFIXSAT;
6327 case Intrinsic::sdiv_fix:
6328 return ISD::SDIVFIX;
6329 case Intrinsic::udiv_fix:
6330 return ISD::UDIVFIX;
6331 case Intrinsic::sdiv_fix_sat:
6332 return ISD::SDIVFIXSAT;
6333 case Intrinsic::udiv_fix_sat:
6334 return ISD::UDIVFIXSAT;
6335 default:
6336 llvm_unreachable("Unhandled fixed point intrinsic");
6337 }
6338 }
6339
6340 /// Given a @llvm.call.preallocated.setup, return the corresponding
6341 /// preallocated call.
6342 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6343 assert(cast<CallBase>(PreallocatedSetup)
6344 ->getCalledFunction()
6345 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6346 "expected call_preallocated_setup Value");
6347 for (const auto *U : PreallocatedSetup->users()) {
6348 auto *UseCall = cast<CallBase>(U);
6349 const Function *Fn = UseCall->getCalledFunction();
6350 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6351 return UseCall;
6352 }
6353 }
6354 llvm_unreachable("expected corresponding call to preallocated setup/arg");
6355 }
6356
6357 /// If DI is a debug value with an EntryValue expression, lower it using the
6358 /// corresponding physical register of the associated Argument value
6359 /// (guaranteed to exist by the verifier).
6360 bool SelectionDAGBuilder::visitEntryValueDbgValue(
6361 ArrayRef<const Value *> Values, DILocalVariable *Variable,
6362 DIExpression *Expr, DebugLoc DbgLoc) {
6363 if (!Expr->isEntryValue() || !hasSingleElement(Values))
6364 return false;
6365
6366 // These properties are guaranteed by the verifier.
6367 const Argument *Arg = cast<Argument>(Values[0]);
6368 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6369
6370 auto ArgIt = FuncInfo.ValueMap.find(Arg);
6371 if (ArgIt == FuncInfo.ValueMap.end()) {
6372 LLVM_DEBUG(
6373 dbgs() << "Dropping dbg.value: expression is entry_value but "
6374 "couldn't find an associated register for the Argument\n");
6375 return true;
6376 }
6377 Register ArgVReg = ArgIt->getSecond();
6378
6379 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6380 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6381 SDDbgValue *SDV = DAG.getVRegDbgValue(
6382           Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
6383 DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6384 return true;
6385 }
6386 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6387 "couldn't find a physical register\n");
6388 return true;
6389 }
6390
6391 /// Lower a call to one of the experimental convergence control intrinsics.
6392 void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
6393 unsigned Intrinsic) {
6394 SDLoc sdl = getCurSDLoc();
6395 switch (Intrinsic) {
6396 case Intrinsic::experimental_convergence_anchor:
6397 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6398 break;
6399 case Intrinsic::experimental_convergence_entry:
6400 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6401 break;
6402 case Intrinsic::experimental_convergence_loop: {
6403 auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
6404 auto *Token = Bundle->Inputs[0].get();
6405 setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6406 getValue(Token)));
6407 break;
6408 }
6409 }
6410 }
6411
6412 void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
6413 unsigned IntrinsicID) {
6414 // For now, we're only lowering an 'add' histogram.
6415 // We can add others later, e.g. saturating adds, min/max.
6416 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6417 "Tried to lower unsupported histogram type");
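  // Informally (a sketch of the semantics, not normative): for each lane i
  // with Mask[i] set, memory[Ptr[i]] += Inc, and lanes that alias the same
  // address each contribute an update.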
6418 SDLoc sdl = getCurSDLoc();
6419 Value *Ptr = I.getOperand(0);
6420 SDValue Inc = getValue(I.getOperand(1));
6421 SDValue Mask = getValue(I.getOperand(2));
6422
6423 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6424 DataLayout TargetDL = DAG.getDataLayout();
6425 EVT VT = Inc.getValueType();
6426 Align Alignment = DAG.getEVTAlign(VT);
6427
6428 const MDNode *Ranges = getRangeMetadata(I);
6429
6430 SDValue Root = DAG.getRoot();
6431 SDValue Base;
6432 SDValue Index;
6433 ISD::MemIndexType IndexType;
6434 SDValue Scale;
6435 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
6436 I.getParent(), VT.getScalarStoreSize());
6437
6438 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
6439
6440 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6441 MachinePointerInfo(AS),
6442 MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
6443 MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
6444
6445 if (!UniformBase) {
6446 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6447 Index = getValue(Ptr);
6448 IndexType = ISD::SIGNED_SCALED;
6449 Scale =
6450 DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6451 }
6452
6453 EVT IdxVT = Index.getValueType();
6454 EVT EltTy = IdxVT.getVectorElementType();
6455 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
6456 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
6457 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
6458 }
6459
6460 SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6461
6462 SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
6463 SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
6464 Ops, MMO, IndexType);
6465
6466 setValue(&I, Histogram);
6467 DAG.setRoot(Histogram);
6468 }
6469
6470 void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
6471 unsigned Intrinsic) {
6472 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6473 "Tried lowering invalid vector extract last");
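  // Informally: the result is Data[i] for the highest lane i with Mask[i]
  // set; when no lanes are active, the default operand is used instead (see
  // the select built below).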
6474 SDLoc sdl = getCurSDLoc();
6475 const DataLayout &Layout = DAG.getDataLayout();
6476 SDValue Data = getValue(I.getOperand(0));
6477 SDValue Mask = getValue(I.getOperand(1));
6478
6479 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6480 EVT ResVT = TLI.getValueType(Layout, I.getType());
6481
6482 EVT ExtVT = TLI.getVectorIdxTy(Layout);
6483 SDValue Idx = DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6484 SDValue Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl, ResVT, Data, Idx);
6485
6486 Value *Default = I.getOperand(2);
6487 if (!isa<PoisonValue>(Default) && !isa<UndefValue>(Default)) {
6488 SDValue PassThru = getValue(Default);
6489 EVT BoolVT = Mask.getValueType().getScalarType();
6490 SDValue AnyActive = DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6491 Result = DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6492 }
6493
6494 setValue(&I, Result);
6495 }
6496
6497 /// Lower the call to the specified intrinsic function.
6498 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6499 unsigned Intrinsic) {
6500 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6501 SDLoc sdl = getCurSDLoc();
6502 DebugLoc dl = getCurDebugLoc();
6503 SDValue Res;
6504
6505 SDNodeFlags Flags;
6506 if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6507 Flags.copyFMF(*FPOp);
6508
6509 switch (Intrinsic) {
6510 default:
6511 // By default, turn this into a target intrinsic node.
6512 visitTargetIntrinsic(I, Intrinsic);
6513 return;
6514 case Intrinsic::vscale: {
6515 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6516 setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6517 return;
6518 }
6519 case Intrinsic::vastart: visitVAStart(I); return;
6520 case Intrinsic::vaend: visitVAEnd(I); return;
6521 case Intrinsic::vacopy: visitVACopy(I); return;
6522 case Intrinsic::returnaddress:
6523 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6524 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6525 getValue(I.getArgOperand(0))));
6526 return;
6527 case Intrinsic::addressofreturnaddress:
6528 setValue(&I,
6529 DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6530 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6531 return;
6532 case Intrinsic::sponentry:
6533 setValue(&I,
6534 DAG.getNode(ISD::SPONENTRY, sdl,
6535 TLI.getValueType(DAG.getDataLayout(), I.getType())));
6536 return;
6537 case Intrinsic::frameaddress:
6538 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6539 TLI.getFrameIndexTy(DAG.getDataLayout()),
6540 getValue(I.getArgOperand(0))));
6541 return;
6542 case Intrinsic::read_volatile_register:
6543 case Intrinsic::read_register: {
6544 Value *Reg = I.getArgOperand(0);
6545 SDValue Chain = getRoot();
6546 SDValue RegName =
6547 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6548 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6549 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6550 DAG.getVTList(VT, MVT::Other), Chain, RegName);
6551 setValue(&I, Res);
6552 DAG.setRoot(Res.getValue(1));
6553 return;
6554 }
6555 case Intrinsic::write_register: {
6556 Value *Reg = I.getArgOperand(0);
6557 Value *RegValue = I.getArgOperand(1);
6558 SDValue Chain = getRoot();
6559 SDValue RegName =
6560 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6561 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6562 RegName, getValue(RegValue)));
6563 return;
6564 }
6565 case Intrinsic::memcpy:
6566 case Intrinsic::memcpy_inline: {
6567 const auto &MCI = cast<MemCpyInst>(I);
6568 SDValue Dst = getValue(I.getArgOperand(0));
6569 SDValue Src = getValue(I.getArgOperand(1));
6570 SDValue Size = getValue(I.getArgOperand(2));
6571 assert((!MCI.isForceInlined() || isa<ConstantSDNode>(Size)) &&
6572 "memcpy_inline needs constant size");
6573     // @llvm.memcpy and @llvm.memcpy.inline define 0 and 1 to both mean no alignment.
6574 Align DstAlign = MCI.getDestAlign().valueOrOne();
6575 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6576 Align Alignment = std::min(DstAlign, SrcAlign);
6577 bool isVol = MCI.isVolatile();
6578 // FIXME: Support passing different dest/src alignments to the memcpy DAG
6579 // node.
6580 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6581 SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol,
6582 MCI.isForceInlined(), &I, std::nullopt,
6583 MachinePointerInfo(I.getArgOperand(0)),
6584 MachinePointerInfo(I.getArgOperand(1)),
6585 I.getAAMetadata(), BatchAA);
6586 updateDAGForMaybeTailCall(MC);
6587 return;
6588 }
6589 case Intrinsic::memset:
6590 case Intrinsic::memset_inline: {
6591 const auto &MSII = cast<MemSetInst>(I);
6592 SDValue Dst = getValue(I.getArgOperand(0));
6593 SDValue Value = getValue(I.getArgOperand(1));
6594 SDValue Size = getValue(I.getArgOperand(2));
6595 assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) &&
6596 "memset_inline needs constant size");
6597 // @llvm.memset defines 0 and 1 to both mean no alignment.
6598 Align DstAlign = MSII.getDestAlign().valueOrOne();
6599 bool isVol = MSII.isVolatile();
6600 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6601 SDValue MC = DAG.getMemset(
6602 Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
6603 &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6604 updateDAGForMaybeTailCall(MC);
6605 return;
6606 }
6607 case Intrinsic::memmove: {
6608 const auto &MMI = cast<MemMoveInst>(I);
6609 SDValue Op1 = getValue(I.getArgOperand(0));
6610 SDValue Op2 = getValue(I.getArgOperand(1));
6611 SDValue Op3 = getValue(I.getArgOperand(2));
6612 // @llvm.memmove defines 0 and 1 to both mean no alignment.
6613 Align DstAlign = MMI.getDestAlign().valueOrOne();
6614 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6615 Align Alignment = std::min(DstAlign, SrcAlign);
6616 bool isVol = MMI.isVolatile();
6617 // FIXME: Support passing different dest/src alignments to the memmove DAG
6618 // node.
6619 SDValue Root = isVol ? getRoot() : getMemoryRoot();
6620 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
6621 /* OverrideTailCall */ std::nullopt,
6622 MachinePointerInfo(I.getArgOperand(0)),
6623 MachinePointerInfo(I.getArgOperand(1)),
6624 I.getAAMetadata(), BatchAA);
6625 updateDAGForMaybeTailCall(MM);
6626 return;
6627 }
6628 case Intrinsic::memcpy_element_unordered_atomic: {
6629 auto &MI = cast<AnyMemCpyInst>(I);
6630 SDValue Dst = getValue(MI.getRawDest());
6631 SDValue Src = getValue(MI.getRawSource());
6632 SDValue Length = getValue(MI.getLength());
6633
6634 Type *LengthTy = MI.getLength()->getType();
6635 unsigned ElemSz = MI.getElementSizeInBytes();
6636 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6637 SDValue MC =
6638 DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6639 isTC, MachinePointerInfo(MI.getRawDest()),
6640 MachinePointerInfo(MI.getRawSource()));
6641 updateDAGForMaybeTailCall(MC);
6642 return;
6643 }
6644 case Intrinsic::memmove_element_unordered_atomic: {
6645 auto &MI = cast<AnyMemMoveInst>(I);
6646 SDValue Dst = getValue(MI.getRawDest());
6647 SDValue Src = getValue(MI.getRawSource());
6648 SDValue Length = getValue(MI.getLength());
6649
6650 Type *LengthTy = MI.getLength()->getType();
6651 unsigned ElemSz = MI.getElementSizeInBytes();
6652 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6653 SDValue MC =
6654 DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6655 isTC, MachinePointerInfo(MI.getRawDest()),
6656 MachinePointerInfo(MI.getRawSource()));
6657 updateDAGForMaybeTailCall(MC);
6658 return;
6659 }
6660 case Intrinsic::memset_element_unordered_atomic: {
6661 auto &MI = cast<AnyMemSetInst>(I);
6662 SDValue Dst = getValue(MI.getRawDest());
6663 SDValue Val = getValue(MI.getValue());
6664 SDValue Length = getValue(MI.getLength());
6665
6666 Type *LengthTy = MI.getLength()->getType();
6667 unsigned ElemSz = MI.getElementSizeInBytes();
6668 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6669 SDValue MC =
6670 DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6671 isTC, MachinePointerInfo(MI.getRawDest()));
6672 updateDAGForMaybeTailCall(MC);
6673 return;
6674 }
6675 case Intrinsic::call_preallocated_setup: {
6676 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6677 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6678 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6679 getRoot(), SrcValue);
6680 setValue(&I, Res);
6681 DAG.setRoot(Res);
6682 return;
6683 }
6684 case Intrinsic::call_preallocated_arg: {
6685 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6686 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6687 SDValue Ops[3];
6688 Ops[0] = getRoot();
6689 Ops[1] = SrcValue;
6690 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6691 MVT::i32); // arg index
6692 SDValue Res = DAG.getNode(
6693 ISD::PREALLOCATED_ARG, sdl,
6694 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6695 setValue(&I, Res);
6696 DAG.setRoot(Res.getValue(1));
6697 return;
6698 }
6699
6700 case Intrinsic::eh_typeid_for: {
6701 // Find the type id for the given typeinfo.
6702 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6703 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6704 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6705 setValue(&I, Res);
6706 return;
6707 }
6708
6709 case Intrinsic::eh_return_i32:
6710 case Intrinsic::eh_return_i64:
6711 DAG.getMachineFunction().setCallsEHReturn(true);
6712 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6713 MVT::Other,
6714 getControlRoot(),
6715 getValue(I.getArgOperand(0)),
6716 getValue(I.getArgOperand(1))));
6717 return;
6718 case Intrinsic::eh_unwind_init:
6719 DAG.getMachineFunction().setCallsUnwindInit(true);
6720 return;
6721 case Intrinsic::eh_dwarf_cfa:
6722 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6723 TLI.getPointerTy(DAG.getDataLayout()),
6724 getValue(I.getArgOperand(0))));
6725 return;
6726 case Intrinsic::eh_sjlj_callsite: {
6727 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6728 assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
6729
6730 FuncInfo.setCurrentCallSite(CI->getZExtValue());
6731 return;
6732 }
6733 case Intrinsic::eh_sjlj_functioncontext: {
6734 // Get and store the index of the function context.
6735 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6736 AllocaInst *FnCtx =
6737 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6738 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6739 MFI.setFunctionContextIndex(FI);
6740 return;
6741 }
6742 case Intrinsic::eh_sjlj_setjmp: {
6743 SDValue Ops[2];
6744 Ops[0] = getRoot();
6745 Ops[1] = getValue(I.getArgOperand(0));
6746 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6747 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6748 setValue(&I, Op.getValue(0));
6749 DAG.setRoot(Op.getValue(1));
6750 return;
6751 }
6752 case Intrinsic::eh_sjlj_longjmp:
6753 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6754 getRoot(), getValue(I.getArgOperand(0))));
6755 return;
6756 case Intrinsic::eh_sjlj_setup_dispatch:
6757 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6758 getRoot()));
6759 return;
6760 case Intrinsic::masked_gather:
6761 visitMaskedGather(I);
6762 return;
6763 case Intrinsic::masked_load:
6764 visitMaskedLoad(I);
6765 return;
6766 case Intrinsic::masked_scatter:
6767 visitMaskedScatter(I);
6768 return;
6769 case Intrinsic::masked_store:
6770 visitMaskedStore(I);
6771 return;
6772 case Intrinsic::masked_expandload:
6773 visitMaskedLoad(I, true /* IsExpanding */);
6774 return;
6775 case Intrinsic::masked_compressstore:
6776 visitMaskedStore(I, true /* IsCompressing */);
6777 return;
6778 case Intrinsic::powi:
6779 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6780 getValue(I.getArgOperand(1)), DAG));
6781 return;
6782 case Intrinsic::log:
6783 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6784 return;
6785 case Intrinsic::log2:
6786 setValue(&I,
6787 expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6788 return;
6789 case Intrinsic::log10:
6790 setValue(&I,
6791 expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6792 return;
6793 case Intrinsic::exp:
6794 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6795 return;
6796 case Intrinsic::exp2:
6797 setValue(&I,
6798 expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6799 return;
6800 case Intrinsic::pow:
6801 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6802 getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6803 return;
6804 case Intrinsic::sqrt:
6805 case Intrinsic::fabs:
6806 case Intrinsic::sin:
6807 case Intrinsic::cos:
6808 case Intrinsic::tan:
6809 case Intrinsic::asin:
6810 case Intrinsic::acos:
6811 case Intrinsic::atan:
6812 case Intrinsic::sinh:
6813 case Intrinsic::cosh:
6814 case Intrinsic::tanh:
6815 case Intrinsic::exp10:
6816 case Intrinsic::floor:
6817 case Intrinsic::ceil:
6818 case Intrinsic::trunc:
6819 case Intrinsic::rint:
6820 case Intrinsic::nearbyint:
6821 case Intrinsic::round:
6822 case Intrinsic::roundeven:
6823 case Intrinsic::canonicalize: {
6824 unsigned Opcode;
6825 // clang-format off
6826 switch (Intrinsic) {
6827 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6828 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6829 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6830 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6831 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6832 case Intrinsic::tan: Opcode = ISD::FTAN; break;
6833 case Intrinsic::asin: Opcode = ISD::FASIN; break;
6834 case Intrinsic::acos: Opcode = ISD::FACOS; break;
6835 case Intrinsic::atan: Opcode = ISD::FATAN; break;
6836 case Intrinsic::sinh: Opcode = ISD::FSINH; break;
6837 case Intrinsic::cosh: Opcode = ISD::FCOSH; break;
6838 case Intrinsic::tanh: Opcode = ISD::FTANH; break;
6839 case Intrinsic::exp10: Opcode = ISD::FEXP10; break;
6840 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6841 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6842 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6843 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6844 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6845 case Intrinsic::round: Opcode = ISD::FROUND; break;
6846 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6847 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6848 }
6849 // clang-format on
6850
6851 setValue(&I, DAG.getNode(Opcode, sdl,
6852 getValue(I.getArgOperand(0)).getValueType(),
6853 getValue(I.getArgOperand(0)), Flags));
6854 return;
6855 }
6856 case Intrinsic::atan2:
6857 setValue(&I, DAG.getNode(ISD::FATAN2, sdl,
6858 getValue(I.getArgOperand(0)).getValueType(),
6859 getValue(I.getArgOperand(0)),
6860 getValue(I.getArgOperand(1)), Flags));
6861 return;
6862 case Intrinsic::lround:
6863 case Intrinsic::llround:
6864 case Intrinsic::lrint:
6865 case Intrinsic::llrint: {
6866 unsigned Opcode;
6867 // clang-format off
6868 switch (Intrinsic) {
6869 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6870 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6871 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6872 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6873 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
6874 }
6875 // clang-format on
6876
6877 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6878 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6879 getValue(I.getArgOperand(0))));
6880 return;
6881 }
6882 case Intrinsic::minnum:
6883 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6884 getValue(I.getArgOperand(0)).getValueType(),
6885 getValue(I.getArgOperand(0)),
6886 getValue(I.getArgOperand(1)), Flags));
6887 return;
6888 case Intrinsic::maxnum:
6889 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6890 getValue(I.getArgOperand(0)).getValueType(),
6891 getValue(I.getArgOperand(0)),
6892 getValue(I.getArgOperand(1)), Flags));
6893 return;
6894 case Intrinsic::minimum:
6895 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6896 getValue(I.getArgOperand(0)).getValueType(),
6897 getValue(I.getArgOperand(0)),
6898 getValue(I.getArgOperand(1)), Flags));
6899 return;
6900 case Intrinsic::maximum:
6901 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6902 getValue(I.getArgOperand(0)).getValueType(),
6903 getValue(I.getArgOperand(0)),
6904 getValue(I.getArgOperand(1)), Flags));
6905 return;
6906 case Intrinsic::minimumnum:
6907 setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
6908 getValue(I.getArgOperand(0)).getValueType(),
6909 getValue(I.getArgOperand(0)),
6910 getValue(I.getArgOperand(1)), Flags));
6911 return;
6912 case Intrinsic::maximumnum:
6913 setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
6914 getValue(I.getArgOperand(0)).getValueType(),
6915 getValue(I.getArgOperand(0)),
6916 getValue(I.getArgOperand(1)), Flags));
6917 return;
6918 case Intrinsic::copysign:
6919 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6920 getValue(I.getArgOperand(0)).getValueType(),
6921 getValue(I.getArgOperand(0)),
6922 getValue(I.getArgOperand(1)), Flags));
6923 return;
6924 case Intrinsic::ldexp:
6925 setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6926 getValue(I.getArgOperand(0)).getValueType(),
6927 getValue(I.getArgOperand(0)),
6928 getValue(I.getArgOperand(1)), Flags));
6929 return;
6930 case Intrinsic::modf:
6931 case Intrinsic::sincos:
6932 case Intrinsic::sincospi:
6933 case Intrinsic::frexp: {
6934 unsigned Opcode;
6935 switch (Intrinsic) {
6936 default:
6937 llvm_unreachable("unexpected intrinsic");
6938 case Intrinsic::sincos:
6939 Opcode = ISD::FSINCOS;
6940 break;
6941 case Intrinsic::sincospi:
6942 Opcode = ISD::FSINCOSPI;
6943 break;
6944 case Intrinsic::modf:
6945 Opcode = ISD::FMODF;
6946 break;
6947 case Intrinsic::frexp:
6948 Opcode = ISD::FFREXP;
6949 break;
6950 }
6951 SmallVector<EVT, 2> ValueVTs;
6952 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6953 SDVTList VTs = DAG.getVTList(ValueVTs);
6954 setValue(
6955 &I, DAG.getNode(Opcode, sdl, VTs, getValue(I.getArgOperand(0)), Flags));
6956 return;
6957 }
6958 case Intrinsic::arithmetic_fence: {
6959 setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6960 getValue(I.getArgOperand(0)).getValueType(),
6961 getValue(I.getArgOperand(0)), Flags));
6962 return;
6963 }
6964 case Intrinsic::fma:
6965 setValue(&I, DAG.getNode(
6966 ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6967 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6968 getValue(I.getArgOperand(2)), Flags));
6969 return;
6970 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6971 case Intrinsic::INTRINSIC:
6972 #include "llvm/IR/ConstrainedOps.def"
6973 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6974 return;
6975 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6976 #include "llvm/IR/VPIntrinsics.def"
6977 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6978 return;
6979 case Intrinsic::fptrunc_round: {
6980 // Get the last argument, the rounding-mode metadata, and convert it to
6981 // an integer constant for the FPTRUNC_ROUND node.
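// Illustrative call (assumed IR, for exposition only):
//   %h = call half @llvm.fptrunc.round.f16.f32(float %x,
//                                              metadata !"round.towardzero")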
6982 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6983 std::optional<RoundingMode> RoundMode =
6984 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6985
6986 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6987
6988 // Propagate fast-math-flags from IR to node(s).
6989 SDNodeFlags Flags;
6990 Flags.copyFMF(*cast<FPMathOperator>(&I));
6991 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6992
6993 SDValue Result;
6994 Result = DAG.getNode(
6995 ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6996 DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
6997 setValue(&I, Result);
6998
6999 return;
7000 }
7001 case Intrinsic::fmuladd: {
7002 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
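// A sketch of the two possible lowerings (assumed IR, for exposition only):
//   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// becomes a single ISD::FMA node when fusion is allowed and profitable on
// the target, and the unfused pair ISD::FMUL + ISD::FADD otherwise.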
7003 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7004 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7005 setValue(&I, DAG.getNode(ISD::FMA, sdl,
7006 getValue(I.getArgOperand(0)).getValueType(),
7007 getValue(I.getArgOperand(0)),
7008 getValue(I.getArgOperand(1)),
7009 getValue(I.getArgOperand(2)), Flags));
7010 } else {
7011 // TODO: Intrinsic calls should have fast-math-flags.
7012 SDValue Mul = DAG.getNode(
7013 ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
7014 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
7015 SDValue Add = DAG.getNode(ISD::FADD, sdl,
7016 getValue(I.getArgOperand(0)).getValueType(),
7017 Mul, getValue(I.getArgOperand(2)), Flags);
7018 setValue(&I, Add);
7019 }
7020 return;
7021 }
7022 case Intrinsic::convert_to_fp16:
7023 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
7024 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
7025 getValue(I.getArgOperand(0)),
7026 DAG.getTargetConstant(0, sdl,
7027 MVT::i32))));
7028 return;
7029 case Intrinsic::convert_from_fp16:
7030 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
7031 TLI.getValueType(DAG.getDataLayout(), I.getType()),
7032 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7033 getValue(I.getArgOperand(0)))));
7034 return;
7035 case Intrinsic::fptosi_sat: {
7036 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7037 setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
7038 getValue(I.getArgOperand(0)),
7039 DAG.getValueType(VT.getScalarType())));
7040 return;
7041 }
7042 case Intrinsic::fptoui_sat: {
7043 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7044 setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
7045 getValue(I.getArgOperand(0)),
7046 DAG.getValueType(VT.getScalarType())));
7047 return;
7048 }
7049 case Intrinsic::set_rounding:
7050 Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7051 {getRoot(), getValue(I.getArgOperand(0))});
7052 setValue(&I, Res);
7053 DAG.setRoot(Res.getValue(0));
7054 return;
7055 case Intrinsic::is_fpclass: {
7056 const DataLayout DLayout = DAG.getDataLayout();
7057 EVT DestVT = TLI.getValueType(DLayout, I.getType());
7058 EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
7059 FPClassTest Test = static_cast<FPClassTest>(
7060 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
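// The second operand is an immediate bitmask of FPClassTest bits; e.g.
// (illustrative) a mask of fcNan (0x3) tests for both signaling and quiet
// NaNs.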
7061 MachineFunction &MF = DAG.getMachineFunction();
7062 const Function &F = MF.getFunction();
7063 SDValue Op = getValue(I.getArgOperand(0));
7064 SDNodeFlags Flags;
7065 Flags.setNoFPExcept(
7066 !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7067 // If ISD::IS_FPCLASS should be expanded, do it right now, because the
7068 // expansion can use illegal types. Expanding early allows these types to
7069 // be legalized prior to selection.
7070 if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
7071 !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
7072 SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7073 setValue(&I, Result);
7074 return;
7075 }
7076
7077 SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7078 SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
7079 setValue(&I, V);
7080 return;
7081 }
7082 case Intrinsic::get_fpenv: {
7083 const DataLayout DLayout = DAG.getDataLayout();
7084 EVT EnvVT = TLI.getValueType(DLayout, I.getType());
7085 Align TempAlign = DAG.getEVTAlign(EnvVT);
7086 SDValue Chain = getRoot();
7087 // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based
7088 // node and temporary storage on the stack.
7089 if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
7090 Res = DAG.getNode(
7091 ISD::GET_FPENV, sdl,
7092 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7093 MVT::Other),
7094 Chain);
7095 } else {
7096 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7097 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7098 auto MPI =
7099 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7100 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7101 MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
7102 TempAlign);
7103 Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7104 Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7105 }
7106 setValue(&I, Res);
7107 DAG.setRoot(Res.getValue(1));
7108 return;
7109 }
7110 case Intrinsic::set_fpenv: {
7111 const DataLayout DLayout = DAG.getDataLayout();
7112 SDValue Env = getValue(I.getArgOperand(0));
7113 EVT EnvVT = Env.getValueType();
7114 Align TempAlign = DAG.getEVTAlign(EnvVT);
7115 SDValue Chain = getRoot();
7116 // If SET_FPENV is legal or custom, use it. Otherwise load the
7117 // environment from memory.
7118 if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
7119 Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7120 } else {
7121 // Allocate stack space, copy the environment bits into it, and use this
7122 // memory in SET_FPENV_MEM.
7123 SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7124 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7125 auto MPI =
7126 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7127 Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7128 MachineMemOperand::MOStore);
7129 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7130 MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
7131 TempAlign);
7132 Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7133 }
7134 DAG.setRoot(Chain);
7135 return;
7136 }
7137 case Intrinsic::reset_fpenv:
7138 DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
7139 return;
7140 case Intrinsic::get_fpmode:
7141 Res = DAG.getNode(
7142 ISD::GET_FPMODE, sdl,
7143 DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7144 MVT::Other),
7145 DAG.getRoot());
7146 setValue(&I, Res);
7147 DAG.setRoot(Res.getValue(1));
7148 return;
7149 case Intrinsic::set_fpmode:
7150 Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
7151 getValue(I.getArgOperand(0)));
7152 DAG.setRoot(Res);
7153 return;
7154 case Intrinsic::reset_fpmode: {
7155 Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
7156 DAG.setRoot(Res);
7157 return;
7158 }
7159 case Intrinsic::pcmarker: {
7160 SDValue Tmp = getValue(I.getArgOperand(0));
7161 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
7162 return;
7163 }
7164 case Intrinsic::readcyclecounter: {
7165 SDValue Op = getRoot();
7166 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7167 DAG.getVTList(MVT::i64, MVT::Other), Op);
7168 setValue(&I, Res);
7169 DAG.setRoot(Res.getValue(1));
7170 return;
7171 }
7172 case Intrinsic::readsteadycounter: {
7173 SDValue Op = getRoot();
7174 Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7175 DAG.getVTList(MVT::i64, MVT::Other), Op);
7176 setValue(&I, Res);
7177 DAG.setRoot(Res.getValue(1));
7178 return;
7179 }
7180 case Intrinsic::bitreverse:
7181 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
7182 getValue(I.getArgOperand(0)).getValueType(),
7183 getValue(I.getArgOperand(0))));
7184 return;
7185 case Intrinsic::bswap:
7186 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
7187 getValue(I.getArgOperand(0)).getValueType(),
7188 getValue(I.getArgOperand(0))));
7189 return;
7190 case Intrinsic::cttz: {
7191 SDValue Arg = getValue(I.getArgOperand(0));
7192 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7193 EVT Ty = Arg.getValueType();
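// The second operand is the is-zero-poison flag: a zero flag selects the
// fully defined ISD::CTTZ form, a nonzero flag the _ZERO_UNDEF form.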
7194 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
7195 sdl, Ty, Arg));
7196 return;
7197 }
7198 case Intrinsic::ctlz: {
7199 SDValue Arg = getValue(I.getArgOperand(0));
7200 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7201 EVT Ty = Arg.getValueType();
7202 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
7203 sdl, Ty, Arg));
7204 return;
7205 }
7206 case Intrinsic::ctpop: {
7207 SDValue Arg = getValue(I.getArgOperand(0));
7208 EVT Ty = Arg.getValueType();
7209 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
7210 return;
7211 }
7212 case Intrinsic::fshl:
7213 case Intrinsic::fshr: {
7214 bool IsFSHL = Intrinsic == Intrinsic::fshl;
7215 SDValue X = getValue(I.getArgOperand(0));
7216 SDValue Y = getValue(I.getArgOperand(1));
7217 SDValue Z = getValue(I.getArgOperand(2));
7218 EVT VT = X.getValueType();
7219
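// When both value operands are the same node, the funnel shift degenerates
// into a rotate; e.g. (illustrative) fshl(x, x, z) is rotl(x, z).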
7220 if (X == Y) {
7221 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
7222 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
7223 } else {
7224 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
7225 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
7226 }
7227 return;
7228 }
7229 case Intrinsic::sadd_sat: {
7230 SDValue Op1 = getValue(I.getArgOperand(0));
7231 SDValue Op2 = getValue(I.getArgOperand(1));
7232 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7233 return;
7234 }
7235 case Intrinsic::uadd_sat: {
7236 SDValue Op1 = getValue(I.getArgOperand(0));
7237 SDValue Op2 = getValue(I.getArgOperand(1));
7238 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7239 return;
7240 }
7241 case Intrinsic::ssub_sat: {
7242 SDValue Op1 = getValue(I.getArgOperand(0));
7243 SDValue Op2 = getValue(I.getArgOperand(1));
7244 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7245 return;
7246 }
7247 case Intrinsic::usub_sat: {
7248 SDValue Op1 = getValue(I.getArgOperand(0));
7249 SDValue Op2 = getValue(I.getArgOperand(1));
7250 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7251 return;
7252 }
7253 case Intrinsic::sshl_sat: {
7254 SDValue Op1 = getValue(I.getArgOperand(0));
7255 SDValue Op2 = getValue(I.getArgOperand(1));
7256 setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7257 return;
7258 }
7259 case Intrinsic::ushl_sat: {
7260 SDValue Op1 = getValue(I.getArgOperand(0));
7261 SDValue Op2 = getValue(I.getArgOperand(1));
7262 setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7263 return;
7264 }
7265 case Intrinsic::smul_fix:
7266 case Intrinsic::umul_fix:
7267 case Intrinsic::smul_fix_sat:
7268 case Intrinsic::umul_fix_sat: {
7269 SDValue Op1 = getValue(I.getArgOperand(0));
7270 SDValue Op2 = getValue(I.getArgOperand(1));
7271 SDValue Op3 = getValue(I.getArgOperand(2));
7272 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7273 Op1.getValueType(), Op1, Op2, Op3));
7274 return;
7275 }
7276 case Intrinsic::sdiv_fix:
7277 case Intrinsic::udiv_fix:
7278 case Intrinsic::sdiv_fix_sat:
7279 case Intrinsic::udiv_fix_sat: {
7280 SDValue Op1 = getValue(I.getArgOperand(0));
7281 SDValue Op2 = getValue(I.getArgOperand(1));
7282 SDValue Op3 = getValue(I.getArgOperand(2));
7283 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7284 Op1, Op2, Op3, DAG, TLI));
7285 return;
7286 }
7287 case Intrinsic::smax: {
7288 SDValue Op1 = getValue(I.getArgOperand(0));
7289 SDValue Op2 = getValue(I.getArgOperand(1));
7290 setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
7291 return;
7292 }
7293 case Intrinsic::smin: {
7294 SDValue Op1 = getValue(I.getArgOperand(0));
7295 SDValue Op2 = getValue(I.getArgOperand(1));
7296 setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
7297 return;
7298 }
7299 case Intrinsic::umax: {
7300 SDValue Op1 = getValue(I.getArgOperand(0));
7301 SDValue Op2 = getValue(I.getArgOperand(1));
7302 setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
7303 return;
7304 }
7305 case Intrinsic::umin: {
7306 SDValue Op1 = getValue(I.getArgOperand(0));
7307 SDValue Op2 = getValue(I.getArgOperand(1));
7308 setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
7309 return;
7310 }
7311 case Intrinsic::abs: {
7312 // TODO: Preserve "int min is poison" arg in SDAG?
7313 SDValue Op1 = getValue(I.getArgOperand(0));
7314 setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
7315 return;
7316 }
7317 case Intrinsic::scmp: {
7318 SDValue Op1 = getValue(I.getArgOperand(0));
7319 SDValue Op2 = getValue(I.getArgOperand(1));
7320 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7321 setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
7322 break;
7323 }
7324 case Intrinsic::ucmp: {
7325 SDValue Op1 = getValue(I.getArgOperand(0));
7326 SDValue Op2 = getValue(I.getArgOperand(1));
7327 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7328 setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
7329 break;
7330 }
7331 case Intrinsic::stacksave: {
7332 SDValue Op = getRoot();
7333 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7334 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
7335 setValue(&I, Res);
7336 DAG.setRoot(Res.getValue(1));
7337 return;
7338 }
7339 case Intrinsic::stackrestore:
7340 Res = getValue(I.getArgOperand(0));
7341 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
7342 return;
7343 case Intrinsic::get_dynamic_area_offset: {
7344 SDValue Op = getRoot();
7345 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7346 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
7347 Op);
7348 DAG.setRoot(Op);
7349 setValue(&I, Res);
7350 return;
7351 }
7352 case Intrinsic::stackguard: {
7353 MachineFunction &MF = DAG.getMachineFunction();
7354 const Module &M = *MF.getFunction().getParent();
7355 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7356 SDValue Chain = getRoot();
7357 if (TLI.useLoadStackGuardNode(M)) {
7358 Res = getLoadStackGuard(DAG, sdl, Chain);
7359 Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7360 } else {
7361 const Value *Global = TLI.getSDagStackGuard(M);
7362 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7363 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
7364 MachinePointerInfo(Global, 0), Align,
7365 MachineMemOperand::MOVolatile);
7366 }
7367 if (TLI.useStackGuardXorFP())
7368 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
7369 DAG.setRoot(Chain);
7370 setValue(&I, Res);
7371 return;
7372 }
7373 case Intrinsic::stackprotector: {
7374 // Emit code into the DAG to store the stack guard onto the stack.
7375 MachineFunction &MF = DAG.getMachineFunction();
7376 MachineFrameInfo &MFI = MF.getFrameInfo();
7377 const Module &M = *MF.getFunction().getParent();
7378 SDValue Src, Chain = getRoot();
7379
7380 if (TLI.useLoadStackGuardNode(M))
7381 Src = getLoadStackGuard(DAG, sdl, Chain);
7382 else
7383 Src = getValue(I.getArgOperand(0)); // The guard's value.
7384
7385 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7386
7387 int FI = FuncInfo.StaticAllocaMap[Slot];
7388 MFI.setStackProtectorIndex(FI);
7389 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7390
7391 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7392
7393 // Store the stack protector onto the stack.
7394 Res = DAG.getStore(
7395 Chain, sdl, Src, FIN,
7396 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7397 MaybeAlign(), MachineMemOperand::MOVolatile);
7398 setValue(&I, Res);
7399 DAG.setRoot(Res);
7400 return;
7401 }
7402 case Intrinsic::objectsize:
7403 llvm_unreachable("llvm.objectsize.* should have been lowered already");
7404
7405 case Intrinsic::is_constant:
7406 llvm_unreachable("llvm.is.constant.* should have been lowered already");
7407
7408 case Intrinsic::annotation:
7409 case Intrinsic::ptr_annotation:
7410 case Intrinsic::launder_invariant_group:
7411 case Intrinsic::strip_invariant_group:
7412 // Drop the intrinsic, but forward the value
7413 setValue(&I, getValue(I.getOperand(0)));
7414 return;
7415
7416 case Intrinsic::type_test:
7417 case Intrinsic::public_type_test:
7418 setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7419 return;
7420
7421 case Intrinsic::assume:
7422 case Intrinsic::experimental_noalias_scope_decl:
7423 case Intrinsic::var_annotation:
7424 case Intrinsic::sideeffect:
7425 // Discard annotate attributes, noalias scope declarations, assumptions, and
7426 // artificial side-effects.
7427 return;
7428
7429 case Intrinsic::codeview_annotation: {
7430 // Emit a label associated with this metadata.
7431 MachineFunction &MF = DAG.getMachineFunction();
7432 MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true);
7433 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7434 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7435 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7436 DAG.setRoot(Res);
7437 return;
7438 }
7439
7440 case Intrinsic::init_trampoline: {
7441 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7442
7443 SDValue Ops[6];
7444 Ops[0] = getRoot();
7445 Ops[1] = getValue(I.getArgOperand(0));
7446 Ops[2] = getValue(I.getArgOperand(1));
7447 Ops[3] = getValue(I.getArgOperand(2));
7448 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7449 Ops[5] = DAG.getSrcValue(F);
7450
7451 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7452
7453 DAG.setRoot(Res);
7454 return;
7455 }
7456 case Intrinsic::adjust_trampoline:
7457 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7458 TLI.getPointerTy(DAG.getDataLayout()),
7459 getValue(I.getArgOperand(0))));
7460 return;
7461 case Intrinsic::gcroot: {
7462 assert(DAG.getMachineFunction().getFunction().hasGC() &&
7463 "only valid in functions with gc specified, enforced by Verifier");
7464 assert(GFI && "implied by previous");
7465 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7466 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7467
7468 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7469 GFI->addStackRoot(FI->getIndex(), TypeMap);
7470 return;
7471 }
7472 case Intrinsic::gcread:
7473 case Intrinsic::gcwrite:
7474 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7475 case Intrinsic::get_rounding:
7476 Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7477 setValue(&I, Res);
7478 DAG.setRoot(Res.getValue(1));
7479 return;
7480
7481 case Intrinsic::expect:
7482 case Intrinsic::expect_with_probability:
7483 // Just replace __builtin_expect(exp, c) and
7484 // __builtin_expect_with_probability(exp, c, p) with EXP.
7485 setValue(&I, getValue(I.getArgOperand(0)));
7486 return;
7487
7488 case Intrinsic::ubsantrap:
7489 case Intrinsic::debugtrap:
7490 case Intrinsic::trap: {
7491 StringRef TrapFuncName =
7492 I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7493 if (TrapFuncName.empty()) {
7494 switch (Intrinsic) {
7495 case Intrinsic::trap:
7496 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7497 break;
7498 case Intrinsic::debugtrap:
7499 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7500 break;
7501 case Intrinsic::ubsantrap:
7502 DAG.setRoot(DAG.getNode(
7503 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7504 DAG.getTargetConstant(
7505 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7506 MVT::i32)));
7507 break;
7508 default: llvm_unreachable("unknown trap intrinsic");
7509 }
7510 DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(),
7511 I.hasFnAttr(Attribute::NoMerge));
7512 return;
7513 }
7514 TargetLowering::ArgListTy Args;
7515 if (Intrinsic == Intrinsic::ubsantrap) {
7516 Args.push_back(TargetLoweringBase::ArgListEntry());
7517 Args[0].Val = I.getArgOperand(0);
7518 Args[0].Node = getValue(Args[0].Val);
7519 Args[0].Ty = Args[0].Val->getType();
7520 }
7521
7522 TargetLowering::CallLoweringInfo CLI(DAG);
7523 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7524 CallingConv::C, I.getType(),
7525 DAG.getExternalSymbol(TrapFuncName.data(),
7526 TLI.getPointerTy(DAG.getDataLayout())),
7527 std::move(Args));
7528 CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge);
7529 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7530 DAG.setRoot(Result.second);
7531 return;
7532 }
7533
7534 case Intrinsic::allow_runtime_check:
7535 case Intrinsic::allow_ubsan_check:
7536 setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7537 return;
7538
7539 case Intrinsic::uadd_with_overflow:
7540 case Intrinsic::sadd_with_overflow:
7541 case Intrinsic::usub_with_overflow:
7542 case Intrinsic::ssub_with_overflow:
7543 case Intrinsic::umul_with_overflow:
7544 case Intrinsic::smul_with_overflow: {
7545 ISD::NodeType Op;
7546 switch (Intrinsic) {
7547 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
7548 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7549 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7550 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7551 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7552 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7553 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7554 }
7555 SDValue Op1 = getValue(I.getArgOperand(0));
7556 SDValue Op2 = getValue(I.getArgOperand(1));
7557
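// These intrinsics return a {result, overflow} pair, so the node carries a
// second, boolean result type. Illustrative IR (not from this file):
//   %p = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// lowers to ISD::UADDO with the VT list (i32, i1).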
7558 EVT ResultVT = Op1.getValueType();
7559 EVT OverflowVT = MVT::i1;
7560 if (ResultVT.isVector())
7561 OverflowVT = EVT::getVectorVT(
7562 *Context, OverflowVT, ResultVT.getVectorElementCount());
7563
7564 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7565 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7566 return;
7567 }
7568 case Intrinsic::prefetch: {
7569 SDValue Ops[5];
7570 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7571 auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
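// Operand layout for ISD::PREFETCH: chain, address, rw (0 = read,
// 1 = write), locality (0-3), and cache type (0 = instruction, 1 = data).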
7572 Ops[0] = DAG.getRoot();
7573 Ops[1] = getValue(I.getArgOperand(0));
7574 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7575 MVT::i32);
7576 Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7577 MVT::i32);
7578 Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7579 MVT::i32);
7580 SDValue Result = DAG.getMemIntrinsicNode(
7581 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7582 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7583 /* align */ std::nullopt, Flags);
7584
7585 // Chain the prefetch in parallel with any pending loads, to stay out of
7586 // the way of later optimizations.
7587 PendingLoads.push_back(Result);
7588 Result = getRoot();
7589 DAG.setRoot(Result);
7590 return;
7591 }
7592 case Intrinsic::lifetime_start:
7593 case Intrinsic::lifetime_end: {
7594 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7595 // Stack coloring is not enabled at -O0; discard the region information.
7596 if (TM.getOptLevel() == CodeGenOptLevel::None)
7597 return;
7598
7599 const int64_t ObjectSize =
7600 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7601 Value *const ObjectPtr = I.getArgOperand(1);
7602 SmallVector<const Value *, 4> Allocas;
7603 getUnderlyingObjects(ObjectPtr, Allocas);
7604
7605 for (const Value *Alloca : Allocas) {
7606 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7607
7608 // Could not find an Alloca.
7609 if (!LifetimeObject)
7610 continue;
7611
7612 // First check that the Alloca is static, otherwise it won't have a
7613 // valid frame index.
7614 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7615 if (SI == FuncInfo.StaticAllocaMap.end())
7616 return;
7617
7618 const int FrameIndex = SI->second;
7619 int64_t Offset;
7620 if (GetPointerBaseWithConstantOffset(
7621 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7622 Offset = -1; // Cannot determine offset from alloca to lifetime object.
7623 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7624 Offset);
7625 DAG.setRoot(Res);
7626 }
7627 return;
7628 }
7629 case Intrinsic::pseudoprobe: {
7630 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7631 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7632 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7633 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7634 DAG.setRoot(Res);
7635 return;
7636 }
7637 case Intrinsic::invariant_start:
7638 // Discard region information.
7639 setValue(&I,
7640 DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7641 return;
7642 case Intrinsic::invariant_end:
7643 // Discard region information.
7644 return;
7645 case Intrinsic::clear_cache: {
7646 SDValue InputChain = DAG.getRoot();
7647 SDValue StartVal = getValue(I.getArgOperand(0));
7648 SDValue EndVal = getValue(I.getArgOperand(1));
7649 Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other),
7650 {InputChain, StartVal, EndVal});
7651 setValue(&I, Res);
7652 DAG.setRoot(Res);
7653 return;
7654 }
7655 case Intrinsic::donothing:
7656 case Intrinsic::seh_try_begin:
7657 case Intrinsic::seh_scope_begin:
7658 case Intrinsic::seh_try_end:
7659 case Intrinsic::seh_scope_end:
7660 // ignore
7661 return;
7662 case Intrinsic::experimental_stackmap:
7663 visitStackmap(I);
7664 return;
7665 case Intrinsic::experimental_patchpoint_void:
7666 case Intrinsic::experimental_patchpoint:
7667 visitPatchpoint(I);
7668 return;
7669 case Intrinsic::experimental_gc_statepoint:
7670 LowerStatepoint(cast<GCStatepointInst>(I));
7671 return;
7672 case Intrinsic::experimental_gc_result:
7673 visitGCResult(cast<GCResultInst>(I));
7674 return;
7675 case Intrinsic::experimental_gc_relocate:
7676 visitGCRelocate(cast<GCRelocateInst>(I));
7677 return;
7678 case Intrinsic::instrprof_cover:
7679 llvm_unreachable("instrprof failed to lower a cover");
7680 case Intrinsic::instrprof_increment:
7681 llvm_unreachable("instrprof failed to lower an increment");
7682 case Intrinsic::instrprof_timestamp:
7683 llvm_unreachable("instrprof failed to lower a timestamp");
7684 case Intrinsic::instrprof_value_profile:
7685 llvm_unreachable("instrprof failed to lower a value profiling call");
7686 case Intrinsic::instrprof_mcdc_parameters:
7687 llvm_unreachable("instrprof failed to lower mcdc parameters");
7688 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7689 llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7690 case Intrinsic::localescape: {
7691 MachineFunction &MF = DAG.getMachineFunction();
7692 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7693
7694 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7695 // is the same on all targets.
7696 for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7697 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7698 if (isa<ConstantPointerNull>(Arg))
7699 continue; // Skip null pointers. They represent a hole in index space.
7700 AllocaInst *Slot = cast<AllocaInst>(Arg);
7701 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7702 "can only escape static allocas");
7703 int FI = FuncInfo.StaticAllocaMap[Slot];
7704 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7705 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7706 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7707 TII->get(TargetOpcode::LOCAL_ESCAPE))
7708 .addSym(FrameAllocSym)
7709 .addFrameIndex(FI);
7710 }
7711
7712 return;
7713 }
7714
7715 case Intrinsic::localrecover: {
7716 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7717 MachineFunction &MF = DAG.getMachineFunction();
7718
7719 // Get the symbol that defines the frame offset.
7720 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7721 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7722 unsigned IdxVal =
7723 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7724 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7725 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7726
7727 Value *FP = I.getArgOperand(1);
7728 SDValue FPVal = getValue(FP);
7729 EVT PtrVT = FPVal.getValueType();
7730
7731 // Create a MCSymbol for the label to avoid any target lowering
7732 // that would make this PC relative.
7733 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7734 SDValue OffsetVal =
7735 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7736
7737 // Add the offset to the FP.
7738 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7739 setValue(&I, Add);
7740
7741 return;
7742 }
7743
7744 case Intrinsic::fake_use: {
7745 Value *V = I.getArgOperand(0);
7746 SDValue Ops[2];
7747 // For Values not declared or previously used in this basic block, the
7748 // NodeMap will not have an entry, and `getValue` will assert if V has no
7749 // valid register value.
7750 auto FakeUseValue = [&]() -> SDValue {
7751 SDValue &N = NodeMap[V];
7752 if (N.getNode())
7753 return N;
7754
7755 // If there's a virtual register allocated and initialized for this
7756 // value, use it.
7757 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
7758 return copyFromReg;
7759 // FIXME: Do we want to preserve constants? It seems pointless.
7760 if (isa<Constant>(V))
7761 return getValue(V);
7762 return SDValue();
7763 }();
7764 if (!FakeUseValue || FakeUseValue.isUndef())
7765 return;
7766 Ops[0] = getRoot();
7767 Ops[1] = FakeUseValue;
7768 // Also, do not translate a fake use with an undef operand, or any other
7769 // empty SDValues.
7770 if (!Ops[1] || Ops[1].isUndef())
7771 return;
7772 DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops));
7773 return;
7774 }
7775
7776 case Intrinsic::eh_exceptionpointer:
7777 case Intrinsic::eh_exceptioncode: {
7778 // Get the exception pointer vreg, copy from it, and resize it to fit.
7779 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7780 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7781 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7782 Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7783 SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7784 if (Intrinsic == Intrinsic::eh_exceptioncode)
7785 N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7786 setValue(&I, N);
7787 return;
7788 }
7789 case Intrinsic::xray_customevent: {
7790 // Here we want to make sure that the intrinsic behaves as if it has a
7791 // specific calling convention.
7792 const auto &Triple = DAG.getTarget().getTargetTriple();
7793 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7794 return;
7795
7796 SmallVector<SDValue, 8> Ops;
7797
7798 // We want to say that we always want the arguments in registers.
7799 SDValue LogEntryVal = getValue(I.getArgOperand(0));
7800 SDValue StrSizeVal = getValue(I.getArgOperand(1));
7801 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7802 SDValue Chain = getRoot();
7803 Ops.push_back(LogEntryVal);
7804 Ops.push_back(StrSizeVal);
7805 Ops.push_back(Chain);
7806
7807 // We need to enforce the calling convention for the callsite so that
7808 // argument ordering is enforced correctly and register allocation can
7809 // see that some registers may be assumed clobbered and must be preserved
7810 // across calls to the intrinsic.
7811 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7812 sdl, NodeTys, Ops);
7813 SDValue patchableNode = SDValue(MN, 0);
7814 DAG.setRoot(patchableNode);
7815 setValue(&I, patchableNode);
7816 return;
7817 }
7818 case Intrinsic::xray_typedevent: {
7819 // Here we want to make sure that the intrinsic behaves as if it has a
7820 // specific calling convention.
7821 const auto &Triple = DAG.getTarget().getTargetTriple();
7822 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7823 return;
7824
7825 SmallVector<SDValue, 8> Ops;
7826
7827 // We want to say that we always want the arguments in registers.
7828 // It's unclear to me how manipulating the selection DAG here forces callers
7829 // to provide arguments in registers instead of on the stack.
7830 SDValue LogTypeId = getValue(I.getArgOperand(0));
7831 SDValue LogEntryVal = getValue(I.getArgOperand(1));
7832 SDValue StrSizeVal = getValue(I.getArgOperand(2));
7833 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7834 SDValue Chain = getRoot();
7835 Ops.push_back(LogTypeId);
7836 Ops.push_back(LogEntryVal);
7837 Ops.push_back(StrSizeVal);
7838 Ops.push_back(Chain);
7839
7840 // We need to enforce the calling convention for the callsite so that
7841 // argument ordering is enforced correctly and register allocation can
7842 // see that some registers may be assumed clobbered and must be preserved
7843 // across calls to the intrinsic.
7844 MachineSDNode *MN = DAG.getMachineNode(
7845 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7846 SDValue patchableNode = SDValue(MN, 0);
7847 DAG.setRoot(patchableNode);
7848 setValue(&I, patchableNode);
7849 return;
7850 }
7851 case Intrinsic::experimental_deoptimize:
7852 LowerDeoptimizeCall(&I);
7853 return;
7854 case Intrinsic::stepvector:
7855 visitStepVector(I);
7856 return;
7857 case Intrinsic::vector_reduce_fadd:
7858 case Intrinsic::vector_reduce_fmul:
7859 case Intrinsic::vector_reduce_add:
7860 case Intrinsic::vector_reduce_mul:
7861 case Intrinsic::vector_reduce_and:
7862 case Intrinsic::vector_reduce_or:
7863 case Intrinsic::vector_reduce_xor:
7864 case Intrinsic::vector_reduce_smax:
7865 case Intrinsic::vector_reduce_smin:
7866 case Intrinsic::vector_reduce_umax:
7867 case Intrinsic::vector_reduce_umin:
7868 case Intrinsic::vector_reduce_fmax:
7869 case Intrinsic::vector_reduce_fmin:
7870 case Intrinsic::vector_reduce_fmaximum:
7871 case Intrinsic::vector_reduce_fminimum:
7872 visitVectorReduce(I, Intrinsic);
7873 return;
7874
7875 case Intrinsic::icall_branch_funnel: {
7876 SmallVector<SDValue, 16> Ops;
7877 Ops.push_back(getValue(I.getArgOperand(0)));
7878
7879 int64_t Offset;
7880 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7881 I.getArgOperand(1), Offset, DAG.getDataLayout()));
7882 if (!Base)
7883 report_fatal_error(
7884 "llvm.icall.branch.funnel operand must be a GlobalValue");
7885 Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7886
7887 struct BranchFunnelTarget {
7888 int64_t Offset;
7889 SDValue Target;
7890 };
7891 SmallVector<BranchFunnelTarget, 8> Targets;
7892
7893 for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7894 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7895 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7896 if (ElemBase != Base)
7897 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7898 "to the same GlobalValue");
7899
7900 SDValue Val = getValue(I.getArgOperand(Op + 1));
7901 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7902 if (!GA)
7903 report_fatal_error(
7904 "llvm.icall.branch.funnel operand must be a GlobalValue");
7905 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7906 GA->getGlobal(), sdl, Val.getValueType(),
7907 GA->getOffset())});
7908 }
7909 llvm::sort(Targets,
7910 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7911 return T1.Offset < T2.Offset;
7912 });
7913
7914 for (auto &T : Targets) {
7915 Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7916 Ops.push_back(T.Target);
7917 }
7918
7919 Ops.push_back(DAG.getRoot()); // Chain
7920 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7921 MVT::Other, Ops),
7922 0);
7923 DAG.setRoot(N);
7924 setValue(&I, N);
7925 HasTailCall = true;
7926 return;
7927 }
7928
7929 case Intrinsic::wasm_landingpad_index:
7930 // The information this intrinsic carried has been transferred to the
7931 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad, so we can
7932 // safely delete the intrinsic now.
7933 return;
7934
7935 case Intrinsic::aarch64_settag:
7936 case Intrinsic::aarch64_settag_zero: {
7937 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7938 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7939 SDValue Val = TSI.EmitTargetCodeForSetTag(
7940 DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7941 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7942 ZeroMemory);
7943 DAG.setRoot(Val);
7944 setValue(&I, Val);
7945 return;
7946 }
7947 case Intrinsic::amdgcn_cs_chain: {
7948 // At this point we don't care if it's amdgpu_cs_chain or
7949 // amdgpu_cs_chain_preserve.
7950 CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7951
7952 Type *RetTy = I.getType();
7953 assert(RetTy->isVoidTy() && "Should not return");
7954
7955 SDValue Callee = getValue(I.getOperand(0));
7956
7957 // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7958 // We'll also tack on the value of the EXEC mask at the end.
7959 TargetLowering::ArgListTy Args;
7960 Args.reserve(3);
7961
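// Operand 2 carries the SGPR arguments, operand 3 the VGPR arguments, and
// operand 1 the EXEC mask, which is passed last and marked inreg below.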
7962 for (unsigned Idx : {2, 3, 1}) {
7963 TargetLowering::ArgListEntry Arg;
7964 Arg.Node = getValue(I.getOperand(Idx));
7965 Arg.Ty = I.getOperand(Idx)->getType();
7966 Arg.setAttributes(&I, Idx);
7967 Args.push_back(Arg);
7968 }
7969
7970 assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7971 assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7972 Args[2].IsInReg = true; // EXEC should be inreg
7973
7974 // Forward the flags and any additional arguments.
7975 for (unsigned Idx = 4; Idx < I.arg_size(); ++Idx) {
7976 TargetLowering::ArgListEntry Arg;
7977 Arg.Node = getValue(I.getOperand(Idx));
7978 Arg.Ty = I.getOperand(Idx)->getType();
7979 Arg.setAttributes(&I, Idx);
7980 Args.push_back(Arg);
7981 }
7982
7983 TargetLowering::CallLoweringInfo CLI(DAG);
7984 CLI.setDebugLoc(getCurSDLoc())
7985 .setChain(getRoot())
7986 .setCallee(CC, RetTy, Callee, std::move(Args))
7987 .setNoReturn(true)
7988 .setTailCall(true)
7989 .setConvergent(I.isConvergent());
7990 CLI.CB = &I;
7991 std::pair<SDValue, SDValue> Result =
7992 lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7993 (void)Result;
7994 assert(!Result.first.getNode() && !Result.second.getNode() &&
7995 "Should've lowered as tail call");
7996
7997 HasTailCall = true;
7998 return;
7999 }
8000 case Intrinsic::ptrmask: {
8001 SDValue Ptr = getValue(I.getOperand(0));
8002 SDValue Mask = getValue(I.getOperand(1));
8003
8004 // On arm64_32, pointers are 32 bits when stored in memory, but
8005 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to
8006 // match the index type, but the pointer is 64 bits, so the mask must be
8007 // zero-extended up to 64 bits to match the pointer.
8008 EVT PtrVT =
8009 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8010 EVT MemVT =
8011 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8012 assert(PtrVT == Ptr.getValueType());
8013 if (Mask.getValueType().getFixedSizeInBits() < MemVT.getFixedSizeInBits()) {
8014 // For AMDGPU buffer descriptors the mask is 48 bits, but the pointer is
8015 // 128-bit, so we have to pad the mask with ones for unused bits.
8016 auto HighOnes = DAG.getNode(
8017 ISD::SHL, sdl, PtrVT, DAG.getAllOnesConstant(sdl, PtrVT),
8018 DAG.getShiftAmountConstant(Mask.getValueType().getFixedSizeInBits(),
8019 PtrVT, sdl));
8020 Mask = DAG.getNode(ISD::OR, sdl, PtrVT,
8021 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8022 } else if (Mask.getValueType() != PtrVT)
8023 Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8024
8025 assert(Mask.getValueType() == PtrVT);
8026 setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
8027 return;
8028 }
8029 case Intrinsic::threadlocal_address: {
8030 setValue(&I, getValue(I.getOperand(0)));
8031 return;
8032 }
8033 case Intrinsic::get_active_lane_mask: {
8034 EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8035 SDValue Index = getValue(I.getOperand(0));
8036 SDValue TripCount = getValue(I.getOperand(1));
8037 EVT ElementVT = Index.getValueType();
8038
8039 if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
8040 setValue(&I, DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8041 TripCount));
8042 return;
8043 }
8044
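// Expansion sketch: splat the scalar index, add the step vector with
// unsigned saturation, and compare against the splatted trip count. E.g.
// (illustrative) index 6, trip count 8, 4 lanes: {6,7,8,9} ult {8,8,8,8}
// gives the mask {1,1,0,0}.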
8045 EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
8046 CCVT.getVectorElementCount());
8047
8048 SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
8049 SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
8050 SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
8051 SDValue VectorInduction = DAG.getNode(
8052 ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
8053 SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
8054 VectorTripCount, ISD::CondCode::SETULT);
8055 setValue(&I, SetCC);
8056 return;
8057 }
8058 case Intrinsic::experimental_get_vector_length: {
8059 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
8060 "Expected positive VF");
8061 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
8062 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8063
8064 SDValue Count = getValue(I.getOperand(0));
8065 EVT CountVT = Count.getValueType();
8066
8067 if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
8068 visitTargetIntrinsic(I, Intrinsic);
8069 return;
8070 }
8071
8072 // Expand to a umin between the trip count and the maximum number of
8073 // elements the type can hold.
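// E.g. (illustrative) for VF = 4, scalable, on a target where vscale = 2:
// MaxEVL = 4 * vscale = 8, so a trip count of 9 yields umin(9, 8) = 8.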
8074 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8075
8076 // Extend the trip count to at least the result VT.
8077 if (CountVT.bitsLT(VT)) {
8078 Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
8079 CountVT = VT;
8080 }
8081
8082 SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
8083 ElementCount::get(VF, IsScalable));
8084
8085 SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
8086 // Clip to the result type if needed.
8087 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
8088
8089 setValue(&I, Trunc);
8090 return;
8091 }
8092 case Intrinsic::experimental_vector_partial_reduce_add: {
8093 if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) {
8094 visitTargetIntrinsic(I, Intrinsic);
8095 return;
8096 }
8097 SDValue Acc = getValue(I.getOperand(0));
8098 SDValue Input = getValue(I.getOperand(1));
8099 setValue(&I,
8100 DAG.getNode(ISD::PARTIAL_REDUCE_UMLA, sdl, Acc.getValueType(), Acc,
8101 Input, DAG.getConstant(1, sdl, Input.getValueType())));
8102 return;
8103 }
8104 case Intrinsic::experimental_cttz_elts: {
8105 auto DL = getCurSDLoc();
8106 SDValue Op = getValue(I.getOperand(0));
8107 EVT OpVT = Op.getValueType();
8108
8109 if (!TLI.shouldExpandCttzElements(OpVT)) {
8110 visitTargetIntrinsic(I, Intrinsic);
8111 return;
8112 }
8113
8114 if (OpVT.getScalarType() != MVT::i1) {
8115 // Compare the input vector elements to zero and use that to count trailing zeros.
8116 SDValue AllZero = DAG.getConstant(0, DL, OpVT);
8117 OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
8118 OpVT.getVectorElementCount());
8119 Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
8120 }
8121
8122 // If the zero-is-poison flag is set, we can assume the upper limit
8123 // of the result is VF-1.
8124 bool ZeroIsPoison =
8125 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8126 ConstantRange VScaleRange(1, true); // Dummy value.
8127 if (isa<ScalableVectorType>(I.getOperand(0)->getType()))
8128 VScaleRange = getVScaleRange(I.getCaller(), 64);
8129 unsigned EltWidth = TLI.getBitWidthForCttzElements(
8130 I.getType(), OpVT.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
8131
8132 MVT NewEltTy = MVT::getIntegerVT(EltWidth);
8133
8134 // Create the new vector type & get the vector length
8135 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
8136 OpVT.getVectorElementCount());
8137
8138 SDValue VL =
8139 DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
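// Worked example (illustrative), for a fixed 4-element i1 mask {0,0,1,0}:
// StepVL = VL - step = {4,3,2,1}; the sign-extended mask keeps {0,0,2,0};
// VECREDUCE_UMAX gives 2; and VL - 2 = 2 is the trailing-zero count.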
8140
8141 SDValue StepVec = DAG.getStepVector(DL, NewVT);
8142 SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
8143 SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
8144 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
8145 SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
8146 SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
8147 SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
8148
8149 EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
8150 SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
8151
8152 setValue(&I, Ret);
8153 return;
8154 }
8155 case Intrinsic::vector_insert: {
8156 SDValue Vec = getValue(I.getOperand(0));
8157 SDValue SubVec = getValue(I.getOperand(1));
8158 SDValue Index = getValue(I.getOperand(2));
8159
8160 // The intrinsic's index type is i64, but the SDNode requires an index type
8161 // suitable for the target. Convert the index as required.
8162 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8163 if (Index.getValueType() != VectorIdxTy)
8164 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8165
8166 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8167 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
8168 Index));
8169 return;
8170 }
8171 case Intrinsic::vector_extract: {
8172 SDValue Vec = getValue(I.getOperand(0));
8173 SDValue Index = getValue(I.getOperand(1));
8174 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8175
8176 // The intrinsic's index type is i64, but the SDNode requires an index type
8177 // suitable for the target. Convert the index as required.
8178 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8179 if (Index.getValueType() != VectorIdxTy)
8180 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8181
8182 setValue(&I,
8183 DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
8184 return;
8185 }
8186 case Intrinsic::experimental_vector_match: {
8187 SDValue Op1 = getValue(I.getOperand(0));
8188 SDValue Op2 = getValue(I.getOperand(1));
8189 SDValue Mask = getValue(I.getOperand(2));
8190 EVT Op1VT = Op1.getValueType();
8191 EVT Op2VT = Op2.getValueType();
8192 EVT ResVT = Mask.getValueType();
8193 unsigned SearchSize = Op2VT.getVectorNumElements();
8194
8195 // If the target has native support for this vector match operation, lower
8196 // the intrinsic untouched; otherwise, expand it below.
8197 if (!TLI.shouldExpandVectorMatch(Op1VT, SearchSize)) {
8198 visitTargetIntrinsic(I, Intrinsic);
8199 return;
8200 }
8201
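// Expansion sketch: for each element of the search vector, splat it and OR
// in the lanewise comparison, i.e. Ret |= setcc(Op1, splat(Op2[i]), eq);
// the accumulated result is then ANDed with the input mask.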
8202 SDValue Ret = DAG.getConstant(0, sdl, ResVT);
8203
8204 for (unsigned i = 0; i < SearchSize; ++i) {
8205 SDValue Op2Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
8206 Op2VT.getVectorElementType(), Op2,
8207 DAG.getVectorIdxConstant(i, sdl));
8208 SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, sdl, Op1VT, Op2Elem);
8209 SDValue Cmp = DAG.getSetCC(sdl, ResVT, Op1, Splat, ISD::SETEQ);
8210 Ret = DAG.getNode(ISD::OR, sdl, ResVT, Ret, Cmp);
8211 }
8212
8213 setValue(&I, DAG.getNode(ISD::AND, sdl, ResVT, Ret, Mask));
8214 return;
8215 }
8216 case Intrinsic::vector_reverse:
8217 visitVectorReverse(I);
8218 return;
8219 case Intrinsic::vector_splice:
8220 visitVectorSplice(I);
8221 return;
8222 case Intrinsic::callbr_landingpad:
8223 visitCallBrLandingPad(I);
8224 return;
8225 case Intrinsic::vector_interleave2:
8226 visitVectorInterleave(I, 2);
8227 return;
8228 case Intrinsic::vector_interleave3:
8229 visitVectorInterleave(I, 3);
8230 return;
8231 case Intrinsic::vector_interleave4:
8232 visitVectorInterleave(I, 4);
8233 return;
8234 case Intrinsic::vector_interleave5:
8235 visitVectorInterleave(I, 5);
8236 return;
8237 case Intrinsic::vector_interleave6:
8238 visitVectorInterleave(I, 6);
8239 return;
8240 case Intrinsic::vector_interleave7:
8241 visitVectorInterleave(I, 7);
8242 return;
8243 case Intrinsic::vector_interleave8:
8244 visitVectorInterleave(I, 8);
8245 return;
8246 case Intrinsic::vector_deinterleave2:
8247 visitVectorDeinterleave(I, 2);
8248 return;
8249 case Intrinsic::vector_deinterleave3:
8250 visitVectorDeinterleave(I, 3);
8251 return;
8252 case Intrinsic::vector_deinterleave4:
8253 visitVectorDeinterleave(I, 4);
8254 return;
8255 case Intrinsic::vector_deinterleave5:
8256 visitVectorDeinterleave(I, 5);
8257 return;
8258 case Intrinsic::vector_deinterleave6:
8259 visitVectorDeinterleave(I, 6);
8260 return;
8261 case Intrinsic::vector_deinterleave7:
8262 visitVectorDeinterleave(I, 7);
8263 return;
8264 case Intrinsic::vector_deinterleave8:
8265 visitVectorDeinterleave(I, 8);
8266 return;
8267 case Intrinsic::experimental_vector_compress:
8268 setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl,
8269 getValue(I.getArgOperand(0)).getValueType(),
8270 getValue(I.getArgOperand(0)),
8271 getValue(I.getArgOperand(1)),
8272 getValue(I.getArgOperand(2)), Flags));
8273 return;
8274 case Intrinsic::experimental_convergence_anchor:
8275 case Intrinsic::experimental_convergence_entry:
8276 case Intrinsic::experimental_convergence_loop:
8277 visitConvergenceControl(I, Intrinsic);
8278 return;
8279 case Intrinsic::experimental_vector_histogram_add: {
8280 visitVectorHistogram(I, Intrinsic);
8281 return;
8282 }
8283 case Intrinsic::experimental_vector_extract_last_active: {
8284 visitVectorExtractLastActive(I, Intrinsic);
8285 return;
8286 }
8287 }
8288 }
8289
8290 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8291 const ConstrainedFPIntrinsic &FPI) {
8292 SDLoc sdl = getCurSDLoc();
8293
8294 // We do not need to serialize constrained FP intrinsics against
8295 // each other or against (nonvolatile) loads, so they can be
8296 // chained like loads.
8297 SDValue Chain = DAG.getRoot();
8298 SmallVector<SDValue, 4> Opers;
8299 Opers.push_back(Chain);
8300 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
8301 Opers.push_back(getValue(FPI.getArgOperand(I)));
8302
8303 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
8304 assert(Result.getNode()->getNumValues() == 2);
8305
8306 // Push node to the appropriate list so that future instructions can be
8307 // chained up correctly.
8308 SDValue OutChain = Result.getValue(1);
8309 switch (EB) {
8310 case fp::ExceptionBehavior::ebIgnore:
8311 // The only reason why ebIgnore nodes still need to be chained is that
8312 // they might depend on the current rounding mode, and therefore must
8313 // not be moved across instructions that may change that mode.
8314 [[fallthrough]];
8315 case fp::ExceptionBehavior::ebMayTrap:
8316 // These must not be moved across calls or instructions that may change
8317 // floating-point exception masks.
8318 PendingConstrainedFP.push_back(OutChain);
8319 break;
8320 case fp::ExceptionBehavior::ebStrict:
8321 // These must not be moved across calls or instructions that may change
8322 // floating-point exception masks or read floating-point exception flags.
8323 // In addition, they cannot be optimized out even if unused.
8324 PendingConstrainedFPStrict.push_back(OutChain);
8325 break;
8326 }
8327 };
8328
8329 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8330 EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
8331 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
8332 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
8333
8334 SDNodeFlags Flags;
8335 if (EB == fp::ExceptionBehavior::ebIgnore)
8336 Flags.setNoFPExcept(true);
8337
8338 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8339 Flags.copyFMF(*FPOp);
8340
8341 unsigned Opcode;
8342 switch (FPI.getIntrinsicID()) {
8343 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
8344 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8345 case Intrinsic::INTRINSIC: \
8346 Opcode = ISD::STRICT_##DAGN; \
8347 break;
8348 #include "llvm/IR/ConstrainedOps.def"
8349 case Intrinsic::experimental_constrained_fmuladd: {
8350 Opcode = ISD::STRICT_FMA;
8351 // Break fmuladd into fmul and fadd.
8352 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
8353 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
8354 Opers.pop_back();
8355 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
8356 pushOutChain(Mul, EB);
8357 Opcode = ISD::STRICT_FADD;
8358 Opers.clear();
8359 Opers.push_back(Mul.getValue(1));
8360 Opers.push_back(Mul.getValue(0));
8361 Opers.push_back(getValue(FPI.getArgOperand(2)));
8362 }
8363 break;
8364 }
8365 }
8366
8367 // A few strict DAG nodes carry additional operands that are not
8368 // set up by the default code above.
8369 switch (Opcode) {
8370 default: break;
8371 case ISD::STRICT_FP_ROUND:
8372 Opers.push_back(
8373 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
8374 break;
8375 case ISD::STRICT_FSETCC:
8376 case ISD::STRICT_FSETCCS: {
8377 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8378 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
8379 if (TM.Options.NoNaNsFPMath)
8380 Condition = getFCmpCodeWithoutNaN(Condition);
8381 Opers.push_back(DAG.getCondCode(Condition));
8382 break;
8383 }
8384 }
8385
8386 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
8387 pushOutChain(Result, EB);
8388
8389 SDValue FPResult = Result.getValue(0);
8390 setValue(&FPI, FPResult);
8391 }
8392
8393 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
8394 std::optional<unsigned> ResOPC;
8395 switch (VPIntrin.getIntrinsicID()) {
8396 case Intrinsic::vp_ctlz: {
8397 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8398 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8399 break;
8400 }
8401 case Intrinsic::vp_cttz: {
8402 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8403 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8404 break;
8405 }
8406 case Intrinsic::vp_cttz_elts: {
8407 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8408 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8409 break;
8410 }
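// The remaining VP intrinsics map 1:1 onto VP SDNodes (e.g. vp.add to
// ISD::VP_ADD); the case list is generated from VPIntrinsics.def.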
8411 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8412 case Intrinsic::VPID: \
8413 ResOPC = ISD::VPSD; \
8414 break;
8415 #include "llvm/IR/VPIntrinsics.def"
8416 }
8417
8418 if (!ResOPC)
8419 llvm_unreachable(
8420 "Inconsistency: no SDNode available for this VPIntrinsic!");
8421
8422 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8423 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8424 if (VPIntrin.getFastMathFlags().allowReassoc())
8425 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8426 : ISD::VP_REDUCE_FMUL;
8427 }
8428
8429 return *ResOPC;
8430 }
8431
8432 void SelectionDAGBuilder::visitVPLoad(
8433 const VPIntrinsic &VPIntrin, EVT VT,
8434 const SmallVectorImpl<SDValue> &OpValues) {
8435 SDLoc DL = getCurSDLoc();
8436 Value *PtrOperand = VPIntrin.getArgOperand(0);
8437 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8438 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8439 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8440 SDValue LD;
8441 // Do not serialize variable-length loads of constant memory with
8442 // anything.
8443 if (!Alignment)
8444 Alignment = DAG.getEVTAlign(VT);
8445 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8446 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
8447 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8448 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8449 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8450 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8451 LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8452 MMO, false /*IsExpanding */);
8453 if (AddToChain)
8454 PendingLoads.push_back(LD.getValue(1));
8455 setValue(&VPIntrin, LD);
8456 }
8457
8458 void SelectionDAGBuilder::visitVPGather(
8459 const VPIntrinsic &VPIntrin, EVT VT,
8460 const SmallVectorImpl<SDValue> &OpValues) {
8461 SDLoc DL = getCurSDLoc();
8462 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8463 Value *PtrOperand = VPIntrin.getArgOperand(0);
8464 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8465 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8466 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8467 SDValue LD;
8468 if (!Alignment)
8469 Alignment = DAG.getEVTAlign(VT.getScalarType());
8470 unsigned AS =
8471 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8472 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8473 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8474 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8475 SDValue Base, Index, Scale;
8476 ISD::MemIndexType IndexType;
8477 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8478 this, VPIntrin.getParent(),
8479 VT.getScalarStoreSize());
8480 if (!UniformBase) {
8481 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8482 Index = getValue(PtrOperand);
8483 IndexType = ISD::SIGNED_SCALED;
8484 Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8485 }
8486 EVT IdxVT = Index.getValueType();
8487 EVT EltTy = IdxVT.getVectorElementType();
8488 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8489 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8490 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8491 }
8492 LD = DAG.getGatherVP(
8493 DAG.getVTList(VT, MVT::Other), VT, DL,
8494 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8495 IndexType);
8496 PendingLoads.push_back(LD.getValue(1));
8497 setValue(&VPIntrin, LD);
8498 }
8499
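/// Lower a call to the llvm.vp.store intrinsic, e.g.
///   call void @llvm.vp.store.v4i32.p0(<4 x i32> %v, ptr %p, <4 x i1> %m,
///                                     i32 %evl)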
8500 void SelectionDAGBuilder::visitVPStore(
8501 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8502 SDLoc DL = getCurSDLoc();
8503 Value *PtrOperand = VPIntrin.getArgOperand(1);
8504 EVT VT = OpValues[0].getValueType();
8505 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8506 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8507 SDValue ST;
8508 if (!Alignment)
8509 Alignment = DAG.getEVTAlign(VT);
8510 SDValue Ptr = OpValues[1];
8511 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
8512 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8513 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8514 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8515 ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
8516 OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
8517 /* IsTruncating */ false, /*IsCompressing*/ false);
8518 DAG.setRoot(ST);
8519 setValue(&VPIntrin, ST);
8520 }
8521
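/// Lower a call to the llvm.vp.scatter intrinsic. As with visitVPGather, a
/// uniform-base decomposition is attempted first, falling back to using the
/// pointer vector directly as the indices.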
8522 void SelectionDAGBuilder::visitVPScatter(
8523 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8524 SDLoc DL = getCurSDLoc();
8525 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8526 Value *PtrOperand = VPIntrin.getArgOperand(1);
8527 EVT VT = OpValues[0].getValueType();
8528 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8529 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8530 SDValue ST;
8531 if (!Alignment)
8532 Alignment = DAG.getEVTAlign(VT.getScalarType());
8533 unsigned AS =
8534 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8535 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8536 MachinePointerInfo(AS), MachineMemOperand::MOStore,
8537 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8538 SDValue Base, Index, Scale;
8539 ISD::MemIndexType IndexType;
8540 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8541 this, VPIntrin.getParent(),
8542 VT.getScalarStoreSize());
8543 if (!UniformBase) {
8544 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8545 Index = getValue(PtrOperand);
8546 IndexType = ISD::SIGNED_SCALED;
8547 Scale =
8548 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8549 }
8550 EVT IdxVT = Index.getValueType();
8551 EVT EltTy = IdxVT.getVectorElementType();
8552 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8553 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8554 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8555 }
8556 ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8557 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8558 OpValues[2], OpValues[3]},
8559 MMO, IndexType);
8560 DAG.setRoot(ST);
8561 setValue(&VPIntrin, ST);
8562 }
8563
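/// Lower a call to the llvm.experimental.vp.strided.load intrinsic, which
/// loads up to EVL lanes spaced a runtime byte stride apart, starting at the
/// pointer operand.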
8564 void SelectionDAGBuilder::visitVPStridedLoad(
8565 const VPIntrinsic &VPIntrin, EVT VT,
8566 const SmallVectorImpl<SDValue> &OpValues) {
8567 SDLoc DL = getCurSDLoc();
8568 Value *PtrOperand = VPIntrin.getArgOperand(0);
8569 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8570 if (!Alignment)
8571 Alignment = DAG.getEVTAlign(VT.getScalarType());
8572 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8573 const MDNode *Ranges = getRangeMetadata(VPIntrin);
8574 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8575 bool AddToChain = !BatchAA || !BatchAA->pointsToConstantMemory(ML);
8576 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8577 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8578 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8579 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8580 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8581
8582 SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8583 OpValues[2], OpValues[3], MMO,
8584 false /*IsExpanding*/);
8585
8586 if (AddToChain)
8587 PendingLoads.push_back(LD.getValue(1));
8588 setValue(&VPIntrin, LD);
8589 }
8590
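/// Lower a call to the llvm.experimental.vp.strided.store intrinsic, the
/// store counterpart of visitVPStridedLoad.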
8591 void SelectionDAGBuilder::visitVPStridedStore(
8592 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8593 SDLoc DL = getCurSDLoc();
8594 Value *PtrOperand = VPIntrin.getArgOperand(1);
8595 EVT VT = OpValues[0].getValueType();
8596 MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8597 if (!Alignment)
8598 Alignment = DAG.getEVTAlign(VT.getScalarType());
8599 AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8600 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8601 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8602 MachinePointerInfo(AS), MachineMemOperand::MOStore,
8603 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8604
8605 SDValue ST = DAG.getStridedStoreVP(
8606 getMemoryRoot(), DL, OpValues[0], OpValues[1],
8607 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8608 OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8609 /*IsCompressing*/ false);
8610
8611 DAG.setRoot(ST);
8612 setValue(&VPIntrin, ST);
8613 }
8614
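/// Lower a call to llvm.vp.icmp or llvm.vp.fcmp by emitting a VP setcc node
/// carrying the predicate's condition code, the mask and the zero-extended
/// EVL.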
8615 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8616 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8617 SDLoc DL = getCurSDLoc();
8618
8619 ISD::CondCode Condition;
8620 CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8621 bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8622 if (IsFP) {
8623 // FIXME: Regular fcmps are FPMathOperators and may carry fast-math (nnan)
8624 // flags, but calls like vp.fcmp that don't return a floating-point type
8625 // can't be FPMathOperators. This affects constrained fcmp too.
8626 Condition = getFCmpCondCode(CondCode);
8627 if (TM.Options.NoNaNsFPMath)
8628 Condition = getFCmpCodeWithoutNaN(Condition);
8629 } else {
8630 Condition = getICmpCondCode(CondCode);
8631 }
8632
8633 SDValue Op1 = getValue(VPIntrin.getOperand(0));
8634 SDValue Op2 = getValue(VPIntrin.getOperand(1));
8635 // Operand #2 is the condition code, already retrieved via getPredicate().
8636 SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8637 SDValue EVL = getValue(VPIntrin.getOperand(4));
8638 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8639 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8640 "Unexpected target EVL type");
8641 EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8642
8643 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8644 VPIntrin.getType());
8645 setValue(&VPIntrin,
8646 DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8647 }
8648
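/// Lower a vector-predicated intrinsic. Most VP intrinsics map directly onto
/// a single VP ISD node built from their operands; memory, comparison and
/// pointer-conversion intrinsics are dispatched to the dedicated helpers
/// above.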
8649 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8650 const VPIntrinsic &VPIntrin) {
8651 SDLoc DL = getCurSDLoc();
8652 unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8653
8654 auto IID = VPIntrin.getIntrinsicID();
8655
8656 if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8657 return visitVPCmp(*CmpI);
8658
8659 SmallVector<EVT, 4> ValueVTs;
8660 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8661 ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8662 SDVTList VTs = DAG.getVTList(ValueVTs);
8663
8664 auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8665
8666 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8667 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8668 "Unexpected target EVL type");
8669
8670 // Request operands.
8671 SmallVector<SDValue, 7> OpValues;
8672 for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8673 auto Op = getValue(VPIntrin.getArgOperand(I));
8674 if (I == EVLParamPos)
8675 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8676 OpValues.push_back(Op);
8677 }
8678
8679 switch (Opcode) {
8680 default: {
8681 SDNodeFlags SDFlags;
8682 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8683 SDFlags.copyFMF(*FPMO);
8684 SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8685 setValue(&VPIntrin, Result);
8686 break;
8687 }
8688 case ISD::VP_LOAD:
8689 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8690 break;
8691 case ISD::VP_GATHER:
8692 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8693 break;
8694 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8695 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8696 break;
8697 case ISD::VP_STORE:
8698 visitVPStore(VPIntrin, OpValues);
8699 break;
8700 case ISD::VP_SCATTER:
8701 visitVPScatter(VPIntrin, OpValues);
8702 break;
8703 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8704 visitVPStridedStore(VPIntrin, OpValues);
8705 break;
8706 case ISD::VP_FMULADD: {
8707 assert(OpValues.size() == 5 && "Unexpected number of operands");
8708 SDNodeFlags SDFlags;
8709 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8710 SDFlags.copyFMF(*FPMO);
8711 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8712 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8713 setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8714 } else {
8715 SDValue Mul = DAG.getNode(
8716 ISD::VP_FMUL, DL, VTs,
8717 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8718 SDValue Add =
8719 DAG.getNode(ISD::VP_FADD, DL, VTs,
8720 {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8721 setValue(&VPIntrin, Add);
8722 }
8723 break;
8724 }
8725 case ISD::VP_IS_FPCLASS: {
8726 const DataLayout DLayout = DAG.getDataLayout();
8727 EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8728 auto Constant = OpValues[1]->getAsZExtVal();
8729 SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8730 SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8731 {OpValues[0], Check, OpValues[2], OpValues[3]});
8732 setValue(&VPIntrin, V);
8733 return;
8734 }
8735 case ISD::VP_INTTOPTR: {
8736 SDValue N = OpValues[0];
8737 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8738 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8739 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8740 OpValues[2]);
8741 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8742 OpValues[2]);
8743 setValue(&VPIntrin, N);
8744 break;
8745 }
8746 case ISD::VP_PTRTOINT: {
8747 SDValue N = OpValues[0];
8748 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8749 VPIntrin.getType());
8750 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8751 VPIntrin.getOperand(0)->getType());
8752 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8753 OpValues[2]);
8754 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8755 OpValues[2]);
8756 setValue(&VPIntrin, N);
8757 break;
8758 }
8759 case ISD::VP_ABS:
8760 case ISD::VP_CTLZ:
8761 case ISD::VP_CTLZ_ZERO_UNDEF:
8762 case ISD::VP_CTTZ:
8763 case ISD::VP_CTTZ_ZERO_UNDEF:
8764 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8765 case ISD::VP_CTTZ_ELTS: {
8766 SDValue Result =
8767 DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8768 setValue(&VPIntrin, Result);
8769 break;
8770 }
8771 }
8772 }
8773
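/// Emit the label that opens the exception try range for a potentially
/// throwing call, and record the SjLj call-site index if one is being
/// tracked.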
8774 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8775 const BasicBlock *EHPadBB,
8776 MCSymbol *&BeginLabel) {
8777 MachineFunction &MF = DAG.getMachineFunction();
8778
8779 // Insert a label before the invoke call to mark the try range. This can be
8780 // used to detect deletion of the invoke via the MachineModuleInfo.
8781 BeginLabel = MF.getContext().createTempSymbol();
8782
8783 // For SjLj, keep track of which landing pads go with which invokes
8784 // so as to maintain the ordering of pads in the LSDA.
8785 unsigned CallSiteIndex = FuncInfo.getCurrentCallSite();
8786 if (CallSiteIndex) {
8787 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8788 LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex);
8789
8790 // Now that the call site is handled, stop tracking it.
8791 FuncInfo.setCurrentCallSite(0);
8792 }
8793
8794 return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8795 }
8796
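/// Emit the label that closes the exception try range opened by lowerStartEH
/// and register the range with the function's EH info.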
8797 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8798 const BasicBlock *EHPadBB,
8799 MCSymbol *BeginLabel) {
8800 assert(BeginLabel && "BeginLabel should've been set");
8801
8802 MachineFunction &MF = DAG.getMachineFunction();
8803
8804 // Insert a label at the end of the invoke call to mark the try range. This
8805 // can be used to detect deletion of the invoke via the MachineModuleInfo.
8806 MCSymbol *EndLabel = MF.getContext().createTempSymbol();
8807 Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8808
8809 // Inform MachineModuleInfo of range.
8810 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8811 // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8812 // outlined funclets or their LSDA-style EH info.
8813 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8814 assert(II && "II should've been set");
8815 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8816 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8817 } else if (!isScopedEHPersonality(Pers)) {
8818 assert(EHPadBB);
8819 MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel);
8820 }
8821
8822 return Chain;
8823 }
8824
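/// Lower a call that may unwind to \p EHPadBB, bracketing it with EH labels
/// when needed. Returns the call's result value and the new chain; a null
/// chain indicates that a tail call was emitted and the DAG root was already
/// updated.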
8825 std::pair<SDValue, SDValue>
8826 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8827 const BasicBlock *EHPadBB) {
8828 MCSymbol *BeginLabel = nullptr;
8829
8830 if (EHPadBB) {
8831 // Both PendingLoads and PendingExports must be flushed here;
8832 // this call might not return.
8833 (void)getRoot();
8834 DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8835 CLI.setChain(getRoot());
8836 }
8837
8838 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8839 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8840
8841 assert((CLI.IsTailCall || Result.second.getNode()) &&
8842 "Non-null chain expected with non-tail call!");
8843 assert((Result.second.getNode() || !Result.first.getNode()) &&
8844 "Null value expected with tail call!");
8845
8846 if (!Result.second.getNode()) {
8847 // As a special case, a null chain means that a tail call has been emitted
8848 // and the DAG root is already updated.
8849 HasTailCall = true;
8850
8851 // Since there's no actual continuation from this block, nothing can rely
8852 // on us setting vregs for the call's results.
8853 PendingExports.clear();
8854 } else {
8855 DAG.setRoot(Result.second);
8856 }
8857
8858 if (EHPadBB) {
8859 DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8860 BeginLabel));
8861 Result.second = getRoot();
8862 }
8863
8864 return Result;
8865 }
8866
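/// Lower an IR call or invoke: build the argument list, apply the
/// target-independent tail-call restrictions (swifterror, sret,
/// disable-tail-calls, call position), honor kcfi/convergencectrl/ptrauth
/// bundles, and hand the result to lowerInvokable.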
8867 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8868 bool isTailCall, bool isMustTailCall,
8869 const BasicBlock *EHPadBB,
8870 const TargetLowering::PtrAuthInfo *PAI) {
8871 auto &DL = DAG.getDataLayout();
8872 FunctionType *FTy = CB.getFunctionType();
8873 Type *RetTy = CB.getType();
8874
8875 TargetLowering::ArgListTy Args;
8876 Args.reserve(CB.arg_size());
8877
8878 const Value *SwiftErrorVal = nullptr;
8879 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8880
8881 if (isTailCall) {
8882 // Avoid emitting tail calls in functions with the disable-tail-calls
8883 // attribute.
8884 auto *Caller = CB.getParent()->getParent();
8885 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8886 "true" && !isMustTailCall)
8887 isTailCall = false;
8888
8889 // We can't tail call inside a function with a swifterror argument. Lowering
8890 // does not support this yet; the value would have to be moved into the
8891 // swifterror register before the call.
8892 if (TLI.supportSwiftError() &&
8893 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8894 isTailCall = false;
8895 }
8896
8897 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8898 TargetLowering::ArgListEntry Entry;
8899 const Value *V = *I;
8900
8901 // Skip empty types
8902 if (V->getType()->isEmptyTy())
8903 continue;
8904
8905 SDValue ArgNode = getValue(V);
8906 Entry.Node = ArgNode; Entry.Ty = V->getType();
8907
8908 Entry.setAttributes(&CB, I - CB.arg_begin());
8909
8910 // Use swifterror virtual register as input to the call.
8911 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8912 SwiftErrorVal = V;
8913 // We find the virtual register for the actual swifterror argument.
8914 // Instead of using the Value, we use the virtual register instead.
8915 Entry.Node =
8916 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8917 EVT(TLI.getPointerTy(DL)));
8918 }
8919
8920 Args.push_back(Entry);
8921
8922 // If we have an explicit sret argument that is an Instruction (i.e., it
8923 // might point to function-local memory), we can't meaningfully tail-call.
8924 if (Entry.IsSRet && isa<Instruction>(V))
8925 isTailCall = false;
8926 }
8927
8928 // If call site has a cfguardtarget operand bundle, create and add an
8929 // additional ArgListEntry.
8930 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8931 TargetLowering::ArgListEntry Entry;
8932 Value *V = Bundle->Inputs[0];
8933 SDValue ArgNode = getValue(V);
8934 Entry.Node = ArgNode;
8935 Entry.Ty = V->getType();
8936 Entry.IsCFGuardTarget = true;
8937 Args.push_back(Entry);
8938 }
8939
8940 // Check if target-independent constraints permit a tail call here.
8941 // Target-dependent constraints are checked within TLI->LowerCallTo.
8942 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8943 isTailCall = false;
8944
8945 // Disable tail calls if there is a swifterror argument. Targets have not
8946 // been updated to support tail calls.
8947 if (TLI.supportSwiftError() && SwiftErrorVal)
8948 isTailCall = false;
8949
8950 ConstantInt *CFIType = nullptr;
8951 if (CB.isIndirectCall()) {
8952 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8953 if (!TLI.supportKCFIBundles())
8954 report_fatal_error(
8955 "Target doesn't support calls with kcfi operand bundles.");
8956 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8957 assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8958 }
8959 }
8960
8961 SDValue ConvControlToken;
8962 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
8963 auto *Token = Bundle->Inputs[0].get();
8964 ConvControlToken = getValue(Token);
8965 }
8966
8967 TargetLowering::CallLoweringInfo CLI(DAG);
8968 CLI.setDebugLoc(getCurSDLoc())
8969 .setChain(getRoot())
8970 .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8971 .setTailCall(isTailCall)
8972 .setConvergent(CB.isConvergent())
8973 .setIsPreallocated(
8974 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8975 .setCFIType(CFIType)
8976 .setConvergenceControlToken(ConvControlToken);
8977
8978 // Set the pointer authentication info if we have it.
8979 if (PAI) {
8980 if (!TLI.supportPtrAuthBundles())
8981 report_fatal_error(
8982 "This target doesn't support calls with ptrauth operand bundles.");
8983 CLI.setPtrAuth(*PAI);
8984 }
8985
8986 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8987
8988 if (Result.first.getNode()) {
8989 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8990 setValue(&CB, Result.first);
8991 }
8992
8993 // The last element of CLI.InVals has the SDValue for swifterror return.
8994 // Here we copy it to a virtual register and update SwiftErrorMap for
8995 // book-keeping.
8996 if (SwiftErrorVal && TLI.supportSwiftError()) {
8997 // Get the last element of InVals.
8998 SDValue Src = CLI.InVals.back();
8999 Register VReg =
9000 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
9001 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
9002 DAG.setRoot(CopyNode);
9003 }
9004 }
9005
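/// Produce the value loaded from \p PtrVal for an expanded memcmp, constant
/// folding the load when the pointer is a known constant such as a string
/// literal.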
9006 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
9007 SelectionDAGBuilder &Builder) {
9008 // Check to see if this load can be trivially constant folded, e.g. if the
9009 // input is from a string literal.
9010 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
9011 // Cast pointer to the type we really want to load.
9012 Type *LoadTy =
9013 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
9014 if (LoadVT.isVector())
9015 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
9016 if (const Constant *LoadCst =
9017 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
9018 LoadTy, Builder.DAG.getDataLayout()))
9019 return Builder.getValue(LoadCst);
9020 }
9021
9022 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
9023 // still constant memory, the input chain can be the entry node.
9024 SDValue Root;
9025 bool ConstantMemory = false;
9026
9027 // Do not serialize (non-volatile) loads of constant memory with anything.
9028 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9029 Root = Builder.DAG.getEntryNode();
9030 ConstantMemory = true;
9031 } else {
9032 // Do not serialize non-volatile loads against each other.
9033 Root = Builder.DAG.getRoot();
9034 }
9035
9036 SDValue Ptr = Builder.getValue(PtrVal);
9037 SDValue LoadVal =
9038 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9039 MachinePointerInfo(PtrVal), Align(1));
9040
9041 if (!ConstantMemory)
9042 Builder.PendingLoads.push_back(LoadVal.getValue(1));
9043 return LoadVal;
9044 }
9045
9046 /// Record the value for an instruction that produces an integer result,
9047 /// converting the type where necessary.
9048 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
9049 SDValue Value,
9050 bool IsSigned) {
9051 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9052 I.getType(), true);
9053 Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
9054 setValue(&I, Value);
9055 }
9056
9057 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
9058 /// true and lower it. Otherwise return false, and it will be lowered like a
9059 /// normal call.
9060 /// The caller already checked that \p I calls the appropriate LibFunc with a
9061 /// correct prototype.
9062 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
9063 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
9064 const Value *Size = I.getArgOperand(2);
9065 const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
9066 if (CSize && CSize->getZExtValue() == 0) {
9067 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9068 I.getType(), true);
9069 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
9070 return true;
9071 }
9072
9073 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9074 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
9075 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
9076 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
9077 if (Res.first.getNode()) {
9078 processIntegerCallValue(I, Res.first, true);
9079 PendingLoads.push_back(Res.second);
9080 return true;
9081 }
9082
9083 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
9084 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
9085 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
9086 return false;
9087
9088 // If the target has a fast compare for the given size, it will return a
9089 // preferred load type for that size. Require that the load VT is legal and
9090 // that the target supports unaligned loads of that type. Otherwise, return
9091 // INVALID.
9092 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
9093 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9094 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
9095 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
9096 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
9097 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
9098 // TODO: Check alignment of src and dest ptrs.
9099 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
9100 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
9101 if (!TLI.isTypeLegal(LVT) ||
9102 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
9103 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
9104 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
9105 }
9106
9107 return LVT;
9108 };
9109
9110 // This turns into unaligned loads. We only do this if the target natively
9111 // supports the MVT we'll be loading or if it is small enough (<= 4) that
9112 // we'll only produce a small number of byte loads.
9113 MVT LoadVT;
9114 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
9115 switch (NumBitsToCompare) {
9116 default:
9117 return false;
9118 case 16:
9119 LoadVT = MVT::i16;
9120 break;
9121 case 32:
9122 LoadVT = MVT::i32;
9123 break;
9124 case 64:
9125 case 128:
9126 case 256:
9127 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9128 break;
9129 }
9130
9131 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
9132 return false;
9133
9134 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
9135 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
9136
9137 // Bitcast to a wide integer type if the loads are vectors.
9138 if (LoadVT.isVector()) {
9139 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
9140 LoadL = DAG.getBitcast(CmpVT, LoadL);
9141 LoadR = DAG.getBitcast(CmpVT, LoadR);
9142 }
9143
9144 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
9145 processIntegerCallValue(I, Cmp, false);
9146 return true;
9147 }
9148
9149 /// See if we can lower a memchr call into an optimized form. If so, return
9150 /// true and lower it. Otherwise return false, and it will be lowered like a
9151 /// normal call.
9152 /// The caller already checked that \p I calls the appropriate LibFunc with a
9153 /// correct prototype.
9154 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
9155 const Value *Src = I.getArgOperand(0);
9156 const Value *Char = I.getArgOperand(1);
9157 const Value *Length = I.getArgOperand(2);
9158
9159 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9160 std::pair<SDValue, SDValue> Res =
9161 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
9162 getValue(Src), getValue(Char), getValue(Length),
9163 MachinePointerInfo(Src));
9164 if (Res.first.getNode()) {
9165 setValue(&I, Res.first);
9166 PendingLoads.push_back(Res.second);
9167 return true;
9168 }
9169
9170 return false;
9171 }
9172
9173 /// See if we can lower a mempcpy call into an optimized form. If so, return
9174 /// true and lower it. Otherwise return false, and it will be lowered like a
9175 /// normal call.
9176 /// The caller already checked that \p I calls the appropriate LibFunc with a
9177 /// correct prototype.
9178 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
9179 SDValue Dst = getValue(I.getArgOperand(0));
9180 SDValue Src = getValue(I.getArgOperand(1));
9181 SDValue Size = getValue(I.getArgOperand(2));
9182
9183 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
9184 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
9185 // DAG::getMemcpy needs Alignment to be defined.
9186 Align Alignment = std::min(DstAlign, SrcAlign);
9187
9188 SDLoc sdl = getCurSDLoc();
9189
9190 // In the mempcpy context we need to pass in a false value for isTailCall
9191 // because the return pointer needs to be adjusted by the size of
9192 // the copied memory.
9193 SDValue Root = getMemoryRoot();
9194 SDValue MC = DAG.getMemcpy(
9195 Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr,
9196 std::nullopt, MachinePointerInfo(I.getArgOperand(0)),
9197 MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata());
9198 assert(MC.getNode() != nullptr &&
9199 "** memcpy should not be lowered as TailCall in mempcpy context **");
9200 DAG.setRoot(MC);
9201
9202 // Check if Size needs to be truncated or extended.
9203 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
9204
9205 // Adjust return pointer to point just past the last dst byte.
9206 SDValue DstPlusSize = DAG.getMemBasePlusOffset(Dst, Size, sdl);
9207 setValue(&I, DstPlusSize);
9208 return true;
9209 }
9210
9211 /// See if we can lower a strcpy call into an optimized form. If so, return
9212 /// true and lower it, otherwise return false and it will be lowered like a
9213 /// normal call.
9214 /// The caller already checked that \p I calls the appropriate LibFunc with a
9215 /// correct prototype.
9216 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
9217 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9218
9219 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9220 std::pair<SDValue, SDValue> Res =
9221 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
9222 getValue(Arg0), getValue(Arg1),
9223 MachinePointerInfo(Arg0),
9224 MachinePointerInfo(Arg1), isStpcpy);
9225 if (Res.first.getNode()) {
9226 setValue(&I, Res.first);
9227 DAG.setRoot(Res.second);
9228 return true;
9229 }
9230
9231 return false;
9232 }
9233
9234 /// See if we can lower a strcmp call into an optimized form. If so, return
9235 /// true and lower it, otherwise return false and it will be lowered like a
9236 /// normal call.
9237 /// The caller already checked that \p I calls the appropriate LibFunc with a
9238 /// correct prototype.
9239 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
9240 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9241
9242 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9243 std::pair<SDValue, SDValue> Res =
9244 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
9245 getValue(Arg0), getValue(Arg1),
9246 MachinePointerInfo(Arg0),
9247 MachinePointerInfo(Arg1));
9248 if (Res.first.getNode()) {
9249 processIntegerCallValue(I, Res.first, true);
9250 PendingLoads.push_back(Res.second);
9251 return true;
9252 }
9253
9254 return false;
9255 }
9256
9257 /// See if we can lower a strlen call into an optimized form. If so, return
9258 /// true and lower it, otherwise return false and it will be lowered like a
9259 /// normal call.
9260 /// The caller already checked that \p I calls the appropriate LibFunc with a
9261 /// correct prototype.
9262 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
9263 const Value *Arg0 = I.getArgOperand(0);
9264
9265 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9266 std::pair<SDValue, SDValue> Res =
9267 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
9268 getValue(Arg0), MachinePointerInfo(Arg0));
9269 if (Res.first.getNode()) {
9270 processIntegerCallValue(I, Res.first, false);
9271 PendingLoads.push_back(Res.second);
9272 return true;
9273 }
9274
9275 return false;
9276 }
9277
9278 /// See if we can lower a strnlen call into an optimized form. If so, return
9279 /// true and lower it, otherwise return false and it will be lowered like a
9280 /// normal call.
9281 /// The caller already checked that \p I calls the appropriate LibFunc with a
9282 /// correct prototype.
9283 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
9284 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9285
9286 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9287 std::pair<SDValue, SDValue> Res =
9288 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
9289 getValue(Arg0), getValue(Arg1),
9290 MachinePointerInfo(Arg0));
9291 if (Res.first.getNode()) {
9292 processIntegerCallValue(I, Res.first, false);
9293 PendingLoads.push_back(Res.second);
9294 return true;
9295 }
9296
9297 return false;
9298 }
9299
9300 /// See if we can lower a unary floating-point operation into an SDNode with
9301 /// the specified Opcode. If so, return true and lower it, otherwise return
9302 /// false and it will be lowered like a normal call.
9303 /// The caller already checked that \p I calls the appropriate LibFunc with a
9304 /// correct prototype.
9305 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
9306 unsigned Opcode) {
9307 // We already checked this call's prototype; verify it doesn't modify errno.
9308 if (!I.onlyReadsMemory())
9309 return false;
9310
9311 SDNodeFlags Flags;
9312 Flags.copyFMF(cast<FPMathOperator>(I));
9313
9314 SDValue Tmp = getValue(I.getArgOperand(0));
9315 setValue(&I,
9316 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
9317 return true;
9318 }
9319
9320 /// See if we can lower a binary floating-point operation into an SDNode with
9321 /// the specified Opcode. If so, return true and lower it. Otherwise return
9322 /// false, and it will be lowered like a normal call.
9323 /// The caller already checked that \p I calls the appropriate LibFunc with a
9324 /// correct prototype.
9325 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
9326 unsigned Opcode) {
9327 // We already checked this call's prototype; verify it doesn't modify errno.
9328 if (!I.onlyReadsMemory())
9329 return false;
9330
9331 SDNodeFlags Flags;
9332 Flags.copyFMF(cast<FPMathOperator>(I));
9333
9334 SDValue Tmp0 = getValue(I.getArgOperand(0));
9335 SDValue Tmp1 = getValue(I.getArgOperand(1));
9336 EVT VT = Tmp0.getValueType();
9337 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
9338 return true;
9339 }
9340
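/// Lower a call instruction. Inline asm and intrinsics are handled
/// specially, recognized library calls may be emitted as optimized ISD
/// nodes, and everything else goes through LowerCallTo.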
9341 void SelectionDAGBuilder::visitCall(const CallInst &I) {
9342 // Handle inline assembly differently.
9343 if (I.isInlineAsm()) {
9344 visitInlineAsm(I);
9345 return;
9346 }
9347
9348 diagnoseDontCall(I);
9349
9350 if (Function *F = I.getCalledFunction()) {
9351 if (F->isDeclaration()) {
9352 // Is this an LLVM intrinsic?
9353 if (unsigned IID = F->getIntrinsicID()) {
9354 visitIntrinsicCall(I, IID);
9355 return;
9356 }
9357 }
9358
9359 // Check for well-known libc/libm calls. If the function is internal, it
9360 // can't be a library call. Don't do the check if marked as nobuiltin for
9361 // some reason or the call site requires strict floating point semantics.
9362 LibFunc Func;
9363 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
9364 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
9365 LibInfo->hasOptimizedCodeGen(Func)) {
9366 switch (Func) {
9367 default: break;
9368 case LibFunc_bcmp:
9369 if (visitMemCmpBCmpCall(I))
9370 return;
9371 break;
9372 case LibFunc_copysign:
9373 case LibFunc_copysignf:
9374 case LibFunc_copysignl:
9375 // We already checked this call's prototype; verify it doesn't modify
9376 // errno.
9377 if (I.onlyReadsMemory()) {
9378 SDValue LHS = getValue(I.getArgOperand(0));
9379 SDValue RHS = getValue(I.getArgOperand(1));
9380 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
9381 LHS.getValueType(), LHS, RHS));
9382 return;
9383 }
9384 break;
9385 case LibFunc_fabs:
9386 case LibFunc_fabsf:
9387 case LibFunc_fabsl:
9388 if (visitUnaryFloatCall(I, ISD::FABS))
9389 return;
9390 break;
9391 case LibFunc_fmin:
9392 case LibFunc_fminf:
9393 case LibFunc_fminl:
9394 if (visitBinaryFloatCall(I, ISD::FMINNUM))
9395 return;
9396 break;
9397 case LibFunc_fmax:
9398 case LibFunc_fmaxf:
9399 case LibFunc_fmaxl:
9400 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
9401 return;
9402 break;
9403 case LibFunc_fminimum_num:
9404 case LibFunc_fminimum_numf:
9405 case LibFunc_fminimum_numl:
9406 if (visitBinaryFloatCall(I, ISD::FMINIMUMNUM))
9407 return;
9408 break;
9409 case LibFunc_fmaximum_num:
9410 case LibFunc_fmaximum_numf:
9411 case LibFunc_fmaximum_numl:
9412 if (visitBinaryFloatCall(I, ISD::FMAXIMUMNUM))
9413 return;
9414 break;
9415 case LibFunc_sin:
9416 case LibFunc_sinf:
9417 case LibFunc_sinl:
9418 if (visitUnaryFloatCall(I, ISD::FSIN))
9419 return;
9420 break;
9421 case LibFunc_cos:
9422 case LibFunc_cosf:
9423 case LibFunc_cosl:
9424 if (visitUnaryFloatCall(I, ISD::FCOS))
9425 return;
9426 break;
9427 case LibFunc_tan:
9428 case LibFunc_tanf:
9429 case LibFunc_tanl:
9430 if (visitUnaryFloatCall(I, ISD::FTAN))
9431 return;
9432 break;
9433 case LibFunc_asin:
9434 case LibFunc_asinf:
9435 case LibFunc_asinl:
9436 if (visitUnaryFloatCall(I, ISD::FASIN))
9437 return;
9438 break;
9439 case LibFunc_acos:
9440 case LibFunc_acosf:
9441 case LibFunc_acosl:
9442 if (visitUnaryFloatCall(I, ISD::FACOS))
9443 return;
9444 break;
9445 case LibFunc_atan:
9446 case LibFunc_atanf:
9447 case LibFunc_atanl:
9448 if (visitUnaryFloatCall(I, ISD::FATAN))
9449 return;
9450 break;
9451 case LibFunc_atan2:
9452 case LibFunc_atan2f:
9453 case LibFunc_atan2l:
9454 if (visitBinaryFloatCall(I, ISD::FATAN2))
9455 return;
9456 break;
9457 case LibFunc_sinh:
9458 case LibFunc_sinhf:
9459 case LibFunc_sinhl:
9460 if (visitUnaryFloatCall(I, ISD::FSINH))
9461 return;
9462 break;
9463 case LibFunc_cosh:
9464 case LibFunc_coshf:
9465 case LibFunc_coshl:
9466 if (visitUnaryFloatCall(I, ISD::FCOSH))
9467 return;
9468 break;
9469 case LibFunc_tanh:
9470 case LibFunc_tanhf:
9471 case LibFunc_tanhl:
9472 if (visitUnaryFloatCall(I, ISD::FTANH))
9473 return;
9474 break;
9475 case LibFunc_sqrt:
9476 case LibFunc_sqrtf:
9477 case LibFunc_sqrtl:
9478 case LibFunc_sqrt_finite:
9479 case LibFunc_sqrtf_finite:
9480 case LibFunc_sqrtl_finite:
9481 if (visitUnaryFloatCall(I, ISD::FSQRT))
9482 return;
9483 break;
9484 case LibFunc_floor:
9485 case LibFunc_floorf:
9486 case LibFunc_floorl:
9487 if (visitUnaryFloatCall(I, ISD::FFLOOR))
9488 return;
9489 break;
9490 case LibFunc_nearbyint:
9491 case LibFunc_nearbyintf:
9492 case LibFunc_nearbyintl:
9493 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
9494 return;
9495 break;
9496 case LibFunc_ceil:
9497 case LibFunc_ceilf:
9498 case LibFunc_ceill:
9499 if (visitUnaryFloatCall(I, ISD::FCEIL))
9500 return;
9501 break;
9502 case LibFunc_rint:
9503 case LibFunc_rintf:
9504 case LibFunc_rintl:
9505 if (visitUnaryFloatCall(I, ISD::FRINT))
9506 return;
9507 break;
9508 case LibFunc_round:
9509 case LibFunc_roundf:
9510 case LibFunc_roundl:
9511 if (visitUnaryFloatCall(I, ISD::FROUND))
9512 return;
9513 break;
9514 case LibFunc_trunc:
9515 case LibFunc_truncf:
9516 case LibFunc_truncl:
9517 if (visitUnaryFloatCall(I, ISD::FTRUNC))
9518 return;
9519 break;
9520 case LibFunc_log2:
9521 case LibFunc_log2f:
9522 case LibFunc_log2l:
9523 if (visitUnaryFloatCall(I, ISD::FLOG2))
9524 return;
9525 break;
9526 case LibFunc_exp2:
9527 case LibFunc_exp2f:
9528 case LibFunc_exp2l:
9529 if (visitUnaryFloatCall(I, ISD::FEXP2))
9530 return;
9531 break;
9532 case LibFunc_exp10:
9533 case LibFunc_exp10f:
9534 case LibFunc_exp10l:
9535 if (visitUnaryFloatCall(I, ISD::FEXP10))
9536 return;
9537 break;
9538 case LibFunc_ldexp:
9539 case LibFunc_ldexpf:
9540 case LibFunc_ldexpl:
9541 if (visitBinaryFloatCall(I, ISD::FLDEXP))
9542 return;
9543 break;
9544 case LibFunc_memcmp:
9545 if (visitMemCmpBCmpCall(I))
9546 return;
9547 break;
9548 case LibFunc_mempcpy:
9549 if (visitMemPCpyCall(I))
9550 return;
9551 break;
9552 case LibFunc_memchr:
9553 if (visitMemChrCall(I))
9554 return;
9555 break;
9556 case LibFunc_strcpy:
9557 if (visitStrCpyCall(I, false))
9558 return;
9559 break;
9560 case LibFunc_stpcpy:
9561 if (visitStrCpyCall(I, true))
9562 return;
9563 break;
9564 case LibFunc_strcmp:
9565 if (visitStrCmpCall(I))
9566 return;
9567 break;
9568 case LibFunc_strlen:
9569 if (visitStrLenCall(I))
9570 return;
9571 break;
9572 case LibFunc_strnlen:
9573 if (visitStrNLenCall(I))
9574 return;
9575 break;
9576 }
9577 }
9578 }
9579
9580 if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
9581 LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
9582 return;
9583 }
9584
9585 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
9586 // have to do anything here to lower funclet bundles.
9587 // CFGuardTarget bundles are lowered in LowerCallTo.
9588 if (I.hasOperandBundlesOtherThan(
9589 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9590 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9591 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9592 LLVMContext::OB_convergencectrl}))
9593 reportFatalUsageError("cannot lower calls with arbitrary operand bundles!");
9594
9595 SDValue Callee = getValue(I.getCalledOperand());
9596
9597 if (I.hasDeoptState())
9598 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
9599 else
9600 // Check if we can potentially perform a tail call. More detailed checking
9601 // is done within LowerCallTo, after more information about the call is
9602 // known.
9603 LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
9604 }
9605
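/// Lower a call site carrying a "ptrauth" operand bundle: emit a direct call
/// when the callee is a ptrauth constant compatible with the bundle's key
/// and discriminator, and an authenticated indirect call otherwise.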
9606 void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(
9607 const CallBase &CB, const BasicBlock *EHPadBB) {
9608 auto PAB = CB.getOperandBundle("ptrauth");
9609 const Value *CalleeV = CB.getCalledOperand();
9610
9611 // Gather the call ptrauth data from the operand bundle:
9612 // [ i32 <key>, i64 <discriminator> ]
9613 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9614 const Value *Discriminator = PAB->Inputs[1];
9615
9616 assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
9617 assert(Discriminator->getType()->isIntegerTy(64) &&
9618 "Invalid ptrauth discriminator");
9619
9620 // Look through ptrauth constants to find the raw callee.
9621 // Do a direct unauthenticated call if we found it and everything matches.
9622 if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9623 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9624 DAG.getDataLayout()))
9625 return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
9626 CB.isMustTailCall(), EHPadBB);
9627
9628 // Functions should never be ptrauth-called directly.
9629 assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");
9630
9631 // Otherwise, do an authenticated indirect call.
9632 TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
9633 getValue(Discriminator)};
9634
9635 LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
9636 EHPadBB, &PAI);
9637 }
9638
9639 namespace {
9640
9641 /// AsmOperandInfo - This contains information for each constraint that we are
9642 /// lowering.
9643 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
9644 public:
9645 /// CallOperand - If this is the result output operand or a clobber
9646 /// this is null, otherwise it is the incoming operand to the CallInst.
9647 /// This gets modified as the asm is processed.
9648 SDValue CallOperand;
9649
9650 /// AssignedRegs - If this is a register or register class operand, this
9651 /// contains the set of registers corresponding to the operand.
9652 RegsForValue AssignedRegs;
9653
9654 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
9655 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
9656 }
9657
9658 /// Whether or not this operand accesses memory
9659 bool hasMemory(const TargetLowering &TLI) const {
9660 // Indirect operands access memory.
9661 if (isIndirect)
9662 return true;
9663
9664 for (const auto &Code : Codes)
9665 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
9666 return true;
9667
9668 return false;
9669 }
9670 };
9671
9673 } // end anonymous namespace
9674
9675 /// Make sure that the output operand \p OpInfo and its corresponding input
9676 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9677 /// out).
9678 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9679 SDISelAsmOperandInfo &MatchingOpInfo,
9680 SelectionDAG &DAG) {
9681 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9682 return;
9683
9684 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9685 const auto &TLI = DAG.getTargetLoweringInfo();
9686
9687 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9688 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9689 OpInfo.ConstraintVT);
9690 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9691 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9692 MatchingOpInfo.ConstraintVT);
9693 const bool OutOpIsIntOrFP =
9694 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9695 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9696 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9697 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9698 // FIXME: error out in a more elegant fashion
9699 report_fatal_error("Unsupported asm: input constraint"
9700 " with a matching output constraint of"
9701 " incompatible type!");
9702 }
9703 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9704 }
9705
9706 /// Get a direct memory input to behave well as an indirect operand.
9707 /// This may introduce stores, hence the need for a \p Chain.
9708 /// \return The (possibly updated) chain.
9709 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9710 SDISelAsmOperandInfo &OpInfo,
9711 SelectionDAG &DAG) {
9712 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9713
9714 // If we don't have an indirect input, put it in the constpool if we can,
9715 // otherwise spill it to a stack slot.
9716 // TODO: This isn't quite right. We need to handle these according to
9717 // the addressing mode that the constraint wants. Also, this may take
9718 // an additional register for the computation and we don't want that
9719 // either.
9720
9721 // If the operand is a float, integer, or vector constant, spill to a
9722 // constant pool entry to get its address.
9723 const Value *OpVal = OpInfo.CallOperandVal;
9724 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9725 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9726 OpInfo.CallOperand = DAG.getConstantPool(
9727 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9728 return Chain;
9729 }
9730
9731 // Otherwise, create a stack slot and emit a store to it before the asm.
9732 Type *Ty = OpVal->getType();
9733 auto &DL = DAG.getDataLayout();
9734 TypeSize TySize = DL.getTypeAllocSize(Ty);
9735 MachineFunction &MF = DAG.getMachineFunction();
9736 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
9737 int StackID = 0;
9738 if (TySize.isScalable())
9739 StackID = TFI->getStackIDForScalableVectors();
9740 int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(),
9741 DL.getPrefTypeAlign(Ty), false,
9742 nullptr, StackID);
9743 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9744 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9745 MachinePointerInfo::getFixedStack(MF, SSFI),
9746 TLI.getMemValueType(DL, Ty));
9747 OpInfo.CallOperand = StackSlot;
9748
9749 return Chain;
9750 }
9751
9752 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9753 /// specified operand. We prefer to assign virtual registers, to allow the
9754 /// register allocator to handle the assignment process. However, if the asm
9755 /// uses features that we can't model on machineinstrs, we have SDISel do the
9756 /// allocation. This produces generally horrible, but correct, code.
9757 ///
9758 /// OpInfo describes the operand
9759 /// RefOpInfo describes the matching operand if any, the operand otherwise
9760 static std::optional<unsigned>
9761 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9762 SDISelAsmOperandInfo &OpInfo,
9763 SDISelAsmOperandInfo &RefOpInfo) {
9764 LLVMContext &Context = *DAG.getContext();
9765 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9766
9767 MachineFunction &MF = DAG.getMachineFunction();
9768 SmallVector<Register, 4> Regs;
9769 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9770
9771 // No work to do for memory/address operands.
9772 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9773 OpInfo.ConstraintType == TargetLowering::C_Address)
9774 return std::nullopt;
9775
9776 // If this is a constraint for a single physreg, or a constraint for a
9777 // register class, find it.
9778 unsigned AssignedReg;
9779 const TargetRegisterClass *RC;
9780 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9781 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9782 // RC is unset only on failure. Return immediately.
9783 if (!RC)
9784 return std::nullopt;
9785
9786 // Get the actual register value type. This is important, because the user
9787 // may have asked for (e.g.) the AX register in i32 type. We need to
9788 // remember that AX is actually i16 to get the right extension.
9789 const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9790
9791 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9792 // If this is an FP operand in an integer register (or vice versa), or more
9793 // generally if the operand value disagrees with the register class we plan
9794 // to stick it in, fix the operand type.
9795 //
9796 // If this is an input value, the bitcast to the new type is done now.
9797 // Bitcast for output value is done at the end of visitInlineAsm().
9798 if ((OpInfo.Type == InlineAsm::isOutput ||
9799 OpInfo.Type == InlineAsm::isInput) &&
9800 !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9801 // Try to convert to the first EVT that the reg class contains. If the
9802 // types are identical size, use a bitcast to convert (e.g. two differing
9803 // vector types). Note: output bitcast is done at the end of
9804 // visitInlineAsm().
9805 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9806 // Exclude indirect inputs while they are unsupported because the code
9807 // to perform the load is missing and thus OpInfo.CallOperand still
9808 // refers to the input address rather than the pointed-to value.
9809 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9810 OpInfo.CallOperand =
9811 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9812 OpInfo.ConstraintVT = RegVT;
9813 // If the operand is an FP value and we want it in integer registers,
9814 // use the corresponding integer type. This turns an f64 value into
9815 // i64, which can be passed with two i32 values on a 32-bit machine.
9816 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9817 MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9818 if (OpInfo.Type == InlineAsm::isInput)
9819 OpInfo.CallOperand =
9820 DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9821 OpInfo.ConstraintVT = VT;
9822 }
9823 }
9824 }
9825
9826 // No need to allocate a matching input constraint since the constraint it's
9827 // matching to has already been allocated.
9828 if (OpInfo.isMatchingInputConstraint())
9829 return std::nullopt;
9830
9831 EVT ValueVT = OpInfo.ConstraintVT;
9832 if (OpInfo.ConstraintVT == MVT::Other)
9833 ValueVT = RegVT;
9834
9835 // Initialize NumRegs.
9836 unsigned NumRegs = 1;
9837 if (OpInfo.ConstraintVT != MVT::Other)
9838 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9839
9840 // If this is a constraint for a specific physical register, like {r17},
9841 // assign it now.
9842
9843 // If this is associated with a specific register, initialize the iterator
9844 // to the correct place. If virtual, make sure we have enough registers.
9845
9846 // Initialize iterator if necessary
9847 TargetRegisterClass::iterator I = RC->begin();
9848 MachineRegisterInfo &RegInfo = MF.getRegInfo();
9849
9850 // Do not check for single registers.
9851 if (AssignedReg) {
9852 I = std::find(I, RC->end(), AssignedReg);
9853 if (I == RC->end()) {
9854 // RC does not contain the selected register, which indicates a
9855 // mismatch between the register and the required type/bitwidth.
9856 return {AssignedReg};
9857 }
9858 }
9859
9860 for (; NumRegs; --NumRegs, ++I) {
9861 assert(I != RC->end() && "Ran out of registers to allocate!");
9862 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9863 Regs.push_back(R);
9864 }
9865
9866 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9867 return std::nullopt;
9868 }
9869
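/// Return the index of the first SDNode operand belonging to inline-asm
/// operand \p OperandNo, stepping over the flag-word/register group emitted
/// for each preceding operand.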
9870 static unsigned
9871 findMatchingInlineAsmOperand(unsigned OperandNo,
9872 const std::vector<SDValue> &AsmNodeOperands) {
9873 // Scan until we find the definition we already emitted of this operand.
9874 unsigned CurOp = InlineAsm::Op_FirstOperand;
9875 for (; OperandNo; --OperandNo) {
9876 // Advance to the next operand.
9877 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9878 const InlineAsm::Flag F(OpFlag);
9879 assert(
9880 (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9881 "Skipped past definitions?");
9882 CurOp += F.getNumOperandRegisters() + 1;
9883 }
9884 return CurOp;
9885 }
9886
9887 namespace {
9888
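/// Accumulates the extra operand flags (side effects, alignstack, asm
/// dialect, may-load/may-store) attached to an INLINEASM node.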
9889 class ExtraFlags {
9890 unsigned Flags = 0;
9891
9892 public:
9893 explicit ExtraFlags(const CallBase &Call) {
9894 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9895 if (IA->hasSideEffects())
9896 Flags |= InlineAsm::Extra_HasSideEffects;
9897 if (IA->isAlignStack())
9898 Flags |= InlineAsm::Extra_IsAlignStack;
9899 if (Call.isConvergent())
9900 Flags |= InlineAsm::Extra_IsConvergent;
9901 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9902 }
9903
9904 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9905 // Ideally, we would only check against memory constraints. However, the
9906 // meaning of an Other constraint can be target-specific and we can't easily
9907 // reason about it. Therefore, be conservative and set MayLoad/MayStore
9908 // for Other constraints as well.
9909 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9910 OpInfo.ConstraintType == TargetLowering::C_Other) {
9911 if (OpInfo.Type == InlineAsm::isInput)
9912 Flags |= InlineAsm::Extra_MayLoad;
9913 else if (OpInfo.Type == InlineAsm::isOutput)
9914 Flags |= InlineAsm::Extra_MayStore;
9915 else if (OpInfo.Type == InlineAsm::isClobber)
9916 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9917 }
9918 }
9919
get() const9920 unsigned get() const { return Flags; }
9921 };
9922
9923 } // end anonymous namespace
9924
static bool isFunction(SDValue Op) {
  if (Op && Op.getOpcode() == ISD::GlobalAddress) {
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());

      // A normal (non-inline-asm) call to a dllimport function forces
      // indirect access through its call opcode, and the asm printer then
      // emits the indirect-symbol marker (e.g. "*") based on that opcode.
      // Inline asm cannot do this today (the situation is similar to the
      // "Data Access" case), so we ignore dllimport functions here.
      if (Fn && !Fn->hasDLLImportStorageClass())
        return true;
    }
  }
  return false;
}

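// For example, IR like
//   %sum = call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %b)
// arrives here as a CallBase whose called operand is an InlineAsm value with
// one output constraint ("=r") and two input constraints ("r").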
/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
                                         const BasicBlock *EHPadBB) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);

  // First pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(Call);

  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    if (OpInfo.CallOperandVal)
      OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);

    if (!HasSideEffect)
      HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the
    // constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(T, SDValue());

    if (T.ConstraintType == TargetLowering::C_Immediate &&
        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic like for the "n" constraint
      // because inlining could cause an integer constant to show up.
      return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
                                          "' expects an integer constant "
                                          "expression");

    ExtraInfo.update(T);
  }

  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();

  bool EmitEHLabels = isa<InvokeInst>(Call);
  if (EmitEHLabels) {
    assert(EHPadBB && "InvokeInst must have an EHPadBB");
  }
  bool IsCallBr = isa<CallBrInst>(Call);

  if (IsCallBr || EmitEHLabels) {
    // If this is a callbr or invoke we need to flush pending exports since
    // inlineasm_br and invoke are terminators.
    // We need to do this before nodes are glued to the inlineasm_br node.
    Chain = getControlRoot();
  }

  MCSymbol *BeginLabel = nullptr;
  if (EmitEHLabels) {
    Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
  }

  int OpNo = -1;
  SmallVector<StringRef> AsmStrs;
  IA->collectAsmStrs(AsmStrs);

  // Second pass over the constraints: compute which constraint option to use.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
      OpNo++;

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.Type == InlineAsm::isClobber) ||
        OpInfo.ConstraintType == TargetLowering::C_Address)
      continue;

    // In the Linux PIC model, there are 4 cases of value/label addressing:
    //
    // 1: Function call or label jmp inside the module.
    // 2: Data access (such as a global or static variable) inside the module.
    // 3: Function call or label jmp outside the module.
    // 4: Data access (such as a global variable) outside the module.
    //
    // Because the current LLVM inline asm architecture is designed not to
    // "recognize" the asm code, it is quite hard to treat memory addressing
    // differently for the same value/address used in different instructions.
    // For example, in the PIC model a function call may go through the PLT or
    // be directly PC-relative, while a lea/mov of a function address may go
    // through the GOT.
    //
    // Here we try to "recognize" a function call for case 1 and case 3 in
    // inline asm, and adjust the constraint for them.
    //
    // TODO: Since current inline asm discourages jumping to labels outside
    // the module, we don't handle jumps to function labels yet, but we should
    // enhance this (especially in the PIC model) if meaningful requirements
    // come up.
    if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
        TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
        TM.getCodeModel() != CodeModel::Large) {
      OpInfo.isIndirect = false;
      OpInfo.ConstraintType = TargetLowering::C_Address;
    }

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }

  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
      IA->getAsmString().data(), TLI.getProgramPointerTy(DAG.getDataLayout())));

  // If we have a !srcloc metadata node associated with it, we want to attach
  // this to the ultimately generated inline asm machineinstr. To do this, we
  // pass in the third operand as this (potentially null) inline asm MDNode.
  const MDNode *SrcLoc = Call.getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
  // bits as operand 3.
  AsmNodeOperands.push_back(DAG.getTargetConstant(
      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));

  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // Assign Registers.
    SDISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;
    const auto RegError =
        getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
    if (RegError) {
      const MachineFunction &MF = DAG.getMachineFunction();
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      const char *RegName = TRI.getName(*RegError);
      emitInlineAsmError(Call, "register '" + Twine(RegName) +
                                   "' allocated for constraint '" +
                                   Twine(OpInfo.ConstraintCode) +
                                   "' does not match required type");
      return;
    }

    auto DetectWriteToReservedRegister = [&]() {
      const MachineFunction &MF = DAG.getMachineFunction();
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      for (Register Reg : OpInfo.AssignedRegs.Regs) {
        if (Reg.isPhysical() && TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
          const char *RegName = TRI.getName(Reg);
          emitInlineAsmError(Call, "write to reserved register '" +
                                       Twine(RegName) + "'");
          return true;
        }
      }
      return false;
    };
    assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
            (OpInfo.Type == InlineAsm::isInput &&
             !OpInfo.isMatchingInputConstraint())) &&
           "Only address as input operand is allowed.");

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this output.
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass, and a target-defined fashion for
        // C_Immediate/C_Other). Find a register that we can use.
        if (OpInfo.AssignedRegs.Regs.empty()) {
          emitInlineAsmError(
              Call, "couldn't allocate output register for constraint '" +
                        Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        if (DetectWriteToReservedRegister())
          return;

        // Add information to the INLINEASM node to know that this register is
        // set.
        OpInfo.AssignedRegs.AddInlineAsmOperands(
            OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
                                  : InlineAsm::Kind::RegDef,
            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
      }
      break;

    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {
        // If this is required to match an output register we have already set,
        // just use its register.
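        // e.g. for asm("inc %0" : "=r"(x) : "0"(x)) the "0" input is tied to
        // output 0 and must be placed in the same register(s).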
        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
                                                  AsmNodeOperands);
        InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
        if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            emitInlineAsmError(Call, "inline asm not supported yet: "
                                     "don't know how to handle tied "
                                     "indirect register inputs");
            return;
          }

          SmallVector<Register, 4> Regs;
          MachineFunction &MF = DAG.getMachineFunction();
          MachineRegisterInfo &MRI = MF.getRegInfo();
          const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
          auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
          Register TiedReg = R->getReg();
          MVT RegVT = R->getSimpleValueType(0);
          const TargetRegisterClass *RC =
              TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
              : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
                                      : TRI.getMinimalPhysRegClass(TiedReg);
          for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
            Regs.push_back(MRI.createVirtualRegister(RC));

          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());

          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the newly-created virtual registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
                                           OpInfo.getMatchedOperand(), dl, DAG,
                                           AsmNodeOperands);
          break;
        }

        assert(Flag.isMemKind() && "Unknown matching constraint!");
        assert(Flag.getNumOperandRegisters() == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        Flag.clearMemConstraint();
        Flag.setMatchingOp(OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
        break;
      }

      // Treat indirect 'X' constraint as memory.
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                         Ops, DAG);
        if (Ops.empty()) {
          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
            if (isa<ConstantSDNode>(InOperandVal)) {
              emitInlineAsmError(Call, "value out of range for constraint '" +
                                           Twine(OpInfo.ConstraintCode) + "'");
              return;
            }

          emitInlineAsmError(Call,
                             "invalid operand for inline asm constraint '" +
                                 Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know about this input.
        InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        llvm::append_range(AsmNodeOperands, Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert((OpInfo.isIndirect ||
                OpInfo.ConstraintType != TargetLowering::C_Memory) &&
               "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() ==
                   TLI.getPointerTy(DAG.getDataLayout()) &&
               "Memory operands expect pointer values");

        const InlineAsm::ConstraintCode ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this input.
        InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
        ResOpType.setMemConstraint(ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Address) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);

        SDValue AsmOp = InOperandVal;
        if (isFunction(InOperandVal)) {
          auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
          ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
          AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
                                             InOperandVal.getValueType(),
                                             GA->getOffset());
        }

        // Add information to the INLINEASM node to know about this input.
        ResOpType.setMemConstraint(ConstraintID);

        AsmNodeOperands.push_back(
            DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));

        AsmNodeOperands.push_back(AsmOp);
        break;
      }

      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
          OpInfo.ConstraintType != TargetLowering::C_Register) {
        emitInlineAsmError(Call, "unknown asm constraint '" +
                                     Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      // TODO: Support this.
      if (OpInfo.isIndirect) {
        emitInlineAsmError(
            Call, "Don't know how to handle indirect register inputs yet "
                  "for constraint '" +
                      Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        emitInlineAsmError(Call,
                           "couldn't allocate input reg for constraint '" +
                               Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      if (DetectWriteToReservedRegister())
        return;

      SDLoc dl = getCurSDLoc();

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
                                        &Call);

      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
                                               0, dl, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber:
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
                                                 false, 0, getCurSDLoc(), DAG,
                                                 AsmNodeOperands);
      break;
    }
  }

  // Finish up input operands. Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Glue.getNode()) AsmNodeOperands.push_back(Glue);

  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  Glue = Chain.getValue(1);

  // Do additional work to generate outputs.

  SmallVector<EVT, 1> ResultVTs;
  SmallVector<SDValue, 1> ResultValues;
  SmallVector<SDValue, 8> OutChains;

  llvm::Type *CallResultType = Call.getType();
  ArrayRef<Type *> ResultTypes;
  if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
    ResultTypes = StructResult->elements();
  else if (!CallResultType->isVoidTy())
    ResultTypes = ArrayRef(CallResultType);

  auto CurResultType = ResultTypes.begin();
  auto handleRegAssign = [&](SDValue V) {
    assert(CurResultType != ResultTypes.end() && "Unexpected value");
    assert((*CurResultType)->isSized() && "Unexpected unsized type");
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
    ++CurResultType;
    // If the type of the inline asm call site return value is different but
    // has the same size as the type of the asm output, bitcast it. One
    // example of this is for vectors with different width / number of
    // elements. This can happen for register classes that can contain
    // multiple different value types. The preg or vreg allocated may not
    // have the same VT as was expected.
    //
    // This can also happen for a return value that disagrees with the
    // register class it is put in, e.g. a double in a general-purpose
    // register on a 32-bit machine.
    if (ResultVT != V.getValueType() &&
        ResultVT.getSizeInBits() == V.getValueSizeInBits())
      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
             V.getValueType().isInteger()) {
      // If a result value was tied to an input value, the computed result
      // may have a wider width than the expected result. Extract the
      // relevant portion.
      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
    }
    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
    ResultVTs.push_back(ResultVT);
    ResultValues.push_back(V);
  };
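  // e.g. an asm output constrained to a vector register class may come back
  // as <2 x i64> while the call site expects <4 x i32>; the sizes match, so
  // the bitcast above reconciles the two types.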

  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
                                                  Chain, &Glue, &Call);
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Address:
        break; // Silence warning.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect outputs manifest as stores. Record the output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // Generate CopyFromRegs to associated registers.
        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(&Call, V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  if (EmitEHLabels) {
    Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
  }

  // Only update the root if the inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
      EmitEHLabels)
    DAG.setRoot(Chain);
}

void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.diagnose(DiagnosticInfoInlineAsm(Call, Message));

  // Make sure we leave the DAG in a valid state.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (const EVT &VT : ValueVTs)
    Ops.push_back(DAG.getUNDEF(VT));

  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlign(I.getType()).value());
  DAG.setRoot(V.getValue(1));

  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}

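// For example, an i32 call annotated with !range !{i32 0, i32 256} has an
// unsigned range of [0, 256), so the function below wraps its result in an
// AssertZext node asserting that only the low 8 bits may be set.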
SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                    const Instruction &I,
                                                    SDValue Op) {
  std::optional<ConstantRange> CR = getRange(I);

  if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
    return Op;

  APInt Lo = CR->getUnsignedMin();
  if (!Lo.isMinValue())
    return Op;

  APInt Hi = CR->getUnsignedMax();
  unsigned Bits = std::max(Hi.getActiveBits(),
                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));

  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);

  SDLoc SL = getCurSDLoc();

  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
                             DAG.getValueType(SmallVT));
  unsigned NumVals = Op.getNode()->getNumValues();
  if (NumVals == 1)
    return ZExt;

  SmallVector<SDValue, 4> Ops;

  Ops.push_back(ZExt);
  for (unsigned I = 1; I != NumVals; ++I)
    Ops.push_back(Op.getValue(I));

  return DAG.getMergeValues(Ops, SL);
}

/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
void SelectionDAGBuilder::populateCallLoweringInfo(
    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
    AttributeSet RetAttrs, bool IsPatchPoint) {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
       ArgI != ArgE; ++ArgI) {
    const Value *V = Call->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    TargetLowering::ArgListEntry Entry;
    Entry.Node = getValue(V);
    Entry.Ty = V->getType();
    Entry.setAttributes(Call, ArgI);
    Args.push_back(Entry);
  }

  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
                 RetAttrs)
      .setDiscardResult(Call->use_empty())
      .setIsPatchPoint(IsPatchPoint)
      .setIsPreallocated(
          Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
}

/// Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
/// address materialization and register allocation, but may also be required
/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
                                SelectionDAGBuilder &Builder) {
  SelectionDAG &DAG = Builder.DAG;
  for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
    SDValue Op = Builder.getValue(Call.getArgOperand(I));

    // Things on the stack are pointer-typed, meaning that they are already
    // legal and can be emitted directly to target nodes.
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
      Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
    } else {
      // Otherwise emit a target independent node to be legalised.
      Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
    }
  }
}

/// Lower llvm.experimental.stackmap.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
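  //
  // For example:
  //   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 42, i32 8,
  //                                                         ptr %obj)
  // records %obj as a live value under ID 42 and reserves 8 shadow bytes.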

  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InGlue, Callee;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledOperand());

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering
  // code. Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InGlue = Chain.getValue(1);

  // Add the STACKMAP operands, starting with DAG house-keeping.
  Ops.push_back(Chain);
  Ops.push_back(InGlue);

  // Add the <id>, <numShadowBytes> operands.
  //
  // These do not require legalisation, and can be emitted directly to target
  // constant nodes.
  SDValue ID = getValue(CI.getArgOperand(0));
  assert(ID.getValueType() == MVT::i64);
  SDValue IDConst =
      DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
  Ops.push_back(IDConst);

  SDValue Shad = getValue(CI.getArgOperand(1));
  assert(Shad.getValueType() == MVT::i32);
  SDValue ShadConst =
      DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
  Ops.push_back(ShadConst);

  // Add the live variables.
  addStackMapLiveVars(CI, 2, DL, Ops, *this);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);

  // Stackmaps don't generate values, so nothing goes into the NodeMap.

  // Set the root to the target-lowered call chain.
  DAG.setRoot(Chain);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();
}

/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
                                          const BasicBlock *EHPadBB) {
  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
  //                                         i32 <numBytes>,
  //                                         i8* <target>,
  //                                         i32 <numArgs>,
  //                                         [Args...],
  //                                         [live variables...])

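  // For example:
  //   %r = call i64 (i64, i32, ptr, i32, ...)
  //          @llvm.experimental.patchpoint.i64(i64 7, i32 15, ptr %target,
  //                                            i32 2, i64 %a, i64 %b)
  // emits a call to %target(%a, %b) inside a 15-byte patchable region.
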
  CallingConv::ID CC = CB.getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !CB.getType()->isVoidTy();
  SDLoc dl = getCurSDLoc();
  SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto *ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto *SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>.
  SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NArgVal->getAsZExtVal();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();

  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
                           ReturnTy, CB.getAttributes().getRetAttrs(), true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (CallEnd->getOpcode() == ISD::EH_LABEL)
    CallEnd = CallEnd->getOperand(0).getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  /// Get a call instruction from the call sequence chain.
  /// Tail calls are not allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Push the chain.
  Ops.push_back(*(Call->op_begin()));

  // Optionally, push the glue (if any).
  if (HasGlue)
    Ops.push_back(*(Call->op_end() - 1));

  // Push the register mask info.
  if (HasGlue)
    Ops.push_back(*(Call->op_end() - 2));
  else
    Ops.push_back(*(Call->op_end() - 1));

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));

  // Add the calling convention.
  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
      Ops.push_back(getValue(CB.getArgOperand(i)));

  // Push the arguments from the call instruction.
  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
  Ops.append(Call->op_begin() + 2, e);

  // Push live variables for the stack map.
  addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);

  SDVTList NodeTys;
  if (IsAnyRegCC && HasDef) {
    // Create the return types based on the intrinsic definition.
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SmallVector<EVT, 3> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
    assert(ValueVTs.size() == 1 && "Expected only one return value type.");

    // There is always a chain and a glue type at the end.
    ValueVTs.push_back(MVT::Other);
    ValueVTs.push_back(MVT::Glue);
    NodeTys = DAG.getVTList(ValueVTs);
  } else
    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Replace the target specific call node with a PATCHPOINT node.
  SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);

  // Update the NodeMap.
  if (HasDef) {
    if (IsAnyRegCC)
      setValue(&CB, SDValue(PPV.getNode(), 0));
    else
      setValue(&CB, Result.first);
  }

  // Fixup the consumers of the intrinsic. The chain and glue may be used in
  // the call sequence. Furthermore the location of the chain and glue can
  // change when the AnyReg calling convention is used and the intrinsic
  // returns a value.
  if (IsAnyRegCC && HasDef) {
    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
    SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  } else
    DAG.ReplaceAllUsesWith(Call, PPV.getNode());
  DAG.DeleteNode(Call);

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
}

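// Lower the llvm.vector.reduce.* family. For example, with the reassoc
// fast-math flag set,
//   %r = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %acc,
//                                                          <4 x float> %v)
// becomes FADD(%acc, VECREDUCE_FADD(%v)); without reassoc the strictly
// ordered VECREDUCE_SEQ_FADD node is used instead.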
void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
                                            unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op1 = getValue(I.getArgOperand(0));
  SDValue Op2;
  if (I.arg_size() > 1)
    Op2 = getValue(I.getArgOperand(1));
  SDLoc dl = getCurSDLoc();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  SDValue Res;
  SDNodeFlags SDFlags;
  if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
    SDFlags.copyFMF(*FPMO);

  switch (Intrinsic) {
  case Intrinsic::vector_reduce_fadd:
    if (SDFlags.hasAllowReassociation())
      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
                        SDFlags);
    else
      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmul:
    if (SDFlags.hasAllowReassociation())
      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
                        SDFlags);
    else
      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
    break;
  case Intrinsic::vector_reduce_add:
    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_mul:
    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_and:
    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_or:
    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_xor:
    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_smax:
    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_smin:
    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_umax:
    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_umin:
    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
    break;
  case Intrinsic::vector_reduce_fmax:
    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmin:
    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fmaximum:
    Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
    break;
  case Intrinsic::vector_reduce_fminimum:
    Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
    break;
  default:
    llvm_unreachable("Unhandled vector reduce intrinsic");
  }
  setValue(&I, Res);
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.
/// FIXME: When all targets are migrated to using LowerCall, this hook should
/// be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
  // Handle the incoming return values from the call.
  CLI.Ins.clear();
  SmallVector<EVT, 4> RetTys;
  SmallVector<TypeSize, 4> Offsets;
  auto &DL = CLI.DAG.getDataLayout();
  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);

  if (CLI.IsPostTypeLegalization) {
    // If we are lowering a libcall after legalization, split the return type.
    SmallVector<EVT, 4> OldRetTys;
    SmallVector<TypeSize, 4> OldOffsets;
    RetTys.swap(OldRetTys);
    Offsets.swap(OldOffsets);

    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
      EVT RetVT = OldRetTys[i];
      uint64_t Offset = OldOffsets[i];
      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
      RetTys.append(NumRegs, RegisterVT);
      for (unsigned j = 0; j != NumRegs; ++j)
        Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
    }
  }

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

  bool CanLowerReturn =
      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
                           CLI.IsVarArg, Outs, CLI.RetTy->getContext(),
                           CLI.RetTy);

  SDValue DemoteStackSlot;
  int DemoteStackIdx = -100;
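  // If the target cannot return this value in registers, demote the return
  // value: the caller allocates a stack slot, passes it as a hidden leading
  // sret pointer, and loads the results back from the slot after the call.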
  if (!CanLowerReturn) {
    // FIXME: equivalent assert?
    // assert(!CS.hasInAllocaArgument() &&
    //        "sret demotion is incompatible with inalloca");
    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
    Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    DemoteStackIdx =
        MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
    Type *StackSlotPtrType =
        PointerType::get(CLI.RetTy->getContext(), DL.getAllocaAddrSpace());

    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
    ArgListEntry Entry;
    Entry.Node = DemoteStackSlot;
    Entry.Ty = StackSlotPtrType;
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsInReg = false;
    Entry.IsSRet = true;
    Entry.IsNest = false;
    Entry.IsByVal = false;
    Entry.IsByRef = false;
    Entry.IsReturned = false;
    Entry.IsSwiftSelf = false;
    Entry.IsSwiftAsync = false;
    Entry.IsSwiftError = false;
    Entry.IsCFGuardTarget = false;
    Entry.Alignment = Alignment;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
    CLI.getArgs()[0].IndirectType = CLI.RetTy;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());

    // sret demotion isn't compatible with tail-calls, since the sret argument
    // points into the caller's stack frame.
    CLI.IsTailCall = false;
  } else {
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      ISD::ArgFlagsTy Flags;
      if (NeedsRegBlock) {
        Flags.setInConsecutiveRegs();
        if (I == RetTys.size() - 1)
          Flags.setInConsecutiveRegsLast();
      }
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags;
        MyFlags.Flags = Flags;
        MyFlags.VT = RegisterVT;
        MyFlags.ArgVT = VT;
        MyFlags.Used = CLI.IsReturnValueUsed;
        if (CLI.RetTy->isPointerTy()) {
          MyFlags.Flags.setPointer();
          MyFlags.Flags.setPointerAddrSpace(
              cast<PointerType>(CLI.RetTy)->getAddressSpace());
        }
        if (CLI.RetSExt)
          MyFlags.Flags.setSExt();
        if (CLI.RetZExt)
          MyFlags.Flags.setZExt();
        if (CLI.IsInReg)
          MyFlags.Flags.setInReg();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // We push in swifterror return as the last element of CLI.Ins.
  ArgListTy &Args = CLI.getArgs();
  if (supportSwiftError()) {
    for (const ArgListEntry &Arg : Args) {
      if (Arg.IsSwiftError) {
        ISD::InputArg MyFlags;
        MyFlags.VT = getPointerTy(DL);
        MyFlags.ArgVT = EVT(getPointerTy(DL));
        MyFlags.Flags.setSwiftError();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // Handle all of the outgoing arguments.
  CLI.Outs.clear();
  CLI.OutVals.clear();
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
    // FIXME: Split arguments if CLI.IsPostTypeLegalization
    Type *FinalType = Args[i].Ty;
    if (Args[i].IsByVal)
      FinalType = Args[i].IndirectType;
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);
    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
         ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
      Flags.setOrigAlign(OriginalAlignment);

      if (Args[i].Ty->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Args[i].Ty)->getAddressSpace());
      }
      if (Args[i].IsZExt)
        Flags.setZExt();
      if (Args[i].IsSExt)
        Flags.setSExt();
      if (Args[i].IsNoExt)
        Flags.setNoExt();
      if (Args[i].IsInReg) {
        // If we are using the vectorcall calling convention, a structure
        // that is passed InReg is surely an HVA.
        if (CLI.CallConv == CallingConv::X86_VectorCall &&
            isa<StructType>(FinalType)) {
          // The first value of a structure is marked as HvaStart.
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set the InReg flag.
        Flags.setInReg();
      }
      if (Args[i].IsSRet)
        Flags.setSRet();
      if (Args[i].IsSwiftSelf)
        Flags.setSwiftSelf();
      if (Args[i].IsSwiftAsync)
        Flags.setSwiftAsync();
      if (Args[i].IsSwiftError)
        Flags.setSwiftError();
      if (Args[i].IsCFGuardTarget)
        Flags.setCFGuardTarget();
      if (Args[i].IsByVal)
        Flags.setByVal();
      if (Args[i].IsByRef)
        Flags.setByRef();
      if (Args[i].IsPreallocated) {
        Flags.setPreallocated();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // preallocated. This way we can know how many bytes we should've
        // allocated and how many bytes a callee cleanup function will pop. If
        // we port preallocated to more targets, we'll have to add custom
        // preallocated handling in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Args[i].IsInAlloca) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      Align MemAlign;
      if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
        unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
        Flags.setByValSize(FrameSize);

        // For ByVal, alignment should come from the frontend. The backend
        // will guess if this info is not there, but there are cases it
        // cannot get right.
        if (auto MA = Args[i].Alignment)
          MemAlign = *MA;
        else
          MemAlign = getByValTypeAlignment(Args[i].IndirectType, DL);
      } else if (auto MA = Args[i].Alignment) {
        MemAlign = *MA;
      } else {
        MemAlign = OriginalAlignment;
      }
      Flags.setMemAlign(MemAlign);
      if (Args[i].IsNest)
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();

      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                 CLI.CallConv, VT);
      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                        CLI.CallConv, VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].IsSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].IsZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      // Conservatively only handle 'returned' on non-vectors that can be
      // lowered, for now.
      if (Args[i].IsReturned && !Op.getValueType().isVector() &&
          CanLowerReturn) {
        assert((CLI.RetTy == Args[i].Ty ||
                (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
                 CLI.RetTy->getPointerAddressSpace() ==
                     Args[i].Ty->getPointerAddressSpace())) &&
               RetTys.size() == NumValues && "unexpected use of 'returned'");
        // Before passing 'returned' to the target lowering code, ensure that
        // either the register MVT and the actual EVT are the same size or
        // that the return value and argument are extended in the same way;
        // in these cases it's safe to pass the argument register value
        // unchanged as the return register value (although it's at the
        // target's option whether to do so).
        // TODO: allow code generation to take advantage of partially
        // preserved registers rather than clobbering the entire register
        // when the parameter extension method is not compatible with the
        // return extension method.
        if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
            (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
             CLI.RetZExt == Args[i].IsZExt))
          Flags.setReturned();
      }

      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
                     CLI.CallConv, ExtendKind);

      for (unsigned j = 0; j != NumParts; ++j) {
        // If it isn't the first piece, the alignment must be 1.
        // For scalable vectors the scalable part is currently handled
        // by individual targets, so we just use the known minimum size here.
        ISD::OutputArg MyFlags(
            Flags, Parts[j].getValueType().getSimpleVT(), VT,
            i < CLI.NumFixedArgs, i,
            j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
        if (NumParts > 1 && j == 0)
          MyFlags.Flags.setSplit();
        else if (j != 0) {
          MyFlags.Flags.setOrigAlign(Align(1));
          if (j == NumParts - 1)
            MyFlags.Flags.setSplitEnd();
        }

        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
      }

      if (NeedsRegBlock && Value == NumValues - 1)
        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
    }
  }
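
  // At this point every IR-level argument has been expanded into legal,
  // register-sized parts; e.g. on a 32-bit target an i64 argument typically
  // yields two i32 entries in CLI.OutVals with Split/SplitEnd flags set.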
11250
11251 SmallVector<SDValue, 4> InVals;
11252 CLI.Chain = LowerCall(CLI, InVals);
11253
11254 // Update CLI.InVals to use outside of this function.
11255 CLI.InVals = InVals;
11256
11257 // Verify that the target's LowerCall behaved as expected.
11258 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
11259 "LowerCall didn't return a valid chain!");
11260 assert((!CLI.IsTailCall || InVals.empty()) &&
11261 "LowerCall emitted a return value for a tail call!");
11262 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
11263 "LowerCall didn't emit the correct number of values!");
11264
11265 // For a tail call, the return value is merely live-out and there aren't
11266 // any nodes in the DAG representing it. Return a special value to
11267 // indicate that a tail call has been emitted and no more Instructions
11268 // should be processed in the current block.
11269 if (CLI.IsTailCall) {
11270 CLI.DAG.setRoot(CLI.Chain);
11271 return std::make_pair(SDValue(), SDValue());
11272 }
11273
11274 #ifndef NDEBUG
11275 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
11276 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
11277 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
11278 "LowerCall emitted a value with the wrong type!");
11279 }
11280 #endif
11281
11282 SmallVector<SDValue, 4> ReturnValues;
11283 if (!CanLowerReturn) {
11284 // The instruction result is the result of loading from the
11285 // hidden sret parameter.
11286 MVT PtrVT = getPointerTy(DL, DL.getAllocaAddrSpace());
11287
11288 unsigned NumValues = RetTys.size();
11289 ReturnValues.resize(NumValues);
11290 SmallVector<SDValue, 4> Chains(NumValues);
11291
11292 // An aggregate return value cannot wrap around the address space, so
11293 // offsets to its parts don't wrap either.
11294 MachineFunction &MF = CLI.DAG.getMachineFunction();
11295 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
11296 for (unsigned i = 0; i < NumValues; ++i) {
11297 SDValue Add = CLI.DAG.getMemBasePlusOffset(
11298 DemoteStackSlot, CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
11299 CLI.DL, SDNodeFlags::NoUnsignedWrap);
11300 SDValue L = CLI.DAG.getLoad(
11301 RetTys[i], CLI.DL, CLI.Chain, Add,
11302 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
11303 DemoteStackIdx, Offsets[i]),
11304 HiddenSRetAlign);
11305 ReturnValues[i] = L;
11306 Chains[i] = L.getValue(1);
11307 }
11308
11309 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
11310 } else {
11311 // Collect the legal value parts into potentially illegal values
11312 // that correspond to the original function's return values.
11313 std::optional<ISD::NodeType> AssertOp;
11314 if (CLI.RetSExt)
11315 AssertOp = ISD::AssertSext;
11316 else if (CLI.RetZExt)
11317 AssertOp = ISD::AssertZext;
11318 unsigned CurReg = 0;
11319 for (EVT VT : RetTys) {
11320 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11321 CLI.CallConv, VT);
11322 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11323 CLI.CallConv, VT);
11324
11325 ReturnValues.push_back(getCopyFromParts(
11326 CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
11327 CLI.Chain, CLI.CallConv, AssertOp));
11328 CurReg += NumRegs;
11329 }
11330
11331 // For a function returning void, there is no return value. We can't create
11332 // such a node, so we just return a null value; in that case, nothing will
11333 // actually look at it.
11334 if (ReturnValues.empty())
11335 return std::make_pair(SDValue(), CLI.Chain);
11336 }
11337
11338 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
11339 CLI.DAG.getVTList(RetTys), ReturnValues);
11340 return std::make_pair(Res, CLI.Chain);
11341 }
11342
11343 /// Places new result values for the node in Results (their number
11344 /// and types must exactly match those of the original return values of
11345 /// the node), or leaves Results empty, which indicates that the node is not
11346 /// to be custom lowered after all.
11347 void TargetLowering::LowerOperationWrapper(SDNode *N,
11348 SmallVectorImpl<SDValue> &Results,
11349 SelectionDAG &DAG) const {
11350 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
11351
11352 if (!Res.getNode())
11353 return;
11354
11355 // If the original node has one result, take the return value from
11356 // LowerOperation as is. It might not be result number 0.
11357 if (N->getNumValues() == 1) {
11358 Results.push_back(Res);
11359 return;
11360 }
11361
11362 // If the original node has multiple results, then the return node should
11363 // have the same number of results.
11364 assert((N->getNumValues() == Res->getNumValues()) &&
11365 "Lowering returned the wrong number of results!");
11366
11367 // Place the new result values based on N's result numbers.
11368 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
11369 Results.push_back(Res.getValue(I));
11370 }
11371
11372 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11373 llvm_unreachable("LowerOperation not implemented for this target!");
11374 }
11375
11376 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
11377 Register Reg,
11378 ISD::NodeType ExtendType) {
11379 SDValue Op = getNonRegisterValue(V);
11380 assert((Op.getOpcode() != ISD::CopyFromReg ||
11381 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11382 "Copy from a reg to the same reg!");
11383 assert(!Reg.isPhysical() && "Is a physreg");
11384
11385 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11386 // If this is an InlineAsm we have to match the registers required, not the
11387 // notional registers required by the type.
11388
11389 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11390 std::nullopt); // This is not an ABI copy.
11391 SDValue Chain = DAG.getEntryNode();
11392
11393 if (ExtendType == ISD::ANY_EXTEND) {
11394 auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
11395 if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
11396 ExtendType = PreferredExtendIt->second;
11397 }
11398 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
11399 PendingExports.push_back(Chain);
11400 }
11401
11402 #include "llvm/CodeGen/SelectionDAGISel.h"
11403
11404 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11405 /// entry block, return true. This includes arguments used by switches, since
11406 /// the switch may expand into multiple basic blocks.
11407 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
11408 // With FastISel active, we may be splitting blocks, so force creation
11409 // of virtual registers for all non-dead arguments.
11410 if (FastISel)
11411 return A->use_empty();
11412
11413 const BasicBlock &Entry = A->getParent()->front();
11414 for (const User *U : A->users())
11415 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11416 return false; // Use not in entry block.
11417
11418 return true;
11419 }
11420
11421 using ArgCopyElisionMapTy =
11422 DenseMap<const Argument *,
11423 std::pair<const AllocaInst *, const StoreInst *>>;
11424
11425 /// Scan the entry block of the function in FuncInfo for arguments that look
11426 /// like copies into a local alloca. Record any copied arguments in
11427 /// ArgCopyElisionCandidates.
11428 static void
11429 findArgumentCopyElisionCandidates(const DataLayout &DL,
11430 FunctionLoweringInfo *FuncInfo,
11431 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
11432 // Record the state of every static alloca used in the entry block. Argument
11433 // allocas are all used in the entry block, so we need approximately as many
11434 // entries as we have arguments.
11435 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
11436 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
11437 unsigned NumArgs = FuncInfo->Fn->arg_size();
11438 StaticAllocas.reserve(NumArgs * 2);
11439
11440 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11441 if (!V)
11442 return nullptr;
11443 V = V->stripPointerCasts();
11444 const auto *AI = dyn_cast<AllocaInst>(V);
11445 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11446 return nullptr;
11447 auto Iter = StaticAllocas.insert({AI, Unknown});
11448 return &Iter.first->second;
11449 };
11450
11451 // Look for stores of arguments to static allocas. Look through bitcasts and
11452 // GEPs to handle type coercions, as long as the alloca is fully initialized
11453 // by the store. Any non-store use of an alloca escapes it and any subsequent
11454 // unanalyzed store might write to it.
11455 // FIXME: Handle structs initialized with multiple stores.
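// For example (illustrative IR): in
//   define void @f(i64 %x) {
//   entry:
//     %x.addr = alloca i64
//     store i64 %x, ptr %x.addr
// the store fully initializes %x.addr from %x, so the pair
// (%x -> {%x.addr, store}) is recorded as an elision candidate below.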
11456 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11457 // Look for stores, and handle non-store uses conservatively.
11458 const auto *SI = dyn_cast<StoreInst>(&I);
11459 if (!SI) {
11460 // We will look through cast uses, so ignore them completely.
11461 if (I.isCast())
11462 continue;
11463 // Ignore debug info and pseudo op intrinsics, they don't escape or store
11464 // to allocas.
11465 if (I.isDebugOrPseudoInst())
11466 continue;
11467 // This is an unknown instruction. Assume it escapes or writes to all
11468 // static alloca operands.
11469 for (const Use &U : I.operands()) {
11470 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11471 *Info = StaticAllocaInfo::Clobbered;
11472 }
11473 continue;
11474 }
11475
11476 // If the stored value is a static alloca, mark it as escaped.
11477 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11478 *Info = StaticAllocaInfo::Clobbered;
11479
11480 // Check if the destination is a static alloca.
11481 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11482 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11483 if (!Info)
11484 continue;
11485 const AllocaInst *AI = cast<AllocaInst>(Dst);
11486
11487 // Skip allocas that have been initialized or clobbered.
11488 if (*Info != StaticAllocaInfo::Unknown)
11489 continue;
11490
11491 // Check if the stored value is an argument, and that this store fully
11492 // initializes the alloca.
11493 // If the argument type has padding bits we can't directly forward a pointer
11494 // as the upper bits may contain garbage.
11495 // Don't elide copies from the same argument twice.
11496 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11497 const auto *Arg = dyn_cast<Argument>(Val);
11498 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11499 Arg->getType()->isEmptyTy() ||
11500 DL.getTypeStoreSize(Arg->getType()) !=
11501 DL.getTypeAllocSize(AI->getAllocatedType()) ||
11502 !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11503 ArgCopyElisionCandidates.count(Arg)) {
11504 *Info = StaticAllocaInfo::Clobbered;
11505 continue;
11506 }
11507
11508 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
11509 << '\n');
11510
11511 // Mark this alloca and store for argument copy elision.
11512 *Info = StaticAllocaInfo::Elidable;
11513 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
11514
11515 // Stop scanning if we've seen all arguments. This will happen early in -O0
11516 // builds, which is useful, because -O0 builds have large entry blocks and
11517 // many allocas.
11518 if (ArgCopyElisionCandidates.size() == NumArgs)
11519 break;
11520 }
11521 }
11522
11523 /// Try to elide argument copies from memory into a local alloca. Succeeds if
11524 /// ArgVal is a load from a suitable fixed stack object.
11525 static void tryToElideArgumentCopy(
11526 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
11527 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
11528 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
11529 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
11530 ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
11531 // Check if this is a load from a fixed stack object.
11532 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11533 if (!LNode)
11534 return;
11535 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11536 if (!FINode)
11537 return;
11538
11539 // Check that the fixed stack object is the right size and alignment.
11540 // Look at the alignment that the user wrote on the alloca instead of looking
11541 // at the stack object.
11542 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11543 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11544 const AllocaInst *AI = ArgCopyIter->second.first;
11545 int FixedIndex = FINode->getIndex();
11546 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
11547 int OldIndex = AllocaIndex;
11548 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11549 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
11550 LLVM_DEBUG(
11551 dbgs() << " argument copy elision failed due to bad fixed stack "
11552 "object size\n");
11553 return;
11554 }
11555 Align RequiredAlignment = AI->getAlign();
11556 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
11557 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
11558 "greater than stack argument alignment ("
11559 << DebugStr(RequiredAlignment) << " vs "
11560 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
11561 return;
11562 }
11563
11564 // Perform the elision. Delete the old stack object and replace its only use
11565 // in the variable info map. Mark the stack object as mutable and aliased.
11566 LLVM_DEBUG({
11567 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
11568 << " Replacing frame index " << OldIndex << " with " << FixedIndex
11569 << '\n';
11570 });
11571 MFI.RemoveStackObject(OldIndex);
11572 MFI.setIsImmutableObjectIndex(FixedIndex, false);
11573 MFI.setIsAliasedObjectIndex(FixedIndex, true);
11574 AllocaIndex = FixedIndex;
11575 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
11576 for (SDValue ArgVal : ArgVals)
11577 Chains.push_back(ArgVal.getValue(1));
11578
11579 // Avoid emitting code for the store implementing the copy.
11580 const StoreInst *SI = ArgCopyIter->second.second;
11581 ElidedArgCopyInstrs.insert(SI);
11582
11583 // Check for uses of the argument again so that we can avoid exporting ArgVal
11584 // if it isn't used by anything other than the store.
11585 for (const Value *U : Arg.users()) {
11586 if (U != SI) {
11587 ArgHasUses = true;
11588 break;
11589 }
11590 }
11591 }
11592
11593 void SelectionDAGISel::LowerArguments(const Function &F) {
11594 SelectionDAG &DAG = SDB->DAG;
11595 SDLoc dl = SDB->getCurSDLoc();
11596 const DataLayout &DL = DAG.getDataLayout();
11597 SmallVector<ISD::InputArg, 16> Ins;
11598
11599 // In Naked functions we aren't going to save any registers.
11600 if (F.hasFnAttribute(Attribute::Naked))
11601 return;
11602
11603 if (!FuncInfo->CanLowerReturn) {
11604 // Put in an sret pointer parameter before all the other parameters.
11605 MVT ValueVT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
11606
11607 ISD::ArgFlagsTy Flags;
11608 Flags.setSRet();
11609 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
11610 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, true,
11611 ISD::InputArg::NoArgIndex, 0);
11612 Ins.push_back(RetArg);
11613 }
11614
11615 // Look for stores of arguments to static allocas. Mark such arguments with a
11616 // flag to ask the target to give us the memory location of that argument if
11617 // available.
11618 ArgCopyElisionMapTy ArgCopyElisionCandidates;
11619 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
11620 ArgCopyElisionCandidates);
11621
11622 // Set up the incoming argument description vector.
11623 for (const Argument &Arg : F.args()) {
11624 unsigned ArgNo = Arg.getArgNo();
11625 SmallVector<EVT, 4> ValueVTs;
11626 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11627 bool isArgValueUsed = !Arg.use_empty();
11628 unsigned PartBase = 0;
11629 Type *FinalType = Arg.getType();
11630 if (Arg.hasAttribute(Attribute::ByVal))
11631 FinalType = Arg.getParamByValType();
11632 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11633 FinalType, F.getCallingConv(), F.isVarArg(), DL);
11634 for (unsigned Value = 0, NumValues = ValueVTs.size();
11635 Value != NumValues; ++Value) {
11636 EVT VT = ValueVTs[Value];
11637 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
11638 ISD::ArgFlagsTy Flags;
11639
11640
11641 if (Arg.getType()->isPointerTy()) {
11642 Flags.setPointer();
11643 Flags.setPointerAddrSpace(
11644 cast<PointerType>(Arg.getType())->getAddressSpace());
11645 }
11646 if (Arg.hasAttribute(Attribute::ZExt))
11647 Flags.setZExt();
11648 if (Arg.hasAttribute(Attribute::SExt))
11649 Flags.setSExt();
11650 if (Arg.hasAttribute(Attribute::InReg)) {
11651 // If we are using the vectorcall calling convention, a structure that is
11652 // passed InReg must be an HVA (Homogeneous Vector Aggregate).
11653 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11654 isa<StructType>(Arg.getType())) {
11655 // Only the first value of the structure is marked as the HVA start.
11656 if (0 == Value)
11657 Flags.setHvaStart();
11658 Flags.setHva();
11659 }
11660 // Set InReg Flag
11661 Flags.setInReg();
11662 }
11663 if (Arg.hasAttribute(Attribute::StructRet))
11664 Flags.setSRet();
11665 if (Arg.hasAttribute(Attribute::SwiftSelf))
11666 Flags.setSwiftSelf();
11667 if (Arg.hasAttribute(Attribute::SwiftAsync))
11668 Flags.setSwiftAsync();
11669 if (Arg.hasAttribute(Attribute::SwiftError))
11670 Flags.setSwiftError();
11671 if (Arg.hasAttribute(Attribute::ByVal))
11672 Flags.setByVal();
11673 if (Arg.hasAttribute(Attribute::ByRef))
11674 Flags.setByRef();
11675 if (Arg.hasAttribute(Attribute::InAlloca)) {
11676 Flags.setInAlloca();
11677 // Set the byval flag for CCAssignFn callbacks that don't know about
11678 // inalloca. This way we can know how many bytes we should've allocated
11679 // and how many bytes a callee cleanup function will pop. If we port
11680 // inalloca to more targets, we'll have to add custom inalloca handling
11681 // in the various CC lowering callbacks.
11682 Flags.setByVal();
11683 }
11684 if (Arg.hasAttribute(Attribute::Preallocated)) {
11685 Flags.setPreallocated();
11686 // Set the byval flag for CCAssignFn callbacks that don't know about
11687 // preallocated. This way we can know how many bytes we should've
11688 // allocated and how many bytes a callee cleanup function will pop. If
11689 // we port preallocated to more targets, we'll have to add custom
11690 // preallocated handling in the various CC lowering callbacks.
11691 Flags.setByVal();
11692 }
11693
11694 // Certain targets (such as MIPS), may have a different ABI alignment
11695 // for a type depending on the context. Give the target a chance to
11696 // specify the alignment it wants.
11697 const Align OriginalAlignment(
11698 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11699 Flags.setOrigAlign(OriginalAlignment);
11700
11701 Align MemAlign;
11702 Type *ArgMemTy = nullptr;
11703 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11704 Flags.isByRef()) {
11705 if (!ArgMemTy)
11706 ArgMemTy = Arg.getPointeeInMemoryValueType();
11707
11708 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11709
11710 // For in-memory arguments, the size and alignment should be passed from
11711 // the frontend. The backend will guess if this info is missing, but
11712 // there are cases it cannot get right.
11713 if (auto ParamAlign = Arg.getParamStackAlign())
11714 MemAlign = *ParamAlign;
11715 else if ((ParamAlign = Arg.getParamAlign()))
11716 MemAlign = *ParamAlign;
11717 else
11718 MemAlign = TLI->getByValTypeAlignment(ArgMemTy, DL);
11719 if (Flags.isByRef())
11720 Flags.setByRefSize(MemSize);
11721 else
11722 Flags.setByValSize(MemSize);
11723 } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11724 MemAlign = *ParamAlign;
11725 } else {
11726 MemAlign = OriginalAlignment;
11727 }
11728 Flags.setMemAlign(MemAlign);
11729
11730 if (Arg.hasAttribute(Attribute::Nest))
11731 Flags.setNest();
11732 if (NeedsRegBlock)
11733 Flags.setInConsecutiveRegs();
11734 if (ArgCopyElisionCandidates.count(&Arg))
11735 Flags.setCopyElisionCandidate();
11736 if (Arg.hasAttribute(Attribute::Returned))
11737 Flags.setReturned();
11738
11739 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11740 *CurDAG->getContext(), F.getCallingConv(), VT);
11741 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11742 *CurDAG->getContext(), F.getCallingConv(), VT);
11743 for (unsigned i = 0; i != NumRegs; ++i) {
11744 // For scalable vectors, use the minimum size; individual targets
11745 // are responsible for handling scalable vector arguments and
11746 // return values.
11747 ISD::InputArg MyFlags(
11748 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11749 PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11750 if (NumRegs > 1 && i == 0)
11751 MyFlags.Flags.setSplit();
11752 // If this isn't the first piece, the alignment must be 1.
11753 else if (i > 0) {
11754 MyFlags.Flags.setOrigAlign(Align(1));
11755 if (i == NumRegs - 1)
11756 MyFlags.Flags.setSplitEnd();
11757 }
11758 Ins.push_back(MyFlags);
11759 }
11760 if (NeedsRegBlock && Value == NumValues - 1)
11761 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11762 PartBase += VT.getStoreSize().getKnownMinValue();
11763 }
11764 }
11765
11766 // Call the target to set up the argument values.
11767 SmallVector<SDValue, 8> InVals;
11768 SDValue NewRoot = TLI->LowerFormalArguments(
11769 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11770
11771 // Verify that the target's LowerFormalArguments behaved as expected.
11772 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11773 "LowerFormalArguments didn't return a valid chain!");
11774 assert(InVals.size() == Ins.size() &&
11775 "LowerFormalArguments didn't emit the correct number of values!");
11776 LLVM_DEBUG({
11777 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11778 assert(InVals[i].getNode() &&
11779 "LowerFormalArguments emitted a null value!");
11780 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11781 "LowerFormalArguments emitted a value with the wrong type!");
11782 }
11783 });
11784
11785 // Update the DAG with the new chain value resulting from argument lowering.
11786 DAG.setRoot(NewRoot);
11787
11788 // Set up the argument values.
11789 unsigned i = 0;
11790 if (!FuncInfo->CanLowerReturn) {
11791 // Create a virtual register for the sret pointer, and put in a copy
11792 // from the sret argument into it.
11793 MVT VT = TLI->getPointerTy(DL, DL.getAllocaAddrSpace());
11794 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11795 std::optional<ISD::NodeType> AssertOp;
11796 SDValue ArgValue =
11797 getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11798 F.getCallingConv(), AssertOp);
11799
11800 MachineFunction& MF = SDB->DAG.getMachineFunction();
11801 MachineRegisterInfo& RegInfo = MF.getRegInfo();
11802 Register SRetReg =
11803 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11804 FuncInfo->DemoteRegister = SRetReg;
11805 NewRoot =
11806 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11807 DAG.setRoot(NewRoot);
11808
11809 // i indexes lowered arguments. Bump it past the hidden sret argument.
11810 ++i;
11811 }
11812
11813 SmallVector<SDValue, 4> Chains;
11814 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11815 for (const Argument &Arg : F.args()) {
11816 SmallVector<SDValue, 4> ArgValues;
11817 SmallVector<EVT, 4> ValueVTs;
11818 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11819 unsigned NumValues = ValueVTs.size();
11820 if (NumValues == 0)
11821 continue;
11822
11823 bool ArgHasUses = !Arg.use_empty();
11824
11825 // Elide the copying store if the target loaded this argument from a
11826 // suitable fixed stack object.
11827 if (Ins[i].Flags.isCopyElisionCandidate()) {
11828 unsigned NumParts = 0;
11829 for (EVT VT : ValueVTs)
11830 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11831 F.getCallingConv(), VT);
11832
11833 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11834 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11835 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11836 }
11837
11838 // If this argument is unused, remember its value; it may still be needed
11839 // to generate debugging information.
11840 bool isSwiftErrorArg =
11841 TLI->supportSwiftError() &&
11842 Arg.hasAttribute(Attribute::SwiftError);
11843 if (!ArgHasUses && !isSwiftErrorArg) {
11844 SDB->setUnusedArgValue(&Arg, InVals[i]);
11845
11846 // Also remember any frame index for use in FastISel.
11847 if (FrameIndexSDNode *FI =
11848 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11849 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11850 }
11851
11852 for (unsigned Val = 0; Val != NumValues; ++Val) {
11853 EVT VT = ValueVTs[Val];
11854 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11855 F.getCallingConv(), VT);
11856 unsigned NumParts = TLI->getNumRegistersForCallingConv(
11857 *CurDAG->getContext(), F.getCallingConv(), VT);
11858
11859 // Even an apparently 'unused' swifterror argument needs to be returned, so
11860 // we do generate a copy for it that can be used on return from the
11861 // function.
11862 if (ArgHasUses || isSwiftErrorArg) {
11863 std::optional<ISD::NodeType> AssertOp;
11864 if (Arg.hasAttribute(Attribute::SExt))
11865 AssertOp = ISD::AssertSext;
11866 else if (Arg.hasAttribute(Attribute::ZExt))
11867 AssertOp = ISD::AssertZext;
11868
11869 SDValue OutVal =
11870 getCopyFromParts(DAG, dl, &InVals[i], NumParts, PartVT, VT, nullptr,
11871 NewRoot, F.getCallingConv(), AssertOp);
11872
11873 FPClassTest NoFPClass = Arg.getNoFPClass();
11874 if (NoFPClass != fcNone) {
11875 SDValue SDNoFPClass = DAG.getTargetConstant(
11876 static_cast<uint64_t>(NoFPClass), dl, MVT::i32);
11877 OutVal = DAG.getNode(ISD::AssertNoFPClass, dl, OutVal.getValueType(),
11878 OutVal, SDNoFPClass);
11879 }
11880 ArgValues.push_back(OutVal);
11881 }
11882
11883 i += NumParts;
11884 }
11885
11886 // We don't need to do anything else for unused arguments.
11887 if (ArgValues.empty())
11888 continue;
11889
11890 // Note down frame index.
11891 if (FrameIndexSDNode *FI =
11892 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11893 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11894
11895 SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11896 SDB->getCurSDLoc());
11897
11898 SDB->setValue(&Arg, Res);
11899 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11900 // We want to associate the argument with the frame index, among
11901 // involved operands, that correspond to the lowest address. The
11902 // getCopyFromParts function, called earlier, is swapping the order of
11903 // the operands to BUILD_PAIR depending on endianness. The result of
11904 // that swapping is that the least significant bits of the argument will
11905 // be in the first operand of the BUILD_PAIR node, and the most
11906 // significant bits will be in the second operand.
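// For example (illustrative): an i64 argument split into two i32 loads on a
// big-endian target keeps its most significant half at the lower address, so
// the low-address frame index is found under operand 1 of the BUILD_PAIR.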
11907 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11908 if (LoadSDNode *LNode =
11909 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11910 if (FrameIndexSDNode *FI =
11911 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11912 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11913 }
11914
11915 // Analyses past this point are naive and don't expect an assertion.
11916 if (Res.getOpcode() == ISD::AssertZext)
11917 Res = Res.getOperand(0);
11918
11919 // Update the SwiftErrorVRegDefMap.
11920 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11921 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11922 if (Reg.isVirtual())
11923 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11924 Reg);
11925 }
11926
11927 // If this argument is live outside of the entry block, insert a copy from
11928 // wherever we got it to the vreg that other BB's will reference it as.
11929 if (Res.getOpcode() == ISD::CopyFromReg) {
11930 // If we can, though, try to skip creating an unnecessary vreg.
11931 // FIXME: This isn't very clean... it would be nice to make this more
11932 // general.
11933 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11934 if (Reg.isVirtual()) {
11935 FuncInfo->ValueMap[&Arg] = Reg;
11936 continue;
11937 }
11938 }
11939 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11940 FuncInfo->InitializeRegForValue(&Arg);
11941 SDB->CopyToExportRegsIfNeeded(&Arg);
11942 }
11943 }
11944
11945 if (!Chains.empty()) {
11946 Chains.push_back(NewRoot);
11947 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11948 }
11949
11950 DAG.setRoot(NewRoot);
11951
11952 assert(i == InVals.size() && "Argument register count mismatch!");
11953
11954 // If any argument copy elisions occurred and we have debug info, update the
11955 // stale frame indices used in the dbg.declare variable info table.
11956 if (!ArgCopyElisionFrameIndexMap.empty()) {
11957 for (MachineFunction::VariableDbgInfo &VI :
11958 MF->getInStackSlotVariableDbgInfo()) {
11959 auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11960 if (I != ArgCopyElisionFrameIndexMap.end())
11961 VI.updateStackSlot(I->second);
11962 }
11963 }
11964
11965 // Finally, if the target has anything special to do, allow it to do so.
11966 emitFunctionEntryCode();
11967 }
11968
11969 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
11970 /// ensure constants are generated when needed. Remember the virtual registers
11971 /// that need to be added to the Machine PHI nodes as input. We cannot just
11972 /// directly add them, because expansion might result in multiple MBB's for one
11973 /// BB. As such, the start of the BB might correspond to a different MBB than
11974 /// the end.
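/// For example (illustrative IR): if a successor contains
///   %p = phi i32 [ %v, %this.bb ], [ 0, %other.bb ]
/// then %v must be made available in a virtual register here, because this IR
/// block may have been expanded into several machine basic blocks.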
11975 void
11976 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11977 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11978
11979 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11980
11981 // Check PHI nodes in successors that expect a value to be available from this
11982 // block.
11983 for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11984 if (!isa<PHINode>(SuccBB->begin())) continue;
11985 MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
11986
11987 // If this terminator has multiple identical successors (common for
11988 // switches), only handle each succ once.
11989 if (!SuccsHandled.insert(SuccMBB).second)
11990 continue;
11991
11992 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11993
11994 // At this point we know that there is a 1-1 correspondence between LLVM PHI
11995 // nodes and Machine PHI nodes, but the incoming operands have not been
11996 // emitted yet.
11997 for (const PHINode &PN : SuccBB->phis()) {
11998 // Ignore dead PHIs.
11999 if (PN.use_empty())
12000 continue;
12001
12002 // Skip empty types
12003 if (PN.getType()->isEmptyTy())
12004 continue;
12005
12006 Register Reg;
12007 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12008
12009 if (const auto *C = dyn_cast<Constant>(PHIOp)) {
12010 Register &RegOut = ConstantsOut[C];
12011 if (!RegOut) {
12012 RegOut = FuncInfo.CreateRegs(&PN);
12013 // We need to zero/sign extend ConstantInt phi operands to match
12014 // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
12015 ISD::NodeType ExtendType = ISD::ANY_EXTEND;
12016 if (auto *CI = dyn_cast<ConstantInt>(C))
12017 ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
12018 : ISD::ZERO_EXTEND;
12019 CopyValueToVirtualRegister(C, RegOut, ExtendType);
12020 }
12021 Reg = RegOut;
12022 } else {
12023 DenseMap<const Value *, Register>::iterator I =
12024 FuncInfo.ValueMap.find(PHIOp);
12025 if (I != FuncInfo.ValueMap.end())
12026 Reg = I->second;
12027 else {
12028 assert(isa<AllocaInst>(PHIOp) &&
12029 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
12030 "Didn't codegen value into a register!??");
12031 Reg = FuncInfo.CreateRegs(&PN);
12032 CopyValueToVirtualRegister(PHIOp, Reg);
12033 }
12034 }
12035
12036 // Remember that this register needs to be added to the machine PHI node as
12037 // the input for this MBB.
12038 SmallVector<EVT, 4> ValueVTs;
12039 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
12040 for (EVT VT : ValueVTs) {
12041 const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
12042 for (unsigned i = 0; i != NumRegisters; ++i)
12043 FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg + i);
12044 Reg += NumRegisters;
12045 }
12046 }
12047 }
12048
12049 ConstantsOut.clear();
12050 }
12051
12052 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
12053 MachineFunction::iterator I(MBB);
12054 if (++I == FuncInfo.MF->end())
12055 return nullptr;
12056 return &*I;
12057 }
12058
12059 /// During lowering new call nodes can be created (such as memset, etc.).
12060 /// Those will become new roots of the current DAG, but complications arise
12061 /// when they are tail calls. In such cases, the call lowering will update
12062 /// the root, but the builder still needs to know that a tail call has been
12063 /// lowered in order to avoid generating an additional return.
12064 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
12065 // A null node means the call was lowered as a tail call.
12066 if (MaybeTC.getNode() != nullptr)
12067 DAG.setRoot(MaybeTC);
12068 else
12069 HasTailCall = true;
12070 }
12071
12072 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
12073 MachineBasicBlock *SwitchMBB,
12074 MachineBasicBlock *DefaultMBB) {
12075 MachineFunction *CurMF = FuncInfo.MF;
12076 MachineBasicBlock *NextMBB = nullptr;
12077 MachineFunction::iterator BBI(W.MBB);
12078 if (++BBI != FuncInfo.MF->end())
12079 NextMBB = &*BBI;
12080
12081 unsigned Size = W.LastCluster - W.FirstCluster + 1;
12082
12083 BranchProbabilityInfo *BPI = FuncInfo.BPI;
12084
12085 if (Size == 2 && W.MBB == SwitchMBB) {
12086 // If any two of the cases have the same destination, and if one value
12087 // is the same as the other, but has one bit unset that the other has set,
12088 // use bit manipulation to do two compares at once. For example:
12089 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12090 // TODO: This could be extended to merge any 2 cases in switches with 3
12091 // cases.
12092 // TODO: Handle cases where W.CaseBB != SwitchBB.
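// Here (illustrative): 6 is 0b110 and 4 is 0b100; they differ only in the
// 0b010 bit, so OR-ing that bit into X maps both values to 6 and a single
// compare suffices.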
12093 CaseCluster &Small = *W.FirstCluster;
12094 CaseCluster &Big = *W.LastCluster;
12095
12096 if (Small.Low == Small.High && Big.Low == Big.High &&
12097 Small.MBB == Big.MBB) {
12098 const APInt &SmallValue = Small.Low->getValue();
12099 const APInt &BigValue = Big.Low->getValue();
12100
12101 // Check that there is only one bit different.
12102 APInt CommonBit = BigValue ^ SmallValue;
12103 if (CommonBit.isPowerOf2()) {
12104 SDValue CondLHS = getValue(Cond);
12105 EVT VT = CondLHS.getValueType();
12106 SDLoc DL = getCurSDLoc();
12107
12108 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12109 DAG.getConstant(CommonBit, DL, VT));
12110 SDValue Cond = DAG.getSetCC(
12111 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12112 ISD::SETEQ);
12113
12114 // Update successor info.
12115 // Both Small and Big will jump to Small.BB, so we sum up the
12116 // probabilities.
12117 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12118 if (BPI)
12119 addSuccessorWithProb(
12120 SwitchMBB, DefaultMBB,
12121 // The default destination is the first successor in IR.
12122 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12123 else
12124 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12125
12126 // Insert the true branch.
12127 SDValue BrCond =
12128 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
12129 DAG.getBasicBlock(Small.MBB));
12130 // Insert the false branch.
12131 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
12132 DAG.getBasicBlock(DefaultMBB));
12133
12134 DAG.setRoot(BrCond);
12135 return;
12136 }
12137 }
12138 }
12139
12140 if (TM.getOptLevel() != CodeGenOptLevel::None) {
12141 // Here, we order cases by probability so the most likely case will be
12142 // checked first. However, two clusters can have the same probability in
12143 // which case their relative ordering is non-deterministic. So we use Low
12144 // as a tie-breaker as clusters are guaranteed to never overlap.
12145 llvm::sort(W.FirstCluster, W.LastCluster + 1,
12146 [](const CaseCluster &a, const CaseCluster &b) {
12147 return a.Prob != b.Prob ?
12148 a.Prob > b.Prob :
12149 a.Low->getValue().slt(b.Low->getValue());
12150 });
12151
12152 // Rearrange the case blocks so that the last one falls through if possible
12153 // without changing the order of probabilities.
12154 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
12155 --I;
12156 if (I->Prob > W.LastCluster->Prob)
12157 break;
12158 if (I->Kind == CC_Range && I->MBB == NextMBB) {
12159 std::swap(*I, *W.LastCluster);
12160 break;
12161 }
12162 }
12163 }
12164
12165 // Compute total probability.
12166 BranchProbability DefaultProb = W.DefaultProb;
12167 BranchProbability UnhandledProbs = DefaultProb;
12168 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
12169 UnhandledProbs += I->Prob;
12170
12171 MachineBasicBlock *CurMBB = W.MBB;
12172 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
12173 bool FallthroughUnreachable = false;
12174 MachineBasicBlock *Fallthrough;
12175 if (I == W.LastCluster) {
12176 // For the last cluster, fall through to the default destination.
12177 Fallthrough = DefaultMBB;
12178 FallthroughUnreachable = isa<UnreachableInst>(
12179 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12180 } else {
12181 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12182 CurMF->insert(BBI, Fallthrough);
12183 // Put Cond in a virtual register to make it available from the new blocks.
12184 ExportFromCurrentBlock(Cond);
12185 }
12186 UnhandledProbs -= I->Prob;
12187
12188 switch (I->Kind) {
12189 case CC_JumpTable: {
12190 // FIXME: Optimize away range check based on pivot comparisons.
12191 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12192 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12193
12194 // The jump block hasn't been inserted yet; insert it here.
12195 MachineBasicBlock *JumpMBB = JT->MBB;
12196 CurMF->insert(BBI, JumpMBB);
12197
12198 auto JumpProb = I->Prob;
12199 auto FallthroughProb = UnhandledProbs;
12200
12201 // If the default statement is a target of the jump table, we evenly
12202 // distribute the default probability to successors of CurMBB. Also
12203 // update the probability on the edge from JumpMBB to Fallthrough.
12204 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12205 SE = JumpMBB->succ_end();
12206 SI != SE; ++SI) {
12207 if (*SI == DefaultMBB) {
12208 JumpProb += DefaultProb / 2;
12209 FallthroughProb -= DefaultProb / 2;
12210 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12211 JumpMBB->normalizeSuccProbs();
12212 break;
12213 }
12214 }
12215
12216 // If the default clause is unreachable, propagate that knowledge into
12217 // JTH->FallthroughUnreachable which will use it to suppress the range
12218 // check.
12219 //
12220 // However, don't do this if we're doing branch target enforcement,
12221 // because a table branch _without_ a range check can be a tempting JOP
12222 // gadget - out-of-bounds inputs that are impossible in correct
12223 // execution become possible again if an attacker can influence the
12224 // control flow. So if an attacker doesn't already have a BTI bypass
12225 // available, we don't want them to be able to get one out of this
12226 // table branch.
12227 if (FallthroughUnreachable) {
12228 Function &CurFunc = CurMF->getFunction();
12229 if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12230 JTH->FallthroughUnreachable = true;
12231 }
12232
12233 if (!JTH->FallthroughUnreachable)
12234 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12235 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12236 CurMBB->normalizeSuccProbs();
12237
12238 // The jump table header will be inserted in our current block, do the
12239 // range check, and fall through to our fallthrough block.
12240 JTH->HeaderBB = CurMBB;
12241 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12242
12243 // If we're in the right place, emit the jump table header right now.
12244 if (CurMBB == SwitchMBB) {
12245 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12246 JTH->Emitted = true;
12247 }
12248 break;
12249 }
12250 case CC_BitTests: {
12251 // FIXME: Optimize away range check based on pivot comparisons.
12252 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12253
12254 // The bit test blocks haven't been inserted yet; insert them here.
12255 for (BitTestCase &BTC : BTB->Cases)
12256 CurMF->insert(BBI, BTC.ThisBB);
12257
12258 // Fill in fields of the BitTestBlock.
12259 BTB->Parent = CurMBB;
12260 BTB->Default = Fallthrough;
12261
12262 BTB->DefaultProb = UnhandledProbs;
12263 // If the cases in bit test don't form a contiguous range, we evenly
12264 // distribute the probability on the edge to Fallthrough to two
12265 // successors of CurMBB.
12266 if (!BTB->ContiguousRange) {
12267 BTB->Prob += DefaultProb / 2;
12268 BTB->DefaultProb -= DefaultProb / 2;
12269 }
12270
12271 if (FallthroughUnreachable)
12272 BTB->FallthroughUnreachable = true;
12273
12274 // If we're in the right place, emit the bit test header right now.
12275 if (CurMBB == SwitchMBB) {
12276 visitBitTestHeader(*BTB, SwitchMBB);
12277 BTB->Emitted = true;
12278 }
12279 break;
12280 }
12281 case CC_Range: {
12282 const Value *RHS, *LHS, *MHS;
12283 ISD::CondCode CC;
12284 if (I->Low == I->High) {
12285 // Check Cond == I->Low.
12286 CC = ISD::SETEQ;
12287 LHS = Cond;
12288 RHS = I->Low;
12289 MHS = nullptr;
12290 } else {
12291 // Check I->Low <= Cond <= I->High.
12292 CC = ISD::SETLE;
12293 LHS = I->Low;
12294 MHS = Cond;
12295 RHS = I->High;
12296 }
12297
12298 // If Fallthrough is unreachable, fold away the comparison.
12299 if (FallthroughUnreachable)
12300 CC = ISD::SETTRUE;
12301
12302 // The false probability is the sum of all unhandled cases.
12303 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12304 getCurSDLoc(), I->Prob, UnhandledProbs);
12305
12306 if (CurMBB == SwitchMBB)
12307 visitSwitchCase(CB, SwitchMBB);
12308 else
12309 SL->SwitchCases.push_back(CB);
12310
12311 break;
12312 }
12313 }
12314 CurMBB = Fallthrough;
12315 }
12316 }
12317
12318 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12319 const SwitchWorkListItem &W,
12320 Value *Cond,
12321 MachineBasicBlock *SwitchMBB) {
12322 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12323 "Clusters not sorted?");
12324 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12325
12326 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12327 SL->computeSplitWorkItemInfo(W);
12328
12329 // Use the first element on the right as pivot since we will make less-than
12330 // comparisons against it.
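// For example (illustrative): splitting clusters {1, 2 | 5, 6} picks 5 as
// the pivot; Cond < 5 branches into the left subtree and Cond >= 5 into the
// right one.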
12331 CaseClusterIt PivotCluster = FirstRight;
12332 assert(PivotCluster > W.FirstCluster);
12333 assert(PivotCluster <= W.LastCluster);
12334
12335 CaseClusterIt FirstLeft = W.FirstCluster;
12336 CaseClusterIt LastRight = W.LastCluster;
12337
12338 const ConstantInt *Pivot = PivotCluster->Low;
12339
12340 // New blocks will be inserted immediately after the current one.
12341 MachineFunction::iterator BBI(W.MBB);
12342 ++BBI;
12343
12344 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12345 // we can branch to its destination directly if it's squeezed exactly in
12346 // between the known lower bound and Pivot - 1.
12347 MachineBasicBlock *LeftMBB;
12348 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12349 FirstLeft->Low == W.GE &&
12350 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12351 LeftMBB = FirstLeft->MBB;
12352 } else {
12353 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12354 FuncInfo.MF->insert(BBI, LeftMBB);
12355 WorkList.push_back(
12356 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12357 // Put Cond in a virtual register to make it available from the new blocks.
12358 ExportFromCurrentBlock(Cond);
12359 }
12360
12361 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12362 // single cluster, RHS.Low == Pivot, and we can branch to its destination
12363 // directly if RHS.High equals the current upper bound.
12364 MachineBasicBlock *RightMBB;
12365 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12366 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12367 RightMBB = FirstRight->MBB;
12368 } else {
12369 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12370 FuncInfo.MF->insert(BBI, RightMBB);
12371 WorkList.push_back(
12372 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12373 // Put Cond in a virtual register to make it available from the new blocks.
12374 ExportFromCurrentBlock(Cond);
12375 }
12376
12377 // Create the CaseBlock record that will be used to lower the branch.
12378 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12379 getCurSDLoc(), LeftProb, RightProb);
12380
12381 if (W.MBB == SwitchMBB)
12382 visitSwitchCase(CB, SwitchMBB);
12383 else
12384 SL->SwitchCases.push_back(CB);
12385 }
12386
12387 // Scale CaseProb after peeling a case with probability PeeledCaseProb from
12388 // the switch statement.
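// The remaining cases are renormalized against the complement of the peeled
// probability: CaseProb / (1 - PeeledCaseProb). For example (illustrative
// numbers), peeling a case of probability 0.6 rescales a remaining case of
// 0.2 to 0.2 / 0.4 = 0.5.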
12389 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
12390 BranchProbability PeeledCaseProb) {
12391 if (PeeledCaseProb == BranchProbability::getOne())
12392 return BranchProbability::getZero();
12393 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12394
12395 uint32_t Numerator = CaseProb.getNumerator();
12396 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12397 return BranchProbability(Numerator, std::max(Numerator, Denominator));
12398 }
12399
12400 // Try to peel the top probability case if it exceeds the threshold.
12401 // Return current MachineBasicBlock for the switch statement if the peeling
12402 // does not occur.
12403 // If the peeling is performed, return the newly created MachineBasicBlock
12404 // for the peeled switch statement. Also update Clusters to remove the peeled
12405 // case. PeeledCaseProb is the BranchProbability for the peeled case.
12406 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12407 const SwitchInst &SI, CaseClusterVector &Clusters,
12408 BranchProbability &PeeledCaseProb) {
12409 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12410 // Don't peel if there is only one cluster or when optimizing for size.
12411 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12412 TM.getOptLevel() == CodeGenOptLevel::None ||
12413 SwitchMBB->getParent()->getFunction().hasMinSize())
12414 return SwitchMBB;
12415
12416 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12417 unsigned PeeledCaseIndex = 0;
12418 bool SwitchPeeled = false;
12419 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12420 CaseCluster &CC = Clusters[Index];
12421 if (CC.Prob < TopCaseProb)
12422 continue;
12423 TopCaseProb = CC.Prob;
12424 PeeledCaseIndex = Index;
12425 SwitchPeeled = true;
12426 }
12427 if (!SwitchPeeled)
12428 return SwitchMBB;
12429
12430 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12431 << TopCaseProb << "\n");
12432
12433 // Record the MBB for the peeled switch statement.
12434 MachineFunction::iterator BBI(SwitchMBB);
12435 ++BBI;
12436 MachineBasicBlock *PeeledSwitchMBB =
12437 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12438 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12439
12440 ExportFromCurrentBlock(SI.getCondition());
12441 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12442 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12443 nullptr, nullptr, TopCaseProb.getCompl()};
12444 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12445
12446 Clusters.erase(PeeledCaseIt);
12447 for (CaseCluster &CC : Clusters) {
12448 LLVM_DEBUG(
12449 dbgs() << "Scale the probablity for one cluster, before scaling: "
12450 << CC.Prob << "\n");
12451 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
12452 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12453 }
12454 PeeledCaseProb = TopCaseProb;
12455 return PeeledSwitchMBB;
12456 }
12457
12458 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
12459 // Extract cases from the switch.
12460 BranchProbabilityInfo *BPI = FuncInfo.BPI;
12461 CaseClusterVector Clusters;
12462 Clusters.reserve(SI.getNumCases());
12463 for (auto I : SI.cases()) {
12464 MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
12465 const ConstantInt *CaseVal = I.getCaseValue();
12466 BranchProbability Prob =
12467 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12468 : BranchProbability(1, SI.getNumCases() + 1);
12469 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
12470 }
12471
12472 MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
12473
12474 // Cluster adjacent cases with the same destination. We do this at all
12475 // optimization levels because it's cheap to do and will make codegen faster
12476 // if there are many clusters.
12477 sortAndRangeify(Clusters);
12478
12479 // The branch probability of the peeled case.
12480 BranchProbability PeeledCaseProb = BranchProbability::getZero();
12481 MachineBasicBlock *PeeledSwitchMBB =
12482 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12483
12484 // If there is only the default destination, jump there directly.
12485 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12486 if (Clusters.empty()) {
12487 assert(PeeledSwitchMBB == SwitchMBB);
12488 SwitchMBB->addSuccessor(DefaultMBB);
12489 if (DefaultMBB != NextBlock(SwitchMBB)) {
12490 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
12491 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
12492 }
12493 return;
12494 }
12495
12496 SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12497 DAG.getBFI());
12498 SL->findBitTestClusters(Clusters, &SI);
12499
12500 LLVM_DEBUG({
12501 dbgs() << "Case clusters: ";
12502 for (const CaseCluster &C : Clusters) {
12503 if (C.Kind == CC_JumpTable)
12504 dbgs() << "JT:";
12505 if (C.Kind == CC_BitTests)
12506 dbgs() << "BT:";
12507
12508 C.Low->getValue().print(dbgs(), true);
12509 if (C.Low != C.High) {
12510 dbgs() << '-';
12511 C.High->getValue().print(dbgs(), true);
12512 }
12513 dbgs() << ' ';
12514 }
12515 dbgs() << '\n';
12516 });
12517
12518 assert(!Clusters.empty());
12519 SwitchWorkList WorkList;
12520 CaseClusterIt First = Clusters.begin();
12521 CaseClusterIt Last = Clusters.end() - 1;
12522 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12523 // Scale the branch probability for DefaultMBB if peeling occurred and
12524 // DefaultMBB is not replaced.
12525 if (PeeledCaseProb != BranchProbability::getZero() &&
12526 DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
12527 DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
12528 WorkList.push_back(
12529 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
12530
12531 while (!WorkList.empty()) {
12532 SwitchWorkListItem W = WorkList.pop_back_val();
12533 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12534
12535 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
12536 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12537 // For optimized builds, lower large range as a balanced binary tree.
12538 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
12539 continue;
12540 }
12541
12542 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
12543 }
12544 }
12545
12546 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
12547 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12548 auto DL = getCurSDLoc();
12549 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12550 setValue(&I, DAG.getStepVector(DL, ResultVT));
12551 }
12552
12553 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
12554 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12555 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12556
12557 SDLoc DL = getCurSDLoc();
12558 SDValue V = getValue(I.getOperand(0));
12559 assert(VT == V.getValueType() && "Malformed vector.reverse!");
12560
12561 if (VT.isScalableVector()) {
12562 setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
12563 return;
12564 }
12565
12566 // Use VECTOR_SHUFFLE for the fixed-length vector
12567 // to maintain existing behavior.
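// For example (illustrative): for a 4-element vector the mask built below is
// <3, 2, 1, 0>, i.e. lane i of the result reads lane NumElts-1-i of V.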
12568 SmallVector<int, 8> Mask;
12569 unsigned NumElts = VT.getVectorMinNumElements();
12570 for (unsigned i = 0; i != NumElts; ++i)
12571 Mask.push_back(NumElts - 1 - i);
12572
12573 setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
12574 }
12575
12576 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I,
12577 unsigned Factor) {
12578 auto DL = getCurSDLoc();
12579 SDValue InVec = getValue(I.getOperand(0));
12580
12581 SmallVector<EVT, 4> ValueVTs;
12582 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12583 ValueVTs);
12584
12585 EVT OutVT = ValueVTs[0];
12586 unsigned OutNumElts = OutVT.getVectorMinNumElements();
12587
12588 SmallVector<SDValue, 4> SubVecs(Factor);
12589 for (unsigned i = 0; i != Factor; ++i) {
12590 assert(ValueVTs[i] == OutVT && "Expected VTs to be the same");
12591 SubVecs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12592 DAG.getVectorIdxConstant(OutNumElts * i, DL));
12593 }
12594
12595 // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit
12596 // from existing legalisation and combines.
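// For example (illustrative): deinterleaving <a0,b0,a1,b1,a2,b2,a3,b3> with
// Factor == 2 applies the stride masks <0,2,4,6> and <1,3,5,7> to produce
// <a0,a1,a2,a3> and <b0,b1,b2,b3>.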
12597 if (OutVT.isFixedLengthVector() && Factor == 2) {
12598 SDValue Even = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
12599 createStrideMask(0, 2, OutNumElts));
12600 SDValue Odd = DAG.getVectorShuffle(OutVT, DL, SubVecs[0], SubVecs[1],
12601 createStrideMask(1, 2, OutNumElts));
12602 SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12603 setValue(&I, Res);
12604 return;
12605 }
12606
12607 SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12608 DAG.getVTList(ValueVTs), SubVecs);
12609 setValue(&I, Res);
12610 }
12611
12612 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I,
12613 unsigned Factor) {
12614 auto DL = getCurSDLoc();
12615 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12616 EVT InVT = getValue(I.getOperand(0)).getValueType();
12617 EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12618
12619 SmallVector<SDValue, 8> InVecs(Factor);
12620 for (unsigned i = 0; i < Factor; ++i) {
12621 InVecs[i] = getValue(I.getOperand(i));
12622 assert(InVecs[i].getValueType() == InVecs[0].getValueType() &&
12623 "Expected VTs to be the same");
12624 }
12625
12626 // Use VECTOR_SHUFFLE for fixed-length vectors with factor of 2 to benefit
12627 // from existing legalisation and combines.
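// For example (illustrative): interleaving <a0,a1,a2,a3> with <b0,b1,b2,b3>
// concatenates them and applies the mask <0,4,1,5,2,6,3,7>, yielding
// <a0,b0,a1,b1,a2,b2,a3,b3>.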
12628 if (OutVT.isFixedLengthVector() && Factor == 2) {
12629 unsigned NumElts = InVT.getVectorMinNumElements();
12630 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVecs);
12631 setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12632 createInterleaveMask(NumElts, 2)));
12633 return;
12634 }
12635
12636 SmallVector<EVT, 8> ValueVTs(Factor, InVT);
12637 SDValue Res =
12638 DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, DAG.getVTList(ValueVTs), InVecs);
12639
12640 SmallVector<SDValue, 8> Results(Factor);
12641 for (unsigned i = 0; i < Factor; ++i)
12642 Results[i] = Res.getValue(i);
12643
12644 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Results);
12645 setValue(&I, Res);
12646 }
12647
12648 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12649 SmallVector<EVT, 4> ValueVTs;
12650 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12651 ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  SmallVector<SDValue, 4> Values(NumValues);
  SDValue Op = getValue(I.getOperand(0));

  for (unsigned i = 0; i != NumValues; ++i)
    Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
                            SDValue(Op.getNode(), Op.getResNo() + i));

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
}

void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());

  SDLoc DL = getCurSDLoc();
  SDValue V1 = getValue(I.getOperand(0));
  SDValue V2 = getValue(I.getOperand(1));
  int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();

  // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
  if (VT.isScalableVector()) {
    setValue(
        &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
                        DAG.getSignedConstant(
                            Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
    return;
  }

  unsigned NumElts = VT.getVectorNumElements();

  uint64_t Idx = (NumElts + Imm) % NumElts;

  // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
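  // Worked example (illustrative): with NumElts == 4 and Imm == -1, Idx == 3
  // and the mask is {3, 4, 5, 6}: the last element of V1 followed by the
  // first three elements of V2.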
  SmallVector<int, 8> Mask;
  for (unsigned i = 0; i < NumElts; ++i)
    Mask.push_back(Idx + i);
  setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
}

// Consider the following MIR after SelectionDAG, which produces output in
// physregs in the first case or virtregs in the second case.
//
// INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
// %5:gr32 = COPY $ebx
// %6:gr32 = COPY $edx
// %1:gr32 = COPY %6:gr32
// %0:gr32 = COPY %5:gr32
//
// INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
// %1:gr32 = COPY %6:gr32
// %0:gr32 = COPY %5:gr32
//
// Given %0, we'd like to return $ebx in the first case and %5 in the second.
// Given %1, we'd like to return $edx in the first case and %6 in the second.
//
// If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
// to a single virtreg (such as %0). The remaining outputs monotonically
// increase in virtreg number from there. If a callbr has no outputs, then it
// should not have a corresponding callbr landingpad; in fact, the callbr
// landingpad would not even be able to refer to such a callbr.
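// (Illustrative trace, added for exposition: in the first example above,
// FollowCopyChain starting at %0 walks %0 -> %5 -> $ebx and returns $ebx;
// in the second it walks %0 -> %5, finds %5 defined directly by the
// INLINEASM_BR, and returns %5.)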
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MI = MRI.def_begin(Reg)->getParent();
  // There is definitely at least one copy.
  assert(MI->getOpcode() == TargetOpcode::COPY &&
         "start of copy chain MUST be COPY");
  Reg = MI->getOperand(1).getReg();
  MI = MRI.def_begin(Reg)->getParent();
  // There may be an optional second copy.
  if (MI->getOpcode() == TargetOpcode::COPY) {
    assert(Reg.isVirtual() && "expected COPY of virtual register");
    Reg = MI->getOperand(1).getReg();
    assert(Reg.isPhysical() && "expected COPY of physical register");
    MI = MRI.def_begin(Reg)->getParent();
  }
  // The start of the chain must be an INLINEASM_BR.
  assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
         "end of copy chain MUST be INLINEASM_BR");
  return Reg;
}

// We must do this walk rather than the simpler
// setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
// otherwise we will end up with copies of virtregs only valid along direct
// edges.
void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
  SmallVector<EVT, 8> ResultVTs;
  SmallVector<SDValue, 8> ResultValues;
  const auto *CBR =
      cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();

  Register InitialDef = FuncInfo.ValueMap[CBR];
  SDValue Chain = DAG.getRoot();

  // Re-parse the asm constraints string.
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
  for (auto &T : TargetConstraints) {
    SDISelAsmOperandInfo OpInfo(T);
    if (OpInfo.Type != InlineAsm::isOutput)
      continue;

    // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
    // individual constraint.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      // Fill in OpInfo.AssignedRegs.Regs.
      getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);

      // getRegistersForValue may produce 1 to many registers based on whether
      // the OpInfo.ConstraintVT is legal on the target or not.
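      // (For instance, as an illustrative case: an i64 output on a 32-bit
      // target is typically split across two 32-bit registers, giving two
      // entries in AssignedRegs.Regs.)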
      for (Register &Reg : OpInfo.AssignedRegs.Regs) {
        Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
        if (OriginalDef.isPhysical())
          FuncInfo.MBB->addLiveIn(OriginalDef);
        // Update the assigned registers to use the original defs.
        Reg = OriginalDef;
      }

      SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
          DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
      break;
    }
    case TargetLowering::C_Other: {
      SDValue Flag;
      SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                                  OpInfo, DAG);
      ++InitialDef;
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
      break;
    }
    default:
      break;
    }
  }
  SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                          DAG.getVTList(ResultVTs), ResultValues);
  setValue(&I, V);
}
