//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::SDPatternMatch;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
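
// Illustrative example of the distinction above: for doubles, -0.0 == 0.0
// evaluates to true, but a node holding +0.0 is not "exactly" -0.0, since
// the two values differ in their sign bit:
//   N->isExactlyValue(APFloat(-0.0)); // false for a node holding +0.0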

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
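
// For example (illustrative): any double exactly representable as a float,
// such as 0.5, is valid for MVT::f32, whereas the closest double to 0.1 is
// not, because rounding it to float semantics loses information.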

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getAPIntValue().trunc(EltSize);
      return true;
    }
    if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize);
      return true;
    }
  }

  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  // Endianness does not matter here. We are checking for a splat given the
  // element size of the vector, and if we find such a splat for little endian
  // layout, then that should be valid also for big endian (as the full vector
  // size is known to be a multiple of the element size).
  const bool IsBigEndian = false;
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize, IsBigEndian) &&
         EltSize == SplatBitSize;
}
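
// Illustrative example: for a SPLAT_VECTOR of i32 elements whose constant
// operand was promoted to a wider scalar (say i64 7), SplatVal receives the
// value 7 truncated to the 32-bit element width.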

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countr_one() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
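
// For example (illustrative): a v4i8 all-ones vector may survive legalization
// as a BUILD_VECTOR of i32 constants 255. Each constant has only its low 8
// bits set, yet countr_one() == 8 covers the 8-bit element width, so the
// vector is still recognized as all-ones.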

bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countr_zero() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
                             bool Signed) {
  assert(N->getValueType(0).isVector() && "Expected a vector!");

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (EltSize <= NewEltSize)
    return false;

  if (N->getOpcode() == ISD::ZERO_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           !Signed;
  }
  if (N->getOpcode() == ISD::SIGN_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           Signed;
  }
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;

    APInt C = Op->getAsAPIntVal().trunc(EltSize);
    if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)
      return false;
    if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)
      return false;
  }

  return true;
}
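
// Illustrative examples: a v4i32 BUILD_VECTOR of the constants {0, 17, 200,
// 255} is shrinkable to 8 bits with Signed == false, since each value
// round-trips through an 8-bit truncate and zero-extend; it is not shrinkable
// with Signed == true, because 200 sign-extends from 8 bits back to a
// negative value.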

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::isFreezeUndef(const SDNode *N) {
  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
}

template <typename ConstNodeType>
bool ISD::matchUnaryPredicateImpl(SDValue Op,
                                  std::function<bool(ConstNodeType *)> Match,
                                  bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *C = dyn_cast<ConstNodeType>(Op))
    return Match(C);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode() &&
      ISD::SPLAT_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
// Build used template types.
template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
    SDValue, std::function<bool(ConstantSDNode *)>, bool);
template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
    SDValue, std::function<bool(ConstantFPSDNode *)>, bool);
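
// Illustrative usage (a sketch, via the ISD::matchUnaryPredicate wrapper):
// test whether Op is a constant, or a constant BUILD_VECTOR/SPLAT_VECTOR,
// whose elements are all non-zero:
//   bool AllNonZero = ISD::matchUnaryPredicate(
//       Op, [](ConstantSDNode *C) { return !C->isZero(); });
// With AllowUndefs, the callback must also accept a null pointer, which is
// passed for undef elements.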

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (LHS.getOpcode() != RHS.getOpcode() ||
      (LHS.getOpcode() != ISD::BUILD_VECTOR &&
       LHS.getOpcode() != ISD::SPLAT_VECTOR))
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
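
// Illustrative usage (a sketch): check element by element that two constant
// shift amounts sum to the full bit width, with BitWidth being the assumed
// element width captured by the lambda:
//   bool IsRotate = ISD::matchBinaryPredicate(
//       ShAmt0, ShAmt1, [BitWidth](ConstantSDNode *L, ConstantSDNode *R) {
//         return (L->getAPIntValue() + R->getAPIntValue()) == BitWidth;
//       });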

ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
  case ISD::VP_REDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
  case ISD::VP_REDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
  case ISD::VP_REDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
  case ISD::VP_REDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
  case ISD::VP_REDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
  case ISD::VP_REDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
  case ISD::VP_REDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
  case ISD::VP_REDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
  case ISD::VP_REDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
  case ISD::VP_REDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
  case ISD::VP_REDUCE_FMIN:
    return ISD::FMINNUM;
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMAXIMUM:
    return ISD::FMAXIMUM;
  case ISD::VECREDUCE_FMINIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    return ISD::FMINIMUM;
  }
}

bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
  case ISD::VPSD:                                                              \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}

bool ISD::isVPBinaryOp(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

bool ISD::isVPReduction(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

/// The operand position of the vector mask.
std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
  case ISD::VPSD:                                                              \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the explicit vector length parameter.
std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
  case ISD::VPSD:                                                              \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

std::optional<unsigned> ISD::getBaseOpcodeForVP(unsigned VPOpcode,
                                                bool hasFPExcept) {
  // FIXME: Return strict opcodes in case of fp exceptions.
  switch (VPOpcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}

unsigned ISD::getVPForBaseOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("can not translate this Opcode to VP.");
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
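  // For example (illustrative): SETLT has the L bit set and swaps to SETGT,
  // while SETULE keeps its U and E bits and swaps to SETUGE.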
  return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
                       (OldL << 1) |      // New G bit
                       (OldG << 2));      // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7; // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.
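  // For example (illustrative): for integers, SETLT inverts to SETGE; for
  // floating point, SETOLT inverts to SETUGE, since the negation of an
  // ordered comparison must also be true when the operands are unordered.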

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2; // Combine all of the condition bits.
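  // For example (illustrative): SETGT | SETLT combines the G and L bits into
  // SETNE, matching the identity (a > b) || (a < b) == (a != b).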

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16; // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
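  // For example (illustrative): SETGE & SETLE intersects to just the E bit,
  // i.e. SETEQ, matching the identity (a >= b) && (a <= b) == (a == b).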

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
    case ISD::SETOEQ:                                // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break; // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break; // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break; // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break; // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::PSEUDO_PROBE:
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    ID.AddInteger(ST->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_LOAD: {
    const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
    ID.AddInteger(ELD->getMemoryVT().getRawBits());
    ID.AddInteger(ELD->getRawSubclassData());
    ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
    ID.AddInteger(ELD->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_STORE: {
    const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
    ID.AddInteger(EST->getMemoryVT().getRawBits());
    ID.AddInteger(EST->getRawSubclassData());
    ID.AddInteger(EST->getPointerInfo().getAddrSpace());
    ID.AddInteger(EST->getMemOperand()->getFlags());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
    const VPStridedLoadSDNode *SLD = cast<VPStridedLoadSDNode>(N);
    ID.AddInteger(SLD->getMemoryVT().getRawBits());
    ID.AddInteger(SLD->getRawSubclassData());
    ID.AddInteger(SLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
    const VPStridedStoreSDNode *SST = cast<VPStridedStoreSDNode>(N);
    ID.AddInteger(SST->getMemoryVT().getRawBits());
    ID.AddInteger(SST->getRawSubclassData());
    ID.AddInteger(SST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VP_GATHER: {
    const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
    ID.AddInteger(EG->getMemoryVT().getRawBits());
    ID.AddInteger(EG->getRawSubclassData());
    ID.AddInteger(EG->getPointerInfo().getAddrSpace());
    ID.AddInteger(EG->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_SCATTER: {
    const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
    ID.AddInteger(ES->getMemoryVT().getRawBits());
    ID.AddInteger(ES->getRawSubclassData());
    ID.AddInteger(ES->getPointerInfo().getAddrSpace());
    ID.AddInteger(ES->getMemOperand()->getFlags());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    ID.AddInteger(MLD->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    ID.AddInteger(MST->getMemOperand()->getFlags());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    ID.AddInteger(MG->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    ID.AddInteger(MS->getMemOperand()->getFlags());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    ID.AddInteger(AT->getMemOperand()->getFlags());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
    for (int M : Mask)
      ID.AddInteger(M);
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  case ISD::AssertAlign:
    ID.AddInteger(cast<AssertAlignSDNode>(N)->getAlign().value());
    break;
  case ISD::PREFETCH:
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    // Handled by MemIntrinsicSDNode check after the switch.
    break;
  } // end switch (N->getOpcode())

  // MemIntrinsic nodes could also have subclass data, address spaces, and flags
  // to check.
  if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
    ID.AddInteger(MN->getRawSubclassData());
    ID.AddInteger(MN->getPointerInfo().getAddrSpace());
    ID.AddInteger(MN->getMemOperand()->getFlags());
    ID.AddInteger(MN->getMemoryVT().getRawBits());
  }
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a glue result.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a glue result.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the worklist
    // to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
  assert(!(V->isVariadic() && isParameter));
  if (isParameter)
    ByvalParmDbgValues.push_back(V);
  else
    DbgValues.push_back(V);
  for (const SDNode *Node : V->getSDNodes())
    if (Node)
      DbgValMap[Node].push_back(V);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDAG that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);

  // Invalidate extra info.
  SDEI.erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N, const TargetLowering *TLI) {
  switch (N->getOpcode()) {
  default:
    if (N->getOpcode() > ISD::BUILTIN_OP_END)
      TLI->verifyTargetSDNode(N);
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (const SDUse &Op : N->ops()) {
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N, TLI);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
RemoveNodeFromCSEMaps(SDNode * N)1174 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1175 bool Erased = false;
1176 switch (N->getOpcode()) {
1177 case ISD::HANDLENODE: return false; // noop.
1178 case ISD::CONDCODE:
1179 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1180 "Cond code doesn't exist!");
1181 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1182 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1183 break;
1184 case ISD::ExternalSymbol:
1185 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1186 break;
1187 case ISD::TargetExternalSymbol: {
1188 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1189 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1190 ESN->getSymbol(), ESN->getTargetFlags()));
1191 break;
1192 }
1193 case ISD::MCSymbol: {
1194 auto *MCSN = cast<MCSymbolSDNode>(N);
1195 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1196 break;
1197 }
1198 case ISD::VALUETYPE: {
1199 EVT VT = cast<VTSDNode>(N)->getVT();
1200 if (VT.isExtended()) {
1201 Erased = ExtendedValueTypeNodes.erase(VT);
1202 } else {
1203 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1204 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1205 }
1206 break;
1207 }
1208 default:
1209 // Remove it from the CSE Map.
1210 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1211 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1212 Erased = CSEMap.RemoveNode(N);
1213 break;
1214 }
1215 #ifndef NDEBUG
1216 // Verify that the node was actually in one of the CSE maps, unless it has a
1217 // glue result (which cannot be CSE'd) or is one of the special cases that are
1218 // not subject to CSE.
1219 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1220 !N->isMachineOpcode() && !doNotCSE(N)) {
1221 N->dump(this);
1222 dbgs() << "\n";
1223 llvm_unreachable("Node is not in map!");
1224 }
1225 #endif
1226 return Erased;
1227 }
1228
1229 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1230 /// maps and modified in place. Add it back to the CSE maps, unless an identical
1231 /// node already exists, in which case transfer all its users to the existing
1232 /// node. This transfer can potentially trigger recursive merging.
1233 void
AddModifiedNodeToCSEMaps(SDNode * N)1234 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1235 // For node types that aren't CSE'd, just act as if no identical node
1236 // already exists.
1237 if (!doNotCSE(N)) {
1238 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1239 if (Existing != N) {
1240 // If there was already an existing matching node, use ReplaceAllUsesWith
1241 // to replace the dead one with the existing one. This can cause
1242 // recursive merging of other unrelated nodes down the line.
1243 Existing->intersectFlagsWith(N->getFlags());
1244 ReplaceAllUsesWith(N, Existing);
1245
1246 // N is now dead. Inform the listeners and delete it.
1247 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1248 DUL->NodeDeleted(N, Existing);
1249 DeleteNodeNotInCSEMaps(N);
1250 return;
1251 }
1252 }
1253
1254 // If the node doesn't already exist, we updated it. Inform listeners.
1255 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1256 DUL->NodeUpdated(N);
1257 }
1258
1259 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1260 /// were replaced with those specified. If this node is never memoized,
1261 /// return null, otherwise return a pointer to the slot it would take. If a
1262 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op,void * & InsertPos)1263 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1264 void *&InsertPos) {
1265 if (doNotCSE(N))
1266 return nullptr;
1267
1268 SDValue Ops[] = { Op };
1269 FoldingSetNodeID ID;
1270 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1271 AddNodeIDCustom(ID, N);
1272 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1273 if (Node)
1274 Node->intersectFlagsWith(N->getFlags());
1275 return Node;
1276 }
1277
1278 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1279 /// were replaced with those specified. If this node is never memoized,
1280 /// return null, otherwise return a pointer to the slot it would take. If a
1281 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op1,SDValue Op2,void * & InsertPos)1282 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1283 SDValue Op1, SDValue Op2,
1284 void *&InsertPos) {
1285 if (doNotCSE(N))
1286 return nullptr;
1287
1288 SDValue Ops[] = { Op1, Op2 };
1289 FoldingSetNodeID ID;
1290 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1291 AddNodeIDCustom(ID, N);
1292 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1293 if (Node)
1294 Node->intersectFlagsWith(N->getFlags());
1295 return Node;
1296 }
1297
1298 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1299 /// were replaced with those specified. If this node is never memoized,
1300 /// return null, otherwise return a pointer to the slot it would take. If a
1301 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,ArrayRef<SDValue> Ops,void * & InsertPos)1302 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1303 void *&InsertPos) {
1304 if (doNotCSE(N))
1305 return nullptr;
1306
1307 FoldingSetNodeID ID;
1308 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1309 AddNodeIDCustom(ID, N);
1310 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1311 if (Node)
1312 Node->intersectFlagsWith(N->getFlags());
1313 return Node;
1314 }
1315
getEVTAlign(EVT VT) const1316 Align SelectionDAG::getEVTAlign(EVT VT) const {
1317 Type *Ty = VT == MVT::iPTR ? PointerType::get(*getContext(), 0)
1318 : VT.getTypeForEVT(*getContext());
1319
1320 return getDataLayout().getABITypeAlign(Ty);
1321 }
1322
1323 // EntryNode could meaningfully have debug info if we can find it...
SelectionDAG(const TargetMachine & tm,CodeGenOptLevel OL)1324 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOptLevel OL)
1325 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),
1326 getVTList(MVT::Other, MVT::Glue)),
1327 Root(getEntryNode()) {
1328 InsertNode(&EntryNode);
1329 DbgInfo = new SDDbgInfo();
1330 }
1331
init(MachineFunction & NewMF,OptimizationRemarkEmitter & NewORE,Pass * PassPtr,const TargetLibraryInfo * LibraryInfo,UniformityInfo * NewUA,ProfileSummaryInfo * PSIin,BlockFrequencyInfo * BFIin,FunctionVarLocs const * VarLocs)1332 void SelectionDAG::init(MachineFunction &NewMF,
1333 OptimizationRemarkEmitter &NewORE, Pass *PassPtr,
1334 const TargetLibraryInfo *LibraryInfo,
1335 UniformityInfo *NewUA, ProfileSummaryInfo *PSIin,
1336 BlockFrequencyInfo *BFIin,
1337 FunctionVarLocs const *VarLocs) {
1338 MF = &NewMF;
1339 SDAGISelPass = PassPtr;
1340 ORE = &NewORE;
1341 TLI = getSubtarget().getTargetLowering();
1342 TSI = getSubtarget().getSelectionDAGInfo();
1343 LibInfo = LibraryInfo;
1344 Context = &MF->getFunction().getContext();
1345 UA = NewUA;
1346 PSI = PSIin;
1347 BFI = BFIin;
1348 FnVarLocs = VarLocs;
1349 }
1350
~SelectionDAG()1351 SelectionDAG::~SelectionDAG() {
1352 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1353 allnodes_clear();
1354 OperandRecycler.clear(OperandAllocator);
1355 delete DbgInfo;
1356 }
1357
shouldOptForSize() const1358 bool SelectionDAG::shouldOptForSize() const {
1359 return MF->getFunction().hasOptSize() ||
1360 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1361 }
1362
allnodes_clear()1363 void SelectionDAG::allnodes_clear() {
1364 assert(&*AllNodes.begin() == &EntryNode);
1365 AllNodes.remove(AllNodes.begin());
1366 while (!AllNodes.empty())
1367 DeallocateNode(&AllNodes.front());
1368 #ifndef NDEBUG
1369 NextPersistentId = 0;
1370 #endif
1371 }
1372
FindNodeOrInsertPos(const FoldingSetNodeID & ID,void * & InsertPos)1373 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1374 void *&InsertPos) {
1375 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1376 if (N) {
1377 switch (N->getOpcode()) {
1378 default: break;
1379 case ISD::Constant:
1380 case ISD::ConstantFP:
1381 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1382 "debug location. Use another overload.");
1383 }
1384 }
1385 return N;
1386 }
1387
FindNodeOrInsertPos(const FoldingSetNodeID & ID,const SDLoc & DL,void * & InsertPos)1388 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1389 const SDLoc &DL, void *&InsertPos) {
1390 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1391 if (N) {
1392 switch (N->getOpcode()) {
1393 case ISD::Constant:
1394 case ISD::ConstantFP:
1395 // Erase debug location from the node if the node is used at several
1396 // different places. Do not propagate one location to all uses as it
1397 // will cause a worse single stepping debugging experience.
1398 if (N->getDebugLoc() != DL.getDebugLoc())
1399 N->setDebugLoc(DebugLoc());
1400 break;
1401 default:
1402 // When the node's point of use is located earlier in the instruction
1403 // sequence than its prior point of use, update its debug info to the
1404 // earlier location.
1405 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1406 N->setDebugLoc(DL.getDebugLoc());
1407 break;
1408 }
1409 }
1410 return N;
1411 }
1412
clear()1413 void SelectionDAG::clear() {
1414 allnodes_clear();
1415 OperandRecycler.clear(OperandAllocator);
1416 OperandAllocator.Reset();
1417 CSEMap.clear();
1418
1419 ExtendedValueTypeNodes.clear();
1420 ExternalSymbols.clear();
1421 TargetExternalSymbols.clear();
1422 MCSymbols.clear();
1423 SDEI.clear();
1424 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), nullptr);
1425 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), nullptr);
1426
1427 EntryNode.UseList = nullptr;
1428 InsertNode(&EntryNode);
1429 Root = getEntryNode();
1430 DbgInfo->clear();
1431 }
1432
getFPExtendOrRound(SDValue Op,const SDLoc & DL,EVT VT)1433 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1434 return VT.bitsGT(Op.getValueType())
1435 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1436 : getNode(ISD::FP_ROUND, DL, VT, Op,
1437 getIntPtrConstant(0, DL, /*isTarget=*/true));
1438 }
1439
1440 std::pair<SDValue, SDValue>
getStrictFPExtendOrRound(SDValue Op,SDValue Chain,const SDLoc & DL,EVT VT)1441 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1442 const SDLoc &DL, EVT VT) {
1443 assert(!VT.bitsEq(Op.getValueType()) &&
1444 "Strict no-op FP extend/round not allowed.");
1445 SDValue Res =
1446 VT.bitsGT(Op.getValueType())
1447 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1448 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1449 {Chain, Op, getIntPtrConstant(0, DL)});
1450
1451 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1452 }
1453
getAnyExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1454 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1455 return VT.bitsGT(Op.getValueType()) ?
1456 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1457 getNode(ISD::TRUNCATE, DL, VT, Op);
1458 }
1459
getSExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1460 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1461 return VT.bitsGT(Op.getValueType()) ?
1462 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1463 getNode(ISD::TRUNCATE, DL, VT, Op);
1464 }
1465
getZExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1466 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1467 return VT.bitsGT(Op.getValueType()) ?
1468 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1469 getNode(ISD::TRUNCATE, DL, VT, Op);
1470 }
1471
SDValue SelectionDAG::getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL,
                                                EVT VT) {
  assert(!VT.isVector());
  auto Type = Op.getValueType();
  SDValue DestOp;
  if (Type == VT)
    return Op;
  auto Size = Op.getValueSizeInBits();
  DestOp = getBitcast(EVT::getIntegerVT(*Context, Size), Op);
  if (DestOp.getValueType() == VT)
    return DestOp;

  return getAnyExtOrTrunc(DestOp, DL, VT);
}

SDValue SelectionDAG::getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(!VT.isVector());
  auto Type = Op.getValueType();
  SDValue DestOp;
  if (Type == VT)
    return Op;
  auto Size = Op.getValueSizeInBits();
  DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
  if (DestOp.getValueType() == VT)
    return DestOp;

  return getSExtOrTrunc(DestOp, DL, VT);
}

SDValue SelectionDAG::getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(!VT.isVector());
  auto Type = Op.getValueType();
  SDValue DestOp;
  if (Type == VT)
    return Op;
  auto Size = Op.getValueSizeInBits();
  DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
  if (DestOp.getValueType() == VT)
    return DestOp;

  return getZExtOrTrunc(DestOp, DL, VT);
}

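/// Convert Op, which must be of integer type, to the integer type VT, by
/// either truncating it or performing the extension appropriate for the
/// target's BooleanContents for type OpVT.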
SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

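/// Return the expression required to zero extend the Op value assuming it was
/// the smaller SrcTy value: the bits of Op below VT's scalar width are kept
/// and the remaining high bits are cleared with an AND mask.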
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}

SDValue SelectionDAG::getVPZeroExtendInReg(SDValue Op, SDValue Mask,
                                           SDValue EVL, const SDLoc &DL,
                                           EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getVPZeroExtendInReg FP types");
  assert(VT.isVector() && OpVT.isVector() &&
         "getVPZeroExtendInReg type and operand type should be vector!");
  assert(VT.getVectorElementCount() == OpVT.getVectorElementCount() &&
         "Vector element counts must match in getVPZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::VP_AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT), Mask,
                 EVL);
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

SDValue SelectionDAG::getNegative(SDValue Val, const SDLoc &DL, EVT VT) {
  return getNode(ISD::SUB, DL, VT, getConstant(0, DL, VT), Val);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getVPLogicalNOT(const SDLoc &DL, SDValue Val,
                                      SDValue Mask, SDValue EVL, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);
}

SDValue SelectionDAG::getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op,
                                         SDValue Mask, SDValue EVL) {
  return getVPZExtOrTrunc(DL, VT, Op, Mask, EVL);
}

SDValue SelectionDAG::getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op,
                                       SDValue Mask, SDValue EVL) {
  if (VT.bitsGT(Op.getValueType()))
    return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL);
  if (VT.bitsLT(Op.getValueType()))
    return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL);
  return Op;
}

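/// Create a true or false boolean constant of type VT, using the target's
/// BooleanContents for type OpVT to decide how 'true' is represented.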
SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

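/// Create a ConstantSDNode (or a TargetConstant if isT is set) wrapping Val.
/// If VT is a vector type, the constant is splatted to all elements; e.g.
/// getConstant(1, DL, MVT::v4i32) yields a splat of four i32 1s.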
SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal;
    if (TLI->isSExtCheaperThanZExt(VT.getScalarType(), EltVT))
      NewVal = Elt->getValue().sextOrTrunc(EltVT.getSizeInBits());
    else
      NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();

    // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
    if (VT.isScalableVector() ||
        TLI->isOperationLegal(ISD::SPLAT_VECTOR, VT)) {
      assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
             "Can only handle an even split!");
      unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;

      SmallVector<SDValue, 2> ScalarParts;
      for (unsigned i = 0; i != Parts; ++i)
        ScalarParts.push_back(getConstant(
            NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
            ViaEltVT, isT, isO));

      return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
    }

    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
      EltParts.push_back(getConstant(
          NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      llvm::append_range(Ops, EltParts);

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  SDVTList VTs = getVTList(EltVT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplat(VT, DL, Result);
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL) {
  assert(VT.isInteger() && "Shift amount is not an integer type!");
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout());
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getShiftAmountConstant(const APInt &Val, EVT VT,
                                             const SDLoc &DL) {
  assert(Val.ult(VT.getScalarSizeInBits()) && "Out of range shift");
  return getShiftAmountConstant(Val.getZExtValue(), VT, DL);
}

SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                                           bool isTarget) {
  return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  SDVTList VTs = getVTList(EltVT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, VTs);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplat(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
      EltVT == MVT::f16 || EltVT == MVT::bf16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  }
  llvm_unreachable("Unsupported type in getConstantFP");
}

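/// Return a GlobalAddress (or TargetGlobalAddress) node for GV, using the TLS
/// flavor of the opcode for thread-local globals and sign-extending Offset to
/// the width of the pointer type.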
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VTs, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTableDebugInfo(int JTI, SDValue Chain,
                                            const SDLoc &DL) {
  EVT PTy = getTargetLoweringInfo().getPointerTy(getDataLayout());
  return getNode(ISD::JUMP_TABLE_DEBUG_INFO, DL, MVT::Glue, Chain,
                 getTargetConstant(static_cast<uint64_t>(JTI), DL, PTy, true));
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), std::nullopt);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, getVTList(VT));
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, getVTList(VT));
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, getVTList(VT));
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

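/// Return a node that represents the runtime scaling 'MulImm * RunTimeVScale'.
/// If ConstantFold is set and vscale has a single known value for this
/// function, the result is folded to a plain constant instead.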
SDValue SelectionDAG::getVScale(const SDLoc &DL, EVT VT, APInt MulImm,
                                bool ConstantFold) {
  assert(MulImm.getBitWidth() == VT.getSizeInBits() &&
         "APInt size does not match type size!");

  if (MulImm == 0)
    return getConstant(0, DL, VT);

  if (ConstantFold) {
    const MachineFunction &MF = getMachineFunction();
    const Function &F = MF.getFunction();
    ConstantRange CR = getVScaleRange(&F, 64);
    if (const APInt *C = CR.getSingleElement())
      return getConstant(MulImm * C->getZExtValue(), DL, VT);
  }

  return getNode(ISD::VSCALE, DL, VT, getConstant(MulImm, DL, VT));
}

SDValue SelectionDAG::getElementCount(const SDLoc &DL, EVT VT, ElementCount EC,
                                      bool ConstantFold) {
  if (EC.isScalable())
    return getVScale(DL, VT,
                     APInt(VT.getSizeInBits(), EC.getKnownMinValue()));

  return getConstant(EC.getKnownMinValue(), DL, VT);
}

SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
  APInt One(ResVT.getScalarSizeInBits(), 1);
  return getStepVector(DL, ResVT, One);
}

SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT,
                                    const APInt &StepVal) {
  assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
  if (ResVT.isScalableVector())
    return getNode(
        ISD::STEP_VECTOR, DL, ResVT,
        getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));

  SmallVector<SDValue, 16> OpsStepConstants;
  for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
    OpsStepConstants.push_back(
        getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
  return getBuildVector(ResVT, DL, OpsStepConstants);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

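/// Return an ISD::VECTOR_SHUFFLE node, canonicalizing the operands and mask
/// first: undef inputs are commuted to the RHS, splats are blended or
/// returned directly where possible, and identity shuffles fold to N1.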
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask);

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize: if all indices select lhs -> shuffle lhs, undef
  // Canonicalize: if all indices select rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts || isNullConstant(Splat))
          return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, VTs, Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  llvm::copy(MaskVec, MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VTs, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  EVT VT = SV.getValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, VTs, std::nullopt);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VTs);
  N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), std::nullopt);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
                                 MCSymbol *Label) {
  return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
}

SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
                                   SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N =
      newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
  SDVTList VTs = getVTList(VT);

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, VTs, std::nullopt);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), std::nullopt);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), std::nullopt);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, VTs, Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFreeze(SDValue V) {
  return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
}

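/// Expand the specified ISD::VAARG node as the Legalize pass would: load the
/// va_list pointer, overalign it if required, advance it past the argument,
/// store the updated pointer back, and finally load the argument itself.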
SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                               Tmp2, MachinePointerInfo(V));
  SDValue VAList = VAListLoad;

  if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
    VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                     getConstant(MA->value() - 1, dl, VAList.getValueType()));

    VAList =
        getNode(ISD::AND, dl, VAList.getValueType(), VAList,
                getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg.
  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                 getConstant(getDataLayout().getTypeAllocSize(
                                 VT.getTypeForEVT(*getContext())),
                             dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer.
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
  // Load the actual argument out of the pointer VAList.
  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
}

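/// Expand the specified ISD::VACOPY node as the Legalize pass would: by
/// default a va_list is a plain pointer, so copying it is a load from the
/// source followed by a store to the destination.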
SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  // This defaults to loading a pointer from the input and storing it to the
  // output, returning the chain.
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

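/// Compute the ABI (UseABI == true) or preferred (UseABI == false) alignment
/// of VT, possibly reduced when an illegal vector type would otherwise demand
/// more alignment than the stack provides once it is broken down.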
Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
  const DataLayout &DL = getDataLayout();
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (TLI->isTypeLegal(VT) || !VT.isVector())
    return RedAlign;

  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  const Align StackAlign = TFI->getStackAlign();

  // See if we can choose a smaller ABI alignment in cases where it's an
  // illegal vector type that will get broken down.
  if (RedAlign > StackAlign) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
                                NumIntermediates, RegisterVT);
    Ty = IntermediateVT.getTypeForEVT(*getContext());
    Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;
  }

  return RedAlign;
}

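/// Create a stack temporary of the given size and alignment, reporting it
/// with a scalable-vector stack ID when the size is scalable.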
SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
  MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  int StackID = 0;
  if (Bytes.isScalable())
    StackID = TFI->getStackIDForScalableVectors();
  // The stack id gives an indication of whether the object is scalable or
  // not, so it's safe to pass in the minimum size here.
  int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment,
                                       false, nullptr, StackID);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align StackAlign =
      std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
  return CreateStackTemporary(VT.getStoreSize(), StackAlign);
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  TypeSize VT1Size = VT1.getStoreSize();
  TypeSize VT2Size = VT2.getStoreSize();
  assert(VT1Size.isScalable() == VT2Size.isScalable() &&
         "Don't know how to choose the maximum size when creating a stack "
         "temporary");
  TypeSize Bytes = VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue()
                       ? VT1Size
                       : VT2Size;

  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  return CreateStackTemporary(Bytes, Align);
}

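/// Try to constant fold a setcc with the given operands and condition code to
/// a boolean constant; returns an empty SDValue if no fold is possible.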
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  EVT OpVT = N1.getValueType();

  auto GetUndefBooleanConstant = [&]() {
    if (VT.getScalarType() == MVT::i1 ||
        TLI->getBooleanContents(OpVT) ==
            TargetLowering::UndefinedBooleanContent)
      return getUNDEF(VT);
    // ZeroOrOne / ZeroOrNegative require specific values for the high bits,
    // so we cannot use getUNDEF(). Return zero instead.
    return getConstant(0, dl, VT);
  };

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!OpVT.isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (OpVT.isInteger()) {
    // For EQ and NE, we can always pick a value for the undef to make the
    // predicate pass or fail, so we can return undef.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    // icmp eq/ne X, undef -> undef.
    if ((N1.isUndef() || N2.isUndef()) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE))
      return GetUndefBooleanConstant();

    // If both operands are undef, we can return undef for int comparison.
    // icmp undef, undef -> undef.
    if (N1.isUndef() && N2.isUndef())
      return GetUndefBooleanConstant();

    // icmp X, X -> true/false
    // icmp X, undef -> true/false because undef could be X.
    if (N1.isUndef() || N2.isUndef() || N1 == N2)
      return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      return getBoolConstant(ICmpInst::compare(C1, C2, getICmpCondCode(Cond)),
                             dl, VT, OpVT);
    }
  }

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
    APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
    switch (Cond) {
    default: break;
    case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                        return GetUndefBooleanConstant();
                      [[fallthrough]];
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT, OpVT);
    case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    }
  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a nan (or undef that could be a nan), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return GetUndefBooleanConstant();
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
/// DemandedElts. We use this predicate to simplify operations downstream.
/// Mask is known to be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     const APInt &DemandedElts,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}

/// MaskedVectorIsZero - Return true if 'Op' is known to be zero in
/// DemandedElts. We use this predicate to simplify operations downstream.
bool SelectionDAG::MaskedVectorIsZero(SDValue V, const APInt &DemandedElts,
                                      unsigned Depth /* = 0 */) const {
  return computeKnownBits(V, DemandedElts, Depth).isZero();
}

/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
                                        unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
}

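/// For a fixed-length vector, return a bit set for each demanded element of
/// Op that is known to be zero.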
APInt SelectionDAG::computeVectorKnownZeroElements(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   unsigned Depth) const {
  EVT VT = Op.getValueType();
  assert(VT.isVector() && !VT.isScalableVector() && "Only for fixed vectors!");

  unsigned NumElts = VT.getVectorNumElements();
  assert(DemandedElts.getBitWidth() == NumElts && "Unexpected demanded mask.");

  APInt KnownZeroElements = APInt::getZero(NumElts);
  for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
    if (!DemandedElts[EltIdx])
      continue; // Don't query elements that are not demanded.
    APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
    if (MaskedVectorIsZero(Op, Mask, Depth))
      KnownZeroElements.setBit(EltIdx);
  }
  return KnownZeroElements;
}

/// isSplatValue - Return true if the vector V has the same value
/// across all DemandedElts. For scalable vectors, we don't know the
/// number of lanes at compile time. Instead, we use a 1 bit APInt
/// to represent a conservative value for all lanes; that is, that
/// one bit value is implicitly splatted across all lanes.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
                                APInt &UndefElts, unsigned Depth) const {
  unsigned Opcode = V.getOpcode();
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");
  assert((!VT.isScalableVector() || DemandedElts.getBitWidth() == 1) &&
         "scalable demanded bits are ignored");

  if (!DemandedElts)
    return false; // No demanded elts, better to assume we don't know anything.

  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  // Deal with some common cases here that work for both fixed and scalable
  // vector types.
  switch (Opcode) {
  case ISD::SPLAT_VECTOR:
    UndefElts = V.getOperand(0).isUndef()
                    ? APInt::getAllOnes(DemandedElts.getBitWidth())
                    : APInt(DemandedElts.getBitWidth(), 0);
    return true;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR: {
    APInt UndefLHS, UndefRHS;
    SDValue LHS = V.getOperand(0);
    SDValue RHS = V.getOperand(1);
    if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
        isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
      UndefElts = UndefLHS | UndefRHS;
      return true;
    }
    return false;
  }
  case ISD::ABS:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
  default:
    if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
      return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this,
                                            Depth);
    break;
  }

  // We don't support other cases than those above for scalable vectors at
  // the moment.
  if (VT.isScalableVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
  UndefElts = APInt::getZero(NumElts);

  switch (Opcode) {
  case ISD::BUILD_VECTOR: {
    SDValue Scl;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue Op = V.getOperand(i);
      if (Op.isUndef()) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (Scl && Scl != Op)
        return false;
      Scl = Op;
    }
    return true;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat or a shuffle of a splat.
    APInt DemandedLHS = APInt::getZero(NumElts);
    APInt DemandedRHS = APInt::getZero(NumElts);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // If we aren't demanding either op, assume there's no splat.
    // If we are demanding both ops, assume there's no splat.
    if ((DemandedLHS.isZero() && DemandedRHS.isZero()) ||
        (!DemandedLHS.isZero() && !DemandedRHS.isZero()))
      return false;

    // See if the demanded elts of the source op form a splat, or we only
    // demand one element, which should always be a splat.
    // TODO: Handle source ops splats with undefs.
    auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) {
      APInt SrcUndefs;
      return (SrcElts.popcount() == 1) ||
             (isSplatValue(Src, SrcElts, SrcUndefs, Depth + 1) &&
              (SrcElts & SrcUndefs).isZero());
    };
    if (!DemandedLHS.isZero())
      return CheckSplatSrc(V.getOperand(0), DemandedLHS);
    return CheckSplatSrc(V.getOperand(1), DemandedRHS);
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = V.getOperand(0);
    // We don't support scalable vectors at the moment.
    if (Src.getValueType().isScalableVector())
      return false;
    uint64_t Idx = V.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt UndefSrcElts;
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
    if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
      UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
      return true;
    }
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // Widen the demanded elts by the src element count.
    SDValue Src = V.getOperand(0);
    // We don't support scalable vectors at the moment.
    if (Src.getValueType().isScalableVector())
      return false;
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt UndefSrcElts;
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
    if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
      UndefElts = UndefSrcElts.trunc(NumElts);
      return true;
    }
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = V.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned SrcBitWidth = SrcVT.getScalarSizeInBits();
    unsigned BitWidth = VT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    // TODO: Add fp support?
    if (!SrcVT.isVector() || !SrcVT.isInteger() || !VT.isInteger())
      break;

    // Bitcast 'small element' vector to 'large element' vector.
    if ((BitWidth % SrcBitWidth) == 0) {
      // See if each sub element is a splat.
      unsigned Scale = BitWidth / SrcBitWidth;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt ScaledDemandedElts =
          APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
      for (unsigned I = 0; I != Scale; ++I) {
        APInt SubUndefElts;
        APInt SubDemandedElt = APInt::getOneBitSet(Scale, I);
        APInt SubDemandedElts = APInt::getSplat(NumSrcElts, SubDemandedElt);
        SubDemandedElts &= ScaledDemandedElts;
        if (!isSplatValue(Src, SubDemandedElts, SubUndefElts, Depth + 1))
          return false;
        // TODO: Add support for merging sub undef elements.
        if (!SubUndefElts.isZero())
          return false;
      }
      return true;
    }
    break;
  }
  }

  return false;
}

2928 /// Helper wrapper for the main isSplatValue function.
2929 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const {
2930 EVT VT = V.getValueType();
2931 assert(VT.isVector() && "Vector type expected");
2932
2933 APInt UndefElts;
2934 // Since the number of lanes in a scalable vector is unknown at compile time,
2935 // we track one bit which is implicitly broadcast to all lanes. This means
2936 // that all lanes in a scalable vector are considered demanded.
2937 APInt DemandedElts
2938 = APInt::getAllOnes(VT.isScalableVector() ? 1 : VT.getVectorNumElements());
2939 return isSplatValue(V, DemandedElts, UndefElts) &&
2940 (AllowUndefs || !UndefElts);
2941 }
2942
2943 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2944 V = peekThroughExtractSubvectors(V);
2945
2946 EVT VT = V.getValueType();
2947 unsigned Opcode = V.getOpcode();
2948 switch (Opcode) {
2949 default: {
2950 APInt UndefElts;
2951 // Since the number of lanes in a scalable vector is unknown at compile time,
2952 // we track one bit which is implicitly broadcast to all lanes. This means
2953 // that all lanes in a scalable vector are considered demanded.
2954 APInt DemandedElts
2955 = APInt::getAllOnes(VT.isScalableVector() ? 1 : VT.getVectorNumElements());
2956
2957 if (isSplatValue(V, DemandedElts, UndefElts)) {
2958 if (VT.isScalableVector()) {
2959 // DemandedElts and UndefElts are ignored for scalable vectors, since
2960 // the only supported cases are SPLAT_VECTOR nodes.
2961 SplatIdx = 0;
2962 } else {
2963 // Handle case where all demanded elements are UNDEF.
2964 if (DemandedElts.isSubsetOf(UndefElts)) {
2965 SplatIdx = 0;
2966 return getUNDEF(VT);
2967 }
2968 SplatIdx = (UndefElts & DemandedElts).countr_one();
2969 }
2970 return V;
2971 }
2972 break;
2973 }
2974 case ISD::SPLAT_VECTOR:
2975 SplatIdx = 0;
2976 return V;
2977 case ISD::VECTOR_SHUFFLE: {
2978 assert(!VT.isScalableVector());
2979 // Check if this is a shuffle node doing a splat.
2980 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2981 // getTargetVShiftNode currently struggles without the splat source.
2982 auto *SVN = cast<ShuffleVectorSDNode>(V);
2983 if (!SVN->isSplat())
2984 break;
2985 int Idx = SVN->getSplatIndex();
2986 int NumElts = V.getValueType().getVectorNumElements();
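    // e.g. for a v4i32 shuffle, splat index 5 refers to lane 1 (5 % 4) of
    // the second operand (5 / 4).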
2987 SplatIdx = Idx % NumElts;
2988 return V.getOperand(Idx / NumElts);
2989 }
2990 }
2991
2992 return SDValue();
2993 }
2994
2995 SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2996 int SplatIdx;
2997 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2998 EVT SVT = SrcVector.getValueType().getScalarType();
2999 EVT LegalSVT = SVT;
3000 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
3001 if (!SVT.isInteger())
3002 return SDValue();
3003 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3004 if (LegalSVT.bitsLT(SVT))
3005 return SDValue();
3006 }
3007 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
3008 getVectorIdxConstant(SplatIdx, SDLoc(V)));
3009 }
3010 return SDValue();
3011 }
3012
3013 std::optional<ConstantRange>
3014 SelectionDAG::getValidShiftAmountRange(SDValue V, const APInt &DemandedElts,
3015 unsigned Depth) const {
3016 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3017 V.getOpcode() == ISD::SRA) &&
3018 "Unknown shift node");
3019 // Shifting more than the bitwidth is not valid.
3020 unsigned BitWidth = V.getScalarValueSizeInBits();
3021
3022 if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3023 const APInt &ShAmt = Cst->getAPIntValue();
3024 if (ShAmt.uge(BitWidth))
3025 return std::nullopt;
3026 return ConstantRange(ShAmt);
3027 }
3028
3029 if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
3030 const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
3031 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3032 if (!DemandedElts[i])
3033 continue;
3034 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3035 if (!SA) {
3036 MinAmt = MaxAmt = nullptr;
3037 break;
3038 }
3039 const APInt &ShAmt = SA->getAPIntValue();
3040 if (ShAmt.uge(BitWidth))
3041 return std::nullopt;
3042 if (!MinAmt || MinAmt->ugt(ShAmt))
3043 MinAmt = &ShAmt;
3044 if (!MaxAmt || MaxAmt->ult(ShAmt))
3045 MaxAmt = &ShAmt;
3046 }
3047 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3048 "Failed to find matching min/max shift amounts");
3049 if (MinAmt && MaxAmt)
3050 return ConstantRange(*MinAmt, *MaxAmt + 1);
3051 }
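  // e.g. a v2i32 shift by <i32 2, i32 4> with both elements demanded yields
  // the range [2, 5).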
3052
3053   // Use computeKnownBits to find a hidden constant/knownbits (usually type
3054   // legalized), e.g. hidden behind multiple bitcasts/build_vector/casts etc.
3055 KnownBits KnownAmt = computeKnownBits(V.getOperand(1), DemandedElts, Depth);
3056 if (KnownAmt.getMaxValue().ult(BitWidth))
3057 return ConstantRange::fromKnownBits(KnownAmt, /*IsSigned=*/false);
3058
3059 return std::nullopt;
3060 }
3061
3062 std::optional<uint64_t>
3063 SelectionDAG::getValidShiftAmount(SDValue V, const APInt &DemandedElts,
3064 unsigned Depth) const {
3065 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3066 V.getOpcode() == ISD::SRA) &&
3067 "Unknown shift node");
3068 if (std::optional<ConstantRange> AmtRange =
3069 getValidShiftAmountRange(V, DemandedElts, Depth))
3070 if (const APInt *ShAmt = AmtRange->getSingleElement())
3071 return ShAmt->getZExtValue();
3072 return std::nullopt;
3073 }
3074
3075 std::optional<uint64_t>
3076 SelectionDAG::getValidShiftAmount(SDValue V, unsigned Depth) const {
3077 EVT VT = V.getValueType();
3078 APInt DemandedElts = VT.isFixedLengthVector()
3079 ? APInt::getAllOnes(VT.getVectorNumElements())
3080 : APInt(1, 1);
3081 return getValidShiftAmount(V, DemandedElts, Depth);
3082 }
3083
3084 std::optional<uint64_t>
3085 SelectionDAG::getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts,
3086 unsigned Depth) const {
3087 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3088 V.getOpcode() == ISD::SRA) &&
3089 "Unknown shift node");
3090 if (std::optional<ConstantRange> AmtRange =
3091 getValidShiftAmountRange(V, DemandedElts, Depth))
3092 return AmtRange->getUnsignedMin().getZExtValue();
3093 return std::nullopt;
3094 }
3095
3096 std::optional<uint64_t>
3097 SelectionDAG::getValidMinimumShiftAmount(SDValue V, unsigned Depth) const {
3098 EVT VT = V.getValueType();
3099 APInt DemandedElts = VT.isFixedLengthVector()
3100 ? APInt::getAllOnes(VT.getVectorNumElements())
3101 : APInt(1, 1);
3102 return getValidMinimumShiftAmount(V, DemandedElts, Depth);
3103 }
3104
3105 std::optional<uint64_t>
3106 SelectionDAG::getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts,
3107 unsigned Depth) const {
3108 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3109 V.getOpcode() == ISD::SRA) &&
3110 "Unknown shift node");
3111 if (std::optional<ConstantRange> AmtRange =
3112 getValidShiftAmountRange(V, DemandedElts, Depth))
3113 return AmtRange->getUnsignedMax().getZExtValue();
3114 return std::nullopt;
3115 }
3116
3117 std::optional<uint64_t>
3118 SelectionDAG::getValidMaximumShiftAmount(SDValue V, unsigned Depth) const {
3119 EVT VT = V.getValueType();
3120 APInt DemandedElts = VT.isFixedLengthVector()
3121 ? APInt::getAllOnes(VT.getVectorNumElements())
3122 : APInt(1, 1);
3123 return getValidMaximumShiftAmount(V, DemandedElts, Depth);
3124 }
3125
3126 /// Determine which bits of Op are known to be either zero or one and return
3127 /// them in Known. For vectors, the known bits are those that are shared by
3128 /// every vector element.
3129 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
3130 EVT VT = Op.getValueType();
3131
3132 // Since the number of lanes in a scalable vector is unknown at compile time,
3133 // we track one bit which is implicitly broadcast to all lanes. This means
3134 // that all lanes in a scalable vector are considered demanded.
3135 APInt DemandedElts = VT.isFixedLengthVector()
3136 ? APInt::getAllOnes(VT.getVectorNumElements())
3137 : APInt(1, 1);
3138 return computeKnownBits(Op, DemandedElts, Depth);
3139 }
3140
3141 /// Determine which bits of Op are known to be either zero or one and return
3142 /// them in Known. The DemandedElts argument allows us to only collect the known
3143 /// bits that are shared by the requested vector elements.
3144 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
3145 unsigned Depth) const {
3146 unsigned BitWidth = Op.getScalarValueSizeInBits();
3147
3148 KnownBits Known(BitWidth); // Don't know anything.
3149
3150 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3151 // We know all of the bits for a constant!
3152 return KnownBits::makeConstant(C->getAPIntValue());
3153 }
3154 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
3155 // We know all of the bits for a constant fp!
3156 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
3157 }
3158
3159 if (Depth >= MaxRecursionDepth)
3160 return Known; // Limit search depth.
3161
3162 KnownBits Known2;
3163 unsigned NumElts = DemandedElts.getBitWidth();
3164 assert((!Op.getValueType().isFixedLengthVector() ||
3165 NumElts == Op.getValueType().getVectorNumElements()) &&
3166 "Unexpected vector size");
3167
3168 if (!DemandedElts)
3169 return Known; // No demanded elts, better to assume we don't know anything.
3170
3171 unsigned Opcode = Op.getOpcode();
3172 switch (Opcode) {
3173 case ISD::MERGE_VALUES:
3174 return computeKnownBits(Op.getOperand(Op.getResNo()), DemandedElts,
3175 Depth + 1);
3176 case ISD::SPLAT_VECTOR: {
3177 SDValue SrcOp = Op.getOperand(0);
3178 assert(SrcOp.getValueSizeInBits() >= BitWidth &&
3179 "Expected SPLAT_VECTOR implicit truncation");
3180 // Implicitly truncate the bits to match the official semantics of
3181 // SPLAT_VECTOR.
3182 Known = computeKnownBits(SrcOp, Depth + 1).trunc(BitWidth);
3183 break;
3184 }
3185 case ISD::SPLAT_VECTOR_PARTS: {
3186 unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits();
3187 assert(ScalarSize * Op.getNumOperands() == BitWidth &&
3188 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3189 for (auto [I, SrcOp] : enumerate(Op->ops())) {
3190 Known.insertBits(computeKnownBits(SrcOp, Depth + 1), ScalarSize * I);
3191 }
3192 break;
3193 }
3194 case ISD::STEP_VECTOR: {
3195 const APInt &Step = Op.getConstantOperandAPInt(0);
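    // e.g. step_vector(4) on nxv4i32 with vscale known to be in [1, 2]: lane
    // values are 0, 4, ..., 28, so the low 2 bits are known zero, and since
    // the largest value is (8 - 1) * 4 == 28, all bits above bit 4 are zero
    // too.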
3196
3197 if (Step.isPowerOf2())
3198 Known.Zero.setLowBits(Step.logBase2());
3199
3200 const Function &F = getMachineFunction().getFunction();
3201
3202 if (!isUIntN(BitWidth, Op.getValueType().getVectorMinNumElements()))
3203 break;
3204 const APInt MinNumElts =
3205 APInt(BitWidth, Op.getValueType().getVectorMinNumElements());
3206
3207 bool Overflow;
3208 const APInt MaxNumElts = getVScaleRange(&F, BitWidth)
3209 .getUnsignedMax()
3210 .umul_ov(MinNumElts, Overflow);
3211 if (Overflow)
3212 break;
3213
3214 const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow);
3215 if (Overflow)
3216 break;
3217
3218 Known.Zero.setHighBits(MaxValue.countl_zero());
3219 break;
3220 }
3221 case ISD::BUILD_VECTOR:
3222 assert(!Op.getValueType().isScalableVector());
3223 // Collect the known bits that are shared by every demanded vector element.
3224 Known.Zero.setAllBits(); Known.One.setAllBits();
3225 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
3226 if (!DemandedElts[i])
3227 continue;
3228
3229 SDValue SrcOp = Op.getOperand(i);
3230 Known2 = computeKnownBits(SrcOp, Depth + 1);
3231
3232       // BUILD_VECTOR can implicitly truncate sources; we must handle this.
3233 if (SrcOp.getValueSizeInBits() != BitWidth) {
3234 assert(SrcOp.getValueSizeInBits() > BitWidth &&
3235 "Expected BUILD_VECTOR implicit truncation");
3236 Known2 = Known2.trunc(BitWidth);
3237 }
3238
3239 // Known bits are the values that are shared by every demanded element.
3240 Known = Known.intersectWith(Known2);
3241
3242 // If we don't know any bits, early out.
3243 if (Known.isUnknown())
3244 break;
3245 }
3246 break;
3247 case ISD::VECTOR_SHUFFLE: {
3248 assert(!Op.getValueType().isScalableVector());
3249 // Collect the known bits that are shared by every vector element referenced
3250 // by the shuffle.
3251 APInt DemandedLHS, DemandedRHS;
3252 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3253 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
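    // e.g. shuffle(X, Y, <0,4,1,5>) with DemandedElts == 0b0011 demands only
    // lane 0 of X (mask value 0) and lane 0 of Y (mask value 4).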
3254 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
3255 DemandedLHS, DemandedRHS))
3256 break;
3257
3258 // Known bits are the values that are shared by every demanded element.
3259 Known.Zero.setAllBits(); Known.One.setAllBits();
3260 if (!!DemandedLHS) {
3261 SDValue LHS = Op.getOperand(0);
3262 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
3263 Known = Known.intersectWith(Known2);
3264 }
3265 // If we don't know any bits, early out.
3266 if (Known.isUnknown())
3267 break;
3268 if (!!DemandedRHS) {
3269 SDValue RHS = Op.getOperand(1);
3270 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
3271 Known = Known.intersectWith(Known2);
3272 }
3273 break;
3274 }
3275 case ISD::VSCALE: {
3276 const Function &F = getMachineFunction().getFunction();
3277 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
3278 Known = getVScaleRange(&F, BitWidth).multiply(Multiplier).toKnownBits();
3279 break;
3280 }
3281 case ISD::CONCAT_VECTORS: {
3282 if (Op.getValueType().isScalableVector())
3283 break;
3284 // Split DemandedElts and test each of the demanded subvectors.
3285 Known.Zero.setAllBits(); Known.One.setAllBits();
3286 EVT SubVectorVT = Op.getOperand(0).getValueType();
3287 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3288 unsigned NumSubVectors = Op.getNumOperands();
3289 for (unsigned i = 0; i != NumSubVectors; ++i) {
3290 APInt DemandedSub =
3291 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
3292 if (!!DemandedSub) {
3293 SDValue Sub = Op.getOperand(i);
3294 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
3295 Known = Known.intersectWith(Known2);
3296 }
3297 // If we don't know any bits, early out.
3298 if (Known.isUnknown())
3299 break;
3300 }
3301 break;
3302 }
3303 case ISD::INSERT_SUBVECTOR: {
3304 if (Op.getValueType().isScalableVector())
3305 break;
3306     // Demand any elements from the subvector and the remainder from the src
3307     // it's inserted into.
3308 SDValue Src = Op.getOperand(0);
3309 SDValue Sub = Op.getOperand(1);
3310 uint64_t Idx = Op.getConstantOperandVal(2);
3311 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3312 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3313 APInt DemandedSrcElts = DemandedElts;
3314 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
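    // e.g. inserting v2i32 into v8i32 at Idx == 2: DemandedSubElts takes bits
    // [2,4) of DemandedElts, and those same bits are cleared in
    // DemandedSrcElts because the inserted lanes hide the source vector there.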
3315
3316 Known.One.setAllBits();
3317 Known.Zero.setAllBits();
3318 if (!!DemandedSubElts) {
3319 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
3320 if (Known.isUnknown())
3321 break; // early-out.
3322 }
3323 if (!!DemandedSrcElts) {
3324 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3325 Known = Known.intersectWith(Known2);
3326 }
3327 break;
3328 }
3329 case ISD::EXTRACT_SUBVECTOR: {
3330 // Offset the demanded elts by the subvector index.
3331 SDValue Src = Op.getOperand(0);
3332 // Bail until we can represent demanded elements for scalable vectors.
3333 if (Op.getValueType().isScalableVector() || Src.getValueType().isScalableVector())
3334 break;
3335 uint64_t Idx = Op.getConstantOperandVal(1);
3336 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3337 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3338 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3339 break;
3340 }
3341 case ISD::SCALAR_TO_VECTOR: {
3342 if (Op.getValueType().isScalableVector())
3343 break;
3344     // We know as much about scalar_to_vector as we know about its source,
3345     // which becomes the first element of an otherwise unknown vector.
3346 if (DemandedElts != 1)
3347 break;
3348
3349 SDValue N0 = Op.getOperand(0);
3350 Known = computeKnownBits(N0, Depth + 1);
3351 if (N0.getValueSizeInBits() != BitWidth)
3352 Known = Known.trunc(BitWidth);
3353
3354 break;
3355 }
3356 case ISD::BITCAST: {
3357 if (Op.getValueType().isScalableVector())
3358 break;
3359
3360 SDValue N0 = Op.getOperand(0);
3361 EVT SubVT = N0.getValueType();
3362 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
3363
3364 // Ignore bitcasts from unsupported types.
3365 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
3366 break;
3367
3368 // Fast handling of 'identity' bitcasts.
3369 if (BitWidth == SubBitWidth) {
3370 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
3371 break;
3372 }
3373
3374 bool IsLE = getDataLayout().isLittleEndian();
3375
3376 // Bitcast 'small element' vector to 'large element' scalar/vector.
3377 if ((BitWidth % SubBitWidth) == 0) {
3378 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
3379
3380 // Collect known bits for the (larger) output by collecting the known
3381 // bits from each set of sub elements and shift these into place.
3382 // We need to separately call computeKnownBits for each set of
3383 // sub elements as the knownbits for each is likely to be different.
3384 unsigned SubScale = BitWidth / SubBitWidth;
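      // e.g. for v4i8 -> v2i16, SubScale == 2: pass i == 0 gathers the known
      // bits of the even bytes and pass i == 1 the odd bytes, each inserted
      // at its bit offset within the i16 result (low bytes first on
      // little-endian).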
3385 APInt SubDemandedElts(NumElts * SubScale, 0);
3386 for (unsigned i = 0; i != NumElts; ++i)
3387 if (DemandedElts[i])
3388 SubDemandedElts.setBit(i * SubScale);
3389
3390 for (unsigned i = 0; i != SubScale; ++i) {
3391 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3392 Depth + 1);
3393 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3394 Known.insertBits(Known2, SubBitWidth * Shifts);
3395 }
3396 }
3397
3398 // Bitcast 'large element' scalar/vector to 'small element' vector.
3399 if ((SubBitWidth % BitWidth) == 0) {
3400 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3401
3402 // Collect known bits for the (smaller) output by collecting the known
3403 // bits from the overlapping larger input elements and extracting the
3404 // sub sections we actually care about.
3405 unsigned SubScale = SubBitWidth / BitWidth;
3406 APInt SubDemandedElts =
3407 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3408 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
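      // e.g. for v2i16 -> v4i8 on little-endian, demanded byte 2 reads the
      // low 8 bits of source element 1: Offset == (2 % 2) * 8 == 0 below.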
3409
3410 Known.Zero.setAllBits(); Known.One.setAllBits();
3411 for (unsigned i = 0; i != NumElts; ++i)
3412 if (DemandedElts[i]) {
3413 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3414 unsigned Offset = (Shifts % SubScale) * BitWidth;
3415 Known = Known.intersectWith(Known2.extractBits(BitWidth, Offset));
3416 // If we don't know any bits, early out.
3417 if (Known.isUnknown())
3418 break;
3419 }
3420 }
3421 break;
3422 }
3423 case ISD::AND:
3424 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3425 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3426
3427 Known &= Known2;
3428 break;
3429 case ISD::OR:
3430 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3431 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3432
3433 Known |= Known2;
3434 break;
3435 case ISD::XOR:
3436 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3437 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3438
3439 Known ^= Known2;
3440 break;
3441 case ISD::MUL: {
3442 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3443 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3444 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3445 // TODO: SelfMultiply can be poison, but not undef.
3446 if (SelfMultiply)
3447 SelfMultiply &= isGuaranteedNotToBeUndefOrPoison(
3448 Op.getOperand(0), DemandedElts, false, Depth + 1);
3449 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3450
3451 // If the multiplication is known not to overflow, the product of a number
3452     // with itself is non-negative. Only do this if we haven't already computed
3453     // the opposite value for the sign bit.
3454 if (Op->getFlags().hasNoSignedWrap() &&
3455 Op.getOperand(0) == Op.getOperand(1) &&
3456 !Known.isNegative())
3457 Known.makeNonNegative();
3458 break;
3459 }
3460 case ISD::MULHU: {
3461 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3462 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3463 Known = KnownBits::mulhu(Known, Known2);
3464 break;
3465 }
3466 case ISD::MULHS: {
3467 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3468 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3469 Known = KnownBits::mulhs(Known, Known2);
3470 break;
3471 }
3472 case ISD::ABDU: {
3473 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3474 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3475 Known = KnownBits::abdu(Known, Known2);
3476 break;
3477 }
3478 case ISD::ABDS: {
3479 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3480 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3481 Known = KnownBits::abds(Known, Known2);
3482 unsigned SignBits1 =
3483 ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3484 if (SignBits1 == 1)
3485 break;
3486 unsigned SignBits0 =
3487 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3488 Known.Zero.setHighBits(std::min(SignBits0, SignBits1) - 1);
3489 break;
3490 }
3491 case ISD::UMUL_LOHI: {
3492 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3493 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3494 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3495 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3496 if (Op.getResNo() == 0)
3497 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3498 else
3499 Known = KnownBits::mulhu(Known, Known2);
3500 break;
3501 }
3502 case ISD::SMUL_LOHI: {
3503 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3504 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3505 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3506 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3507 if (Op.getResNo() == 0)
3508 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3509 else
3510 Known = KnownBits::mulhs(Known, Known2);
3511 break;
3512 }
3513 case ISD::AVGFLOORU: {
3514 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3515 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3516 Known = KnownBits::avgFloorU(Known, Known2);
3517 break;
3518 }
3519 case ISD::AVGCEILU: {
3520 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3521 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3522 Known = KnownBits::avgCeilU(Known, Known2);
3523 break;
3524 }
3525 case ISD::AVGFLOORS: {
3526 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3527 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3528 Known = KnownBits::avgFloorS(Known, Known2);
3529 break;
3530 }
3531 case ISD::AVGCEILS: {
3532 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3533 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3534 Known = KnownBits::avgCeilS(Known, Known2);
3535 break;
3536 }
3537 case ISD::SELECT:
3538 case ISD::VSELECT:
3539 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3540 // If we don't know any bits, early out.
3541 if (Known.isUnknown())
3542 break;
3543 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3544
3545 // Only known if known in both the LHS and RHS.
3546 Known = Known.intersectWith(Known2);
3547 break;
3548 case ISD::SELECT_CC:
3549 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3550 // If we don't know any bits, early out.
3551 if (Known.isUnknown())
3552 break;
3553 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3554
3555 // Only known if known in both the LHS and RHS.
3556 Known = Known.intersectWith(Known2);
3557 break;
3558 case ISD::SMULO:
3559 case ISD::UMULO:
3560 if (Op.getResNo() != 1)
3561 break;
3562 // The boolean result conforms to getBooleanContents.
3563 // If we know the result of a setcc has the top bits zero, use this info.
3564 // We know that we have an integer-based boolean since these operations
3565     // are only available for integers.
3566 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3567 TargetLowering::ZeroOrOneBooleanContent &&
3568 BitWidth > 1)
3569 Known.Zero.setBitsFrom(1);
3570 break;
3571 case ISD::SETCC:
3572 case ISD::SETCCCARRY:
3573 case ISD::STRICT_FSETCC:
3574 case ISD::STRICT_FSETCCS: {
3575 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3576 // If we know the result of a setcc has the top bits zero, use this info.
3577 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3578 TargetLowering::ZeroOrOneBooleanContent &&
3579 BitWidth > 1)
3580 Known.Zero.setBitsFrom(1);
3581 break;
3582 }
3583 case ISD::SHL: {
3584 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3585 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3586
3587 bool NUW = Op->getFlags().hasNoUnsignedWrap();
3588 bool NSW = Op->getFlags().hasNoSignedWrap();
3589
3590 bool ShAmtNonZero = Known2.isNonZero();
3591
3592 Known = KnownBits::shl(Known, Known2, NUW, NSW, ShAmtNonZero);
3593
3594 // Minimum shift low bits are known zero.
3595 if (std::optional<uint64_t> ShMinAmt =
3596 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3597 Known.Zero.setLowBits(*ShMinAmt);
3598 break;
3599 }
3600 case ISD::SRL:
3601 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3602 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3603 Known = KnownBits::lshr(Known, Known2, /*ShAmtNonZero=*/false,
3604 Op->getFlags().hasExact());
3605
3606 // Minimum shift high bits are known zero.
3607 if (std::optional<uint64_t> ShMinAmt =
3608 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3609 Known.Zero.setHighBits(*ShMinAmt);
3610 break;
3611 case ISD::SRA:
3612 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3613 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3614 Known = KnownBits::ashr(Known, Known2, /*ShAmtNonZero=*/false,
3615 Op->getFlags().hasExact());
3616 break;
3617 case ISD::FSHL:
3618 case ISD::FSHR:
3619 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3620 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3621
3622 // For fshl, 0-shift returns the 1st arg.
3623 // For fshr, 0-shift returns the 2nd arg.
3624 if (Amt == 0) {
3625 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3626 DemandedElts, Depth + 1);
3627 break;
3628 }
3629
3630 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3631 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
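      // e.g. fshl(X, Y, 8) on i32 is (X << 8) | (Y >> 24): X's known bits
      // move up by 8 and the top 8 known bits of Y fill the low end.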
3632 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3633 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3634 if (Opcode == ISD::FSHL) {
3635 Known.One <<= Amt;
3636 Known.Zero <<= Amt;
3637 Known2.One.lshrInPlace(BitWidth - Amt);
3638 Known2.Zero.lshrInPlace(BitWidth - Amt);
3639 } else {
3640 Known.One <<= BitWidth - Amt;
3641 Known.Zero <<= BitWidth - Amt;
3642 Known2.One.lshrInPlace(Amt);
3643 Known2.Zero.lshrInPlace(Amt);
3644 }
3645 Known = Known.unionWith(Known2);
3646 }
3647 break;
3648 case ISD::SHL_PARTS:
3649 case ISD::SRA_PARTS:
3650 case ISD::SRL_PARTS: {
3651 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3652
3653 // Collect lo/hi source values and concatenate.
3654 unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits();
3655 unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits();
3656 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3657 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3658 Known = Known2.concat(Known);
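    // e.g. for 2 x i32 parts this models the shift on the concatenated
    // 64-bit value hi:lo; the demanded half is extracted again below.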
3659
3660 // Collect shift amount.
3661 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3662
3663 if (Opcode == ISD::SHL_PARTS)
3664 Known = KnownBits::shl(Known, Known2);
3665 else if (Opcode == ISD::SRA_PARTS)
3666 Known = KnownBits::ashr(Known, Known2);
3667 else // if (Opcode == ISD::SRL_PARTS)
3668 Known = KnownBits::lshr(Known, Known2);
3669
3670 // TODO: Minimum shift low/high bits are known zero.
3671
3672 if (Op.getResNo() == 0)
3673 Known = Known.extractBits(LoBits, 0);
3674 else
3675 Known = Known.extractBits(HiBits, LoBits);
3676 break;
3677 }
3678 case ISD::SIGN_EXTEND_INREG: {
3679 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3680 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3681 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3682 break;
3683 }
3684 case ISD::CTTZ:
3685 case ISD::CTTZ_ZERO_UNDEF: {
3686 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3687 // If we have a known 1, its position is our upper bound.
3688 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3689 unsigned LowBits = llvm::bit_width(PossibleTZ);
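    // e.g. if at most 7 trailing zeros are possible, the result is <= 7 and
    // fits in bit_width(7) == 3 bits, so all higher bits are known zero.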
3690 Known.Zero.setBitsFrom(LowBits);
3691 break;
3692 }
3693 case ISD::CTLZ:
3694 case ISD::CTLZ_ZERO_UNDEF: {
3695 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3696 // If we have a known 1, its position is our upper bound.
3697 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3698 unsigned LowBits = llvm::bit_width(PossibleLZ);
3699 Known.Zero.setBitsFrom(LowBits);
3700 break;
3701 }
3702 case ISD::CTPOP: {
3703 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3704 // If we know some of the bits are zero, they can't be one.
3705 unsigned PossibleOnes = Known2.countMaxPopulation();
3706 Known.Zero.setBitsFrom(llvm::bit_width(PossibleOnes));
3707 break;
3708 }
3709 case ISD::PARITY: {
3710 // Parity returns 0 everywhere but the LSB.
3711 Known.Zero.setBitsFrom(1);
3712 break;
3713 }
3714 case ISD::LOAD: {
3715 LoadSDNode *LD = cast<LoadSDNode>(Op);
3716 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3717 if (ISD::isNON_EXTLoad(LD) && Cst) {
3718 // Determine any common known bits from the loaded constant pool value.
3719 Type *CstTy = Cst->getType();
3720 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits() &&
3721 !Op.getValueType().isScalableVector()) {
3722         // If it's a vector splat, then we can (quickly) reuse the scalar path.
3723 // NOTE: We assume all elements match and none are UNDEF.
3724 if (CstTy->isVectorTy()) {
3725 if (const Constant *Splat = Cst->getSplatValue()) {
3726 Cst = Splat;
3727 CstTy = Cst->getType();
3728 }
3729 }
3730 // TODO - do we need to handle different bitwidths?
3731 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3732 // Iterate across all vector elements finding common known bits.
3733 Known.One.setAllBits();
3734 Known.Zero.setAllBits();
3735 for (unsigned i = 0; i != NumElts; ++i) {
3736 if (!DemandedElts[i])
3737 continue;
3738 if (Constant *Elt = Cst->getAggregateElement(i)) {
3739 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3740 const APInt &Value = CInt->getValue();
3741 Known.One &= Value;
3742 Known.Zero &= ~Value;
3743 continue;
3744 }
3745 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3746 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3747 Known.One &= Value;
3748 Known.Zero &= ~Value;
3749 continue;
3750 }
3751 }
3752 Known.One.clearAllBits();
3753 Known.Zero.clearAllBits();
3754 break;
3755 }
3756 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3757 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3758 Known = KnownBits::makeConstant(CInt->getValue());
3759 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3760 Known =
3761 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3762 }
3763 }
3764 }
3765 } else if (Op.getResNo() == 0) {
3766 KnownBits Known0(!LD->getMemoryVT().isScalableVT()
3767 ? LD->getMemoryVT().getFixedSizeInBits()
3768 : BitWidth);
3769 EVT VT = Op.getValueType();
3770       // Fill in any known bits from range information. There are 3 types being
3771       // used: the result's VT (same vector elt size as BitWidth), the loaded
3772       // MemoryVT (which may or may not be a vector) and the range VT's original
3773       // type. The range metadata needs the full range (i.e.
3774       // MemoryVT().getSizeInBits()), which is truncated to the correct elt size
3775       // if it is known. These are then extended to the original VT sizes below.
3776 if (const MDNode *MD = LD->getRanges()) {
3777 computeKnownBitsFromRangeMetadata(*MD, Known0);
3778 if (VT.isVector()) {
3779 // Handle truncation to the first demanded element.
3780 // TODO: Figure out which demanded elements are covered
3781 if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
3782 break;
3783 Known0 = Known0.trunc(BitWidth);
3784 }
3785 }
3786
3787 if (LD->getMemoryVT().isVector())
3788 Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits());
3789
3790 // Extend the Known bits from memory to the size of the result.
3791 if (ISD::isZEXTLoad(Op.getNode()))
3792 Known = Known0.zext(BitWidth);
3793 else if (ISD::isSEXTLoad(Op.getNode()))
3794 Known = Known0.sext(BitWidth);
3795 else if (ISD::isEXTLoad(Op.getNode()))
3796 Known = Known0.anyext(BitWidth);
3797 else
3798 Known = Known0;
3799 assert(Known.getBitWidth() == BitWidth);
3800 return Known;
3801 }
3802 break;
3803 }
3804 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3805 if (Op.getValueType().isScalableVector())
3806 break;
3807 EVT InVT = Op.getOperand(0).getValueType();
3808 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
3809 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3810 Known = Known.zext(BitWidth);
3811 break;
3812 }
3813 case ISD::ZERO_EXTEND: {
3814 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3815 Known = Known.zext(BitWidth);
3816 break;
3817 }
3818 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3819 if (Op.getValueType().isScalableVector())
3820 break;
3821 EVT InVT = Op.getOperand(0).getValueType();
3822 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
3823 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3824 // If the sign bit is known to be zero or one, then sext will extend
3825 // it to the top bits, else it will just zext.
3826 Known = Known.sext(BitWidth);
3827 break;
3828 }
3829 case ISD::SIGN_EXTEND: {
3830 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3831 // If the sign bit is known to be zero or one, then sext will extend
3832 // it to the top bits, else it will just zext.
3833 Known = Known.sext(BitWidth);
3834 break;
3835 }
3836 case ISD::ANY_EXTEND_VECTOR_INREG: {
3837 if (Op.getValueType().isScalableVector())
3838 break;
3839 EVT InVT = Op.getOperand(0).getValueType();
3840 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
3841 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3842 Known = Known.anyext(BitWidth);
3843 break;
3844 }
3845 case ISD::ANY_EXTEND: {
3846 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3847 Known = Known.anyext(BitWidth);
3848 break;
3849 }
3850 case ISD::TRUNCATE: {
3851 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3852 Known = Known.trunc(BitWidth);
3853 break;
3854 }
3855 case ISD::AssertZext: {
3856 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3857 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3858 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3859 Known.Zero |= (~InMask);
3860 Known.One &= (~Known.Zero);
3861 break;
3862 }
3863 case ISD::AssertAlign: {
3864 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3865 assert(LogOfAlign != 0);
3866
3867 // TODO: Should use maximum with source
3868 // If a node is guaranteed to be aligned, set low zero bits accordingly as
3869 // well as clearing one bits.
3870 Known.Zero.setLowBits(LogOfAlign);
3871 Known.One.clearLowBits(LogOfAlign);
3872 break;
3873 }
3874 case ISD::FGETSIGN:
3875 // All bits are zero except the low bit.
3876 Known.Zero.setBitsFrom(1);
3877 break;
3878 case ISD::ADD:
3879 case ISD::SUB: {
3880 SDNodeFlags Flags = Op.getNode()->getFlags();
3881 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3882 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3883 Known = KnownBits::computeForAddSub(
3884 Op.getOpcode() == ISD::ADD, Flags.hasNoSignedWrap(),
3885 Flags.hasNoUnsignedWrap(), Known, Known2);
3886 break;
3887 }
3888 case ISD::USUBO:
3889 case ISD::SSUBO:
3890 case ISD::USUBO_CARRY:
3891 case ISD::SSUBO_CARRY:
3892 if (Op.getResNo() == 1) {
3893 // If we know the result of a setcc has the top bits zero, use this info.
3894 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3895 TargetLowering::ZeroOrOneBooleanContent &&
3896 BitWidth > 1)
3897 Known.Zero.setBitsFrom(1);
3898 break;
3899 }
3900 [[fallthrough]];
3901 case ISD::SUBC: {
3902 assert(Op.getResNo() == 0 &&
3903 "We only compute knownbits for the difference here.");
3904
3905 // With USUBO_CARRY and SSUBO_CARRY a borrow bit may be added in.
3906 KnownBits Borrow(1);
3907 if (Opcode == ISD::USUBO_CARRY || Opcode == ISD::SSUBO_CARRY) {
3908 Borrow = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3909 // Borrow has bit width 1
3910 Borrow = Borrow.trunc(1);
3911 } else {
3912 Borrow.setAllZero();
3913 }
3914
3915 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3916 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3917 Known = KnownBits::computeForSubBorrow(Known, Known2, Borrow);
3918 break;
3919 }
3920 case ISD::UADDO:
3921 case ISD::SADDO:
3922 case ISD::UADDO_CARRY:
3923 case ISD::SADDO_CARRY:
3924 if (Op.getResNo() == 1) {
3925 // If we know the result of a setcc has the top bits zero, use this info.
3926 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3927 TargetLowering::ZeroOrOneBooleanContent &&
3928 BitWidth > 1)
3929 Known.Zero.setBitsFrom(1);
3930 break;
3931 }
3932 [[fallthrough]];
3933 case ISD::ADDC:
3934 case ISD::ADDE: {
3935 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3936
3937 // With ADDE and UADDO_CARRY, a carry bit may be added in.
3938 KnownBits Carry(1);
3939 if (Opcode == ISD::ADDE)
3940 // Can't track carry from glue, set carry to unknown.
3941 Carry.resetAll();
3942 else if (Opcode == ISD::UADDO_CARRY || Opcode == ISD::SADDO_CARRY) {
3943 Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3944 // Carry has bit width 1
3945 Carry = Carry.trunc(1);
3946 } else {
3947 Carry.setAllZero();
3948 }
3949
3950 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3951 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3952 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3953 break;
3954 }
3955 case ISD::UDIV: {
3956 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3957 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3958 Known = KnownBits::udiv(Known, Known2, Op->getFlags().hasExact());
3959 break;
3960 }
3961 case ISD::SDIV: {
3962 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3963 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3964 Known = KnownBits::sdiv(Known, Known2, Op->getFlags().hasExact());
3965 break;
3966 }
3967 case ISD::SREM: {
3968 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3969 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3970 Known = KnownBits::srem(Known, Known2);
3971 break;
3972 }
3973 case ISD::UREM: {
3974 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3975 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3976 Known = KnownBits::urem(Known, Known2);
3977 break;
3978 }
3979 case ISD::EXTRACT_ELEMENT: {
3980 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3981 const unsigned Index = Op.getConstantOperandVal(1);
3982 const unsigned EltBitWidth = Op.getValueSizeInBits();
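    // e.g. extracting element 1 of an i64 as an i32 keeps bits [32,64) of
    // the source's known bits and truncates away the rest.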
3983
3984 // Remove low part of known bits mask
3985 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3986 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3987
3988 // Remove high part of known bit mask
3989 Known = Known.trunc(EltBitWidth);
3990 break;
3991 }
3992 case ISD::EXTRACT_VECTOR_ELT: {
3993 SDValue InVec = Op.getOperand(0);
3994 SDValue EltNo = Op.getOperand(1);
3995 EVT VecVT = InVec.getValueType();
3996 // computeKnownBits not yet implemented for scalable vectors.
3997 if (VecVT.isScalableVector())
3998 break;
3999 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
4000 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4001
4002     // If BitWidth > EltBitWidth the value is any-extended. So we do not know
4003 // anything about the extended bits.
4004 if (BitWidth > EltBitWidth)
4005 Known = Known.trunc(EltBitWidth);
4006
4007 // If we know the element index, just demand that vector element, else for
4008 // an unknown element index, ignore DemandedElts and demand them all.
4009 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4010 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4011 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4012 DemandedSrcElts =
4013 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4014
4015 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
4016 if (BitWidth > EltBitWidth)
4017 Known = Known.anyext(BitWidth);
4018 break;
4019 }
4020 case ISD::INSERT_VECTOR_ELT: {
4021 if (Op.getValueType().isScalableVector())
4022 break;
4023
4024 // If we know the element index, split the demand between the
4025 // source vector and the inserted element, otherwise assume we need
4026 // the original demanded vector elements and the value.
4027 SDValue InVec = Op.getOperand(0);
4028 SDValue InVal = Op.getOperand(1);
4029 SDValue EltNo = Op.getOperand(2);
4030 bool DemandedVal = true;
4031 APInt DemandedVecElts = DemandedElts;
4032 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4033 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4034 unsigned EltIdx = CEltNo->getZExtValue();
4035 DemandedVal = !!DemandedElts[EltIdx];
4036 DemandedVecElts.clearBit(EltIdx);
4037 }
4038 Known.One.setAllBits();
4039 Known.Zero.setAllBits();
4040 if (DemandedVal) {
4041 Known2 = computeKnownBits(InVal, Depth + 1);
4042 Known = Known.intersectWith(Known2.zextOrTrunc(BitWidth));
4043 }
4044 if (!!DemandedVecElts) {
4045 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
4046 Known = Known.intersectWith(Known2);
4047 }
4048 break;
4049 }
4050 case ISD::BITREVERSE: {
4051 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4052 Known = Known2.reverseBits();
4053 break;
4054 }
4055 case ISD::BSWAP: {
4056 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4057 Known = Known2.byteSwap();
4058 break;
4059 }
4060 case ISD::ABS: {
4061 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4062 Known = Known2.abs();
4063 Known.Zero.setHighBits(
4064 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1);
4065 break;
4066 }
4067 case ISD::USUBSAT: {
4068 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4069 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4070 Known = KnownBits::usub_sat(Known, Known2);
4071 break;
4072 }
4073 case ISD::UMIN: {
4074 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4075 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4076 Known = KnownBits::umin(Known, Known2);
4077 break;
4078 }
4079 case ISD::UMAX: {
4080 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4081 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4082 Known = KnownBits::umax(Known, Known2);
4083 break;
4084 }
4085 case ISD::SMIN:
4086 case ISD::SMAX: {
4087 // If we have a clamp pattern, we know that the number of sign bits will be
4088 // the minimum of the clamp min/max range.
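    // e.g. smax(smin(X, 255), 0) on i32 clamps X to [0, 255]; both bounds
    // have at least 24 sign bits and are non-negative, so the top 24 bits of
    // the result are known zero.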
4089 bool IsMax = (Opcode == ISD::SMAX);
4090 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
4091 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
4092 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
4093 CstHigh =
4094 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
4095 if (CstLow && CstHigh) {
4096 if (!IsMax)
4097 std::swap(CstLow, CstHigh);
4098
4099 const APInt &ValueLow = CstLow->getAPIntValue();
4100 const APInt &ValueHigh = CstHigh->getAPIntValue();
4101 if (ValueLow.sle(ValueHigh)) {
4102 unsigned LowSignBits = ValueLow.getNumSignBits();
4103 unsigned HighSignBits = ValueHigh.getNumSignBits();
4104 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4105 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
4106 Known.One.setHighBits(MinSignBits);
4107 break;
4108 }
4109 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
4110 Known.Zero.setHighBits(MinSignBits);
4111 break;
4112 }
4113 }
4114 }
4115
4116 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4117 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4118 if (IsMax)
4119 Known = KnownBits::smax(Known, Known2);
4120 else
4121 Known = KnownBits::smin(Known, Known2);
4122
4123 // For SMAX, if CstLow is non-negative we know the result will be
4124 // non-negative and thus all sign bits are 0.
4125 // TODO: There's an equivalent of this for smin with negative constant for
4126 // known ones.
4127 if (IsMax && CstLow) {
4128 const APInt &ValueLow = CstLow->getAPIntValue();
4129 if (ValueLow.isNonNegative()) {
4130 unsigned SignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4131 Known.Zero.setHighBits(std::min(SignBits, ValueLow.getNumSignBits()));
4132 }
4133 }
4134
4135 break;
4136 }
4137 case ISD::UINT_TO_FP: {
4138 Known.makeNonNegative();
4139 break;
4140 }
4141 case ISD::SINT_TO_FP: {
4142 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4143 if (Known2.isNonNegative())
4144 Known.makeNonNegative();
4145 else if (Known2.isNegative())
4146 Known.makeNegative();
4147 break;
4148 }
4149 case ISD::FP_TO_UINT_SAT: {
4150 // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
4151 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
4152 Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits());
4153 break;
4154 }
4155 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4156 if (Op.getResNo() == 1) {
4157 // The boolean result conforms to getBooleanContents.
4158 // If we know the result of a setcc has the top bits zero, use this info.
4159 // We know that we have an integer-based boolean since these operations
4160       // are only available for integers.
4161 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
4162 TargetLowering::ZeroOrOneBooleanContent &&
4163 BitWidth > 1)
4164 Known.Zero.setBitsFrom(1);
4165 break;
4166 }
4167 [[fallthrough]];
4168 case ISD::ATOMIC_CMP_SWAP:
4169 case ISD::ATOMIC_SWAP:
4170 case ISD::ATOMIC_LOAD_ADD:
4171 case ISD::ATOMIC_LOAD_SUB:
4172 case ISD::ATOMIC_LOAD_AND:
4173 case ISD::ATOMIC_LOAD_CLR:
4174 case ISD::ATOMIC_LOAD_OR:
4175 case ISD::ATOMIC_LOAD_XOR:
4176 case ISD::ATOMIC_LOAD_NAND:
4177 case ISD::ATOMIC_LOAD_MIN:
4178 case ISD::ATOMIC_LOAD_MAX:
4179 case ISD::ATOMIC_LOAD_UMIN:
4180 case ISD::ATOMIC_LOAD_UMAX:
4181 case ISD::ATOMIC_LOAD: {
4182 unsigned MemBits =
4183 cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
4184 // If we are looking at the loaded value.
4185 if (Op.getResNo() == 0) {
4186 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4187 Known.Zero.setBitsFrom(MemBits);
4188 else if (Op->getOpcode() == ISD::ATOMIC_LOAD &&
4189 cast<AtomicSDNode>(Op)->getExtensionType() == ISD::ZEXTLOAD)
4190 Known.Zero.setBitsFrom(MemBits);
4191 }
4192 break;
4193 }
4194 case ISD::FrameIndex:
4195 case ISD::TargetFrameIndex:
4196 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
4197 Known, getMachineFunction());
4198 break;
4199
4200 default:
4201 if (Opcode < ISD::BUILTIN_OP_END)
4202 break;
4203 [[fallthrough]];
4204 case ISD::INTRINSIC_WO_CHAIN:
4205 case ISD::INTRINSIC_W_CHAIN:
4206 case ISD::INTRINSIC_VOID:
4207 // TODO: Probably okay to remove after audit; here to reduce change size
4208 // in initial enablement patch for scalable vectors
4209 if (Op.getValueType().isScalableVector())
4210 break;
4211
4212 // Allow the target to implement this method for its nodes.
4213 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
4214 break;
4215 }
4216
4217 return Known;
4218 }
4219
4220 /// Convert ConstantRange OverflowResult into SelectionDAG::OverflowKind.
4221 static SelectionDAG::OverflowKind mapOverflowResult(ConstantRange::OverflowResult OR) {
4222 switch (OR) {
4223 case ConstantRange::OverflowResult::MayOverflow:
4224 return SelectionDAG::OFK_Sometime;
4225 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4226 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4227 return SelectionDAG::OFK_Always;
4228 case ConstantRange::OverflowResult::NeverOverflows:
4229 return SelectionDAG::OFK_Never;
4230 }
4231 llvm_unreachable("Unknown OverflowResult");
4232 }
4233
4234 SelectionDAG::OverflowKind
4235 SelectionDAG::computeOverflowForSignedAdd(SDValue N0, SDValue N1) const {
4236   // X + 0 never overflows
4237 if (isNullConstant(N1))
4238 return OFK_Never;
4239
4240 // If both operands each have at least two sign bits, the addition
4241 // cannot overflow.
4242 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4243 return OFK_Never;
4244
4245 // TODO: Add ConstantRange::signedAddMayOverflow handling.
4246 return OFK_Sometime;
4247 }
4248
4249 SelectionDAG::OverflowKind
4250 SelectionDAG::computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const {
4251   // X + 0 never overflows
4252 if (isNullConstant(N1))
4253 return OFK_Never;
4254
4255   // mulhi + 1 never overflows:
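  // the high half of an unsigned BW-bit multiply is at most 2^BW - 2
  // ((2^BW - 1)^2 >> BW == 2^BW - 2), so adding a value known to be < 2
  // cannot wrap.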
4256 KnownBits N1Known = computeKnownBits(N1);
4257 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
4258 N1Known.getMaxValue().ult(2))
4259 return OFK_Never;
4260
4261 KnownBits N0Known = computeKnownBits(N0);
4262 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1 &&
4263 N0Known.getMaxValue().ult(2))
4264 return OFK_Never;
4265
4266 // Fallback to ConstantRange::unsignedAddMayOverflow handling.
4267 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false);
4268 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false);
4269 return mapOverflowResult(N0Range.unsignedAddMayOverflow(N1Range));
4270 }
4271
4272 SelectionDAG::OverflowKind
4273 SelectionDAG::computeOverflowForSignedSub(SDValue N0, SDValue N1) const {
4274   // X - 0 never overflows
4275 if (isNullConstant(N1))
4276 return OFK_Never;
4277
4278 // If both operands each have at least two sign bits, the subtraction
4279 // cannot overflow.
4280 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4281 return OFK_Never;
4282
4283 KnownBits N0Known = computeKnownBits(N0);
4284 KnownBits N1Known = computeKnownBits(N1);
4285 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, true);
4286 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, true);
4287 return mapOverflowResult(N0Range.signedSubMayOverflow(N1Range));
4288 }
4289
4290 SelectionDAG::OverflowKind
4291 SelectionDAG::computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const {
4292   // X - 0 never overflows
4293 if (isNullConstant(N1))
4294 return OFK_Never;
4295
4296 KnownBits N0Known = computeKnownBits(N0);
4297 KnownBits N1Known = computeKnownBits(N1);
4298 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false);
4299 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false);
4300 return mapOverflowResult(N0Range.unsignedSubMayOverflow(N1Range));
4301 }
4302
4303 SelectionDAG::OverflowKind
4304 SelectionDAG::computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const {
4305 // X * 0 and X * 1 never overflow.
4306 if (isNullConstant(N1) || isOneConstant(N1))
4307 return OFK_Never;
4308
4309 KnownBits N0Known = computeKnownBits(N0);
4310 KnownBits N1Known = computeKnownBits(N1);
4311 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false);
4312 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false);
4313 return mapOverflowResult(N0Range.unsignedMulMayOverflow(N1Range));
4314 }
4315
4316 SelectionDAG::OverflowKind
4317 SelectionDAG::computeOverflowForSignedMul(SDValue N0, SDValue N1) const {
4318 // X * 0 and X * 1 never overflow.
4319 if (isNullConstant(N1) || isOneConstant(N1))
4320 return OFK_Never;
4321
4322 // Get the size of the result.
4323 unsigned BitWidth = N0.getScalarValueSizeInBits();
4324
4325 // Sum of the sign bits.
4326 unsigned SignBits = ComputeNumSignBits(N0) + ComputeNumSignBits(N1);
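  // e.g. for i8, two operands with 5 sign bits each (SignBits == 10 > 9) lie
  // in [-8, 7], so the product lies in [-56, 64] and cannot overflow.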
4327
4328 // If we have enough sign bits, then there's no overflow.
4329 if (SignBits > BitWidth + 1)
4330 return OFK_Never;
4331
4332 if (SignBits == BitWidth + 1) {
4333     // The overflow occurs when the true multiplication of the
4334     // operands is the minimum negative number.
4335 KnownBits N0Known = computeKnownBits(N0);
4336 KnownBits N1Known = computeKnownBits(N1);
4337 // If one of the operands is non-negative, then there's no
4338 // overflow.
4339 if (N0Known.isNonNegative() || N1Known.isNonNegative())
4340 return OFK_Never;
4341 }
4342
4343 return OFK_Sometime;
4344 }
4345
4346 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth) const {
4347 if (Depth >= MaxRecursionDepth)
4348 return false; // Limit search depth.
4349
4350 EVT OpVT = Val.getValueType();
4351 unsigned BitWidth = OpVT.getScalarSizeInBits();
4352
4353 // Is the constant a known power of 2?
4354 if (ISD::matchUnaryPredicate(Val, [BitWidth](ConstantSDNode *C) {
4355 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4356 }))
4357 return true;
4358
4359 // A left-shift of a constant one will have exactly one bit set because
4360 // shifting the bit off the end is undefined.
4361 if (Val.getOpcode() == ISD::SHL) {
4362 auto *C = isConstOrConstSplat(Val.getOperand(0));
4363 if (C && C->getAPIntValue() == 1)
4364 return true;
4365 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1) &&
4366 isKnownNeverZero(Val, Depth);
4367 }
4368
4369 // Similarly, a logical right-shift of a constant sign-bit will have exactly
4370 // one bit set.
4371 if (Val.getOpcode() == ISD::SRL) {
4372 auto *C = isConstOrConstSplat(Val.getOperand(0));
4373 if (C && C->getAPIntValue().isSignMask())
4374 return true;
4375 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1) &&
4376 isKnownNeverZero(Val, Depth);
4377 }
4378
4379 if (Val.getOpcode() == ISD::ROTL || Val.getOpcode() == ISD::ROTR)
4380 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);
4381
4382 // Are all operands of a build vector constant powers of two?
4383 if (Val.getOpcode() == ISD::BUILD_VECTOR)
4384 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
4385 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
4386 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4387 return false;
4388 }))
4389 return true;
4390
4391 // Is the operand of a splat vector a constant power of two?
4392 if (Val.getOpcode() == ISD::SPLAT_VECTOR)
4393 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
4394 if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())
4395 return true;
4396
4397 // vscale(power-of-two) is a power-of-two for some targets
4398 if (Val.getOpcode() == ISD::VSCALE &&
4399 getTargetLoweringInfo().isVScaleKnownToBeAPowerOfTwo() &&
4400 isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1))
4401 return true;
4402
4403 if (Val.getOpcode() == ISD::SMIN || Val.getOpcode() == ISD::SMAX ||
4404 Val.getOpcode() == ISD::UMIN || Val.getOpcode() == ISD::UMAX)
4405 return isKnownToBeAPowerOfTwo(Val.getOperand(1), Depth + 1) &&
4406 isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);
4407
4408 if (Val.getOpcode() == ISD::SELECT || Val.getOpcode() == ISD::VSELECT)
4409 return isKnownToBeAPowerOfTwo(Val.getOperand(2), Depth + 1) &&
4410 isKnownToBeAPowerOfTwo(Val.getOperand(1), Depth + 1);
4411
4412 // Looking for `x & -x` pattern:
4413 // If x == 0:
4414 // x & -x -> 0
4415 // If x != 0:
4416 // x & -x -> non-zero pow2
4417 // so if we find the pattern return whether we know `x` is non-zero.
4418 SDValue X;
4419 if (sd_match(Val, m_And(m_Value(X), m_Neg(m_Deferred(X)))))
4420 return isKnownNeverZero(X, Depth);
4421
4422 if (Val.getOpcode() == ISD::ZERO_EXTEND)
4423 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);
4424
4425 // More could be done here, though the above checks are enough
4426 // to handle some common cases.
4427 return false;
4428 }
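
// e.g. for x = 0b01101000: -x = 0b10011000, so x & -x = 0b00001000, the
// lowest set bit of x. The pattern yields 0 only when x == 0, which is why
// the match above reduces to proving x non-zero.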

bool SelectionDAG::isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth) const {
  if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Val, true))
    return C1->getValueAPF().getExactLog2Abs() >= 0;

  if (Val.getOpcode() == ISD::UINT_TO_FP || Val.getOpcode() == ISD::SINT_TO_FP)
    return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);

  return false;
}

unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  APInt DemandedElts = VT.isFixedLengthVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ComputeNumSignBits(Op, DemandedElts, Depth);
}

unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
                                          unsigned Depth) const {
  EVT VT = Op.getValueType();
  assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    const APInt &Val = C->getAPIntValue();
    return Val.getNumSignBits();
  }

  if (Depth >= MaxRecursionDepth)
    return 1; // Limit search depth.

  if (!DemandedElts)
    return 1; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp + 1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp;
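
  // e.g. (AssertSext i32 X, i8): bits 31..7 all equal bit 7, giving
  // 32 - 8 + 1 == 25 sign bits; (AssertZext i32 X, i8) clears bits 31..8,
  // giving 32 - 8 == 24 known sign (zero) bits.
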
  case ISD::MERGE_VALUES:
    return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
                              Depth + 1);
  case ISD::SPLAT_VECTOR: {
    // Check if the sign bits of source go down as far as the truncated value.
    unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();
    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);
    break;
  }
  case ISD::BUILD_VECTOR:
    assert(!VT.isScalableVector());
    Tmp = VTBits;
    for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      // BUILD_VECTOR can implicitly truncate sources, we handle this specially
      // for constant nodes to ensure we only look at the sign bits.
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SrcOp)) {
        APInt T = C->getAPIntValue().trunc(VTBits);
        Tmp2 = T.getNumSignBits();
      } else {
        Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);

        if (SrcOp.getValueSizeInBits() != VTBits) {
          assert(SrcOp.getValueSizeInBits() > VTBits &&
                 "Expected BUILD_VECTOR implicit truncation");
          unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
          Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
        }
      }
      Tmp = std::min(Tmp, Tmp2);
    }
    return Tmp;

  case ISD::VECTOR_SHUFFLE: {
    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the shuffle.
    APInt DemandedLHS, DemandedRHS;
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
                                DemandedLHS, DemandedRHS))
      return 1;

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedLHS)
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
    if (!!DemandedRHS) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    // If we don't know anything, early out and try computeKnownBits fall-back.
    if (Tmp == 1)
      break;
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }

  case ISD::BITCAST: {
    if (VT.isScalableVector())
      break;
    SDValue N0 = Op.getOperand(0);
    EVT SrcVT = N0.getValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (VTBits == SrcBits)
      return ComputeNumSignBits(N0, DemandedElts, Depth + 1);

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SrcBits % VTBits) == 0) {
      assert(VT.isVector() && "Expected bitcast to vector");

      unsigned Scale = SrcBits / VTBits;
      APInt SrcDemandedElts =
          APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);

      // Fast case - sign splat can be simply split across the small elements.
      Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
      if (Tmp == SrcBits)
        return VTBits;

      // Slow case - determine how far the sign extends into each sub-element.
      Tmp2 = VTBits;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
            return 1;
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);
        }
      return Tmp2;
    }
    break;
  }

  case ISD::FP_TO_SINT_SAT:
    // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    return VTBits - Tmp + 1;
  case ISD::SIGN_EXTEND:
    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) + Tmp;
  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    Tmp = VTBits - Tmp + 1;
    Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    return std::max(Tmp, Tmp2);
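
  // e.g. (sign_extend_inreg i32 X, i8) guarantees 32 - 8 + 1 == 25 sign bits,
  // but if X was already known to have 30 sign bits then bits 31..7 were
  // already identical and the stronger bound of 30 survives the max() above.
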
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    if (VT.isScalableVector())
      break;
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
    Tmp = VTBits - SrcVT.getScalarSizeInBits();
    return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1) + Tmp;
  }
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // SRA X, C -> adds C sign bits.
    if (std::optional<uint64_t> ShAmt =
            getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
      Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
    return Tmp;
  case ISD::SHL:
    if (std::optional<ConstantRange> ShAmtRange =
            getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
      // Try to look through ZERO/SIGN/ANY_EXTEND. If all extended bits are
      // shifted out, then we can compute the number of sign bits for the
      // operand being extended. A future improvement could be to pass along
      // the "shifted left by" information in the recursive calls to
      // ComputeNumSignBits, allowing us to handle this more generically.
      if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
        SDValue Ext = Op.getOperand(0);
        EVT ExtVT = Ext.getValueType();
        SDValue Extendee = Ext.getOperand(0);
        EVT ExtendeeVT = Extendee.getValueType();
        uint64_t SizeDifference =
            ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits();
        if (SizeDifference <= MinShAmt) {
          Tmp = SizeDifference +
                ComputeNumSignBits(Extendee, DemandedElts, Depth + 1);
          if (MaxShAmt < Tmp)
            return Tmp - MaxShAmt;
        }
      }
      // shl destroys sign bits, ensure it doesn't shift out all sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
      if (MaxShAmt < Tmp)
        return Tmp - MaxShAmt;
    }
    break;
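
  // e.g. (shl (sext i8 X to i32), 8): the operand has at least 25 sign bits,
  // so after shifting left by at most 8 at least 25 - 8 == 17 remain.
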
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
  case ISD::VSELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SELECT_CC:
    Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);

  case ISD::SMIN:
  case ISD::SMAX: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    bool IsMax = (Opcode == ISD::SMAX);
    ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
    if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
      if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
        CstHigh =
            isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
    if (CstLow && CstHigh) {
      if (!IsMax)
        std::swap(CstLow, CstHigh);
      if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
        Tmp = CstLow->getAPIntValue().getNumSignBits();
        Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
        return std::min(Tmp, Tmp2);
      }
    }

    // Fallback - just get the minimum number of sign bits of the operands.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  }
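
  // e.g. smax(smin(X, 100), -100) on i32 clamps X to [-100, 100]; both bounds
  // fit in 8 bits, so every result has at least 32 - 8 + 1 == 25 sign bits.
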
  case ISD::UMIN:
  case ISD::UMAX:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SADDO_CARRY:
  case ISD::UADDO_CARRY:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SSUBO_CARRY:
  case ISD::USUBO_CARRY:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. If it is known to be
    // 0/-1, all bits are sign bits. We know that we have an integer-based
    // boolean since these operations are only available for integer types.
    if (TLI->getBooleanContents(VT.isVector(), false) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::SETCC:
  case ISD::SETCCCARRY:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  }
  case ISD::ROTL:
  case ISD::ROTR:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
    if (Tmp == VTBits)
      return VTBits;

    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
      unsigned RotAmt = C->getAPIntValue().urem(VTBits);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Opcode == ISD::ROTR)
        RotAmt = (VTBits - RotAmt) % VTBits;

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
    }
    break;
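
  // e.g. (rotl (sext i8 X to i32), 1): the input has 25 sign bits and the
  // rotate discards just one of them, leaving 25 - 1 == 24.
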
  case ISD::ADD:
  case ISD::ADDC:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts))
      if (CRHS->isAllOnes()) {
        KnownBits Known =
            computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnes())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp2 == 1) return 1; // Early out.
    return std::min(Tmp, Tmp2) - 1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp2 == 1) return 1; // Early out.

    // Handle NEG.
    if (ConstantSDNode *CLHS =
            isConstOrConstSplat(Op.getOperand(0), DemandedElts))
      if (CLHS->isZero()) {
        KnownBits Known =
            computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnes())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    return std::min(Tmp, Tmp2) - 1;
  case ISD::MUL: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (SignBitsOp0 == 1)
      break;
    unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    if (SignBitsOp1 == 1)
      break;
    unsigned OutValidBits =
        (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
    return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
  }
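
  // e.g. (mul i32 A, B) with 20 and 17 sign bits: the operands carry 13 and
  // 16 significant bits, the product needs at most 13 + 16 == 29 of them, so
  // at least 32 - 29 + 1 == 4 sign bits remain.
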
  case ISD::AVGCEILS:
  case ISD::AVGFLOORS:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SREM:
    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero. The magnitude of the result should be less than or
    // equal to the magnitude of the LHS. Therefore, the result should have
    // at least as many sign bits as the left hand side.
    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
  case ISD::TRUNCATE: {
    // Check if the sign bits of source go down as far as the truncated value.
    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    if (VT.isScalableVector())
      break;
    const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    // Get the reverse index (starting from 1); the operand 1 value indexes
    // elements from the little end. The sign starts at the big end.
    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    // If the sign portion ends in our element the subtraction gives the
    // correct result. Otherwise it gives either a negative or > bitwidth
    // result.
    return std::clamp(KnownSign - rIndex * BitWidth, 0, BitWidth);
  }
  case ISD::INSERT_VECTOR_ELT: {
    if (VT.isScalableVector())
      break;
    // If we know the element index, split the demand between the
    // source vector and the inserted element, otherwise assume we need
    // the original demanded vector elements and the value.
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);
    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];
      DemandedVecElts.clearBit(EltIdx);
    }
    Tmp = std::numeric_limits<unsigned>::max();
    if (DemandedVal) {
      // TODO - handle implicit truncation of inserted elements.
      if (InVal.getScalarValueSizeInBits() != VTBits)
        break;
      Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    if (!!DemandedVecElts) {
      Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    assert(!VT.isScalableVector());
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    // ComputeNumSignBits not yet implemented for scalable vectors.
    if (VecVT.isScalableVector())
      break;
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();

    // If BitWidth > EltBitWidth the value is any-extended, and we do not know
    // anything about sign bits. But if the sizes match we can derive knowledge
    // about sign bits from the vector operand.
    if (BitWidth != EltBitWidth)
      break;

    // If we know the element index, just demand that vector element, else for
    // an unknown element index, ignore DemandedElts and demand them all.
    APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
      DemandedSrcElts =
          APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());

    return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    // Bail until we can represent demanded elements for scalable vectors.
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
    return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
  }
  case ISD::CONCAT_VECTORS: {
    if (VT.isScalableVector())
      break;
    // Determine the minimum number of sign bits across all demanded
    // elts of the input vectors. Early out if the result is already 1.
    Tmp = std::numeric_limits<unsigned>::max();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
      APInt DemandedSub =
          DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
      if (!DemandedSub)
        continue;
      Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::INSERT_SUBVECTOR: {
    if (VT.isScalableVector())
      break;
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedSubElts) {
      Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
      if (Tmp == 1)
        return 1; // early-out
    }
    if (!!DemandedSrcElts) {
      Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (const MDNode *Ranges = LD->getRanges()) {
      if (DemandedElts != 1)
        break;

      ConstantRange CR = getConstantRangeFromMetadata(*Ranges);
      if (VTBits > CR.getBitWidth()) {
        switch (LD->getExtensionType()) {
        case ISD::SEXTLOAD:
          CR = CR.signExtend(VTBits);
          break;
        case ISD::ZEXTLOAD:
          CR = CR.zeroExtend(VTBits);
          break;
        default:
          break;
        }
      }

      if (VTBits != CR.getBitWidth())
        break;
      return std::min(CR.getSignedMin().getNumSignBits(),
                      CR.getSignedMax().getNumSignBits());
    }

    break;
  }
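
  // e.g. an i8 load annotated with !range [-4, 4) has signed minimum -4 and
  // signed maximum 3, each with 6 sign bits, so the result reports 6.
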
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
    // If we are looking at the loaded value.
    if (Op.getResNo() == 0) {
      if (Tmp == VTBits)
        return 1; // early-out
      if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
        return VTBits - Tmp + 1;
      if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
        return VTBits - Tmp;
      if (Op->getOpcode() == ISD::ATOMIC_LOAD) {
        ISD::LoadExtType ETy = cast<AtomicSDNode>(Op)->getExtensionType();
        if (ETy == ISD::SEXTLOAD)
          return VTBits - Tmp + 1;
        if (ETy == ISD::ZEXTLOAD)
          return VTBits - Tmp;
      }
    }
    break;
  }
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. The EXTLOAD case will fall through.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp + 1;
      case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp;
      case ISD::NON_EXTLOAD:
        if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
          // We only need to handle vectors - computeKnownBits should handle
          // scalar cases.
          Type *CstTy = Cst->getType();
          if (CstTy->isVectorTy() && !VT.isScalableVector() &&
              (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
              VTBits == CstTy->getScalarSizeInBits()) {
            Tmp = VTBits;
            for (unsigned i = 0; i != NumElts; ++i) {
              if (!DemandedElts[i])
                continue;
              if (Constant *Elt = Cst->getAggregateElement(i)) {
                if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                  const APInt &Value = CInt->getValue();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
                if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                  APInt Value = CFP->getValueAPF().bitcastToAPInt();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
              }
              // Unknown type. Conservatively assume no bits match sign bit.
              return 1;
            }
            return Tmp;
          }
        }
        break;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Opcode >= ISD::BUILTIN_OP_END ||
      Opcode == ISD::INTRINSIC_WO_CHAIN ||
      Opcode == ISD::INTRINSIC_W_CHAIN ||
      Opcode == ISD::INTRINSIC_VOID) {
    // TODO: This can probably be removed once target code is audited. This
    // is here purely to reduce patch size and review complexity.
    if (!VT.isScalableVector()) {
      unsigned NumBits =
          TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
      if (NumBits > 1)
        FirstAnswer = std::max(FirstAnswer, NumBits);
    }
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
  return std::max(FirstAnswer, Known.countMinSignBits());
}

unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
                                                 unsigned Depth) const {
  unsigned SignBits = ComputeNumSignBits(Op, Depth);
  return Op.getScalarValueSizeInBits() - SignBits + 1;
}
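
// e.g. an i32 value with 25 sign bits carries 32 - 25 + 1 == 8 significant
// bits: seven value bits plus one bit standing in for the whole sign run.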

unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
                                                 const APInt &DemandedElts,
                                                 unsigned Depth) const {
  unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
  return Op.getScalarValueSizeInBits() - SignBits + 1;
}

bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
                                                    unsigned Depth) const {
  // Early out for FREEZE.
  if (Op.getOpcode() == ISD::FREEZE)
    return true;

  // TODO: Assume we don't know anything for now.
  EVT VT = Op.getValueType();
  if (VT.isScalableVector())
    return false;

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
}

bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
                                                    const APInt &DemandedElts,
                                                    bool PoisonOnly,
                                                    unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();

  // Early out for FREEZE.
  if (Opcode == ISD::FREEZE)
    return true;

  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  if (isIntOrFPConstant(Op))
    return true;

  switch (Opcode) {
  case ISD::CONDCODE:
  case ISD::VALUETYPE:
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
  case ISD::CopyFromReg:
    return true;

  case ISD::UNDEF:
    return PoisonOnly;

  case ISD::BUILD_VECTOR:
    // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
    // this shouldn't affect the result.
    for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly,
                                            Depth + 1))
        return false;
    }
    return true;

  case ISD::VECTOR_SHUFFLE: {
    APInt DemandedLHS, DemandedRHS;
    auto *SVN = cast<ShuffleVectorSDNode>(Op);
    if (!getShuffleDemandedElts(DemandedElts.getBitWidth(), SVN->getMask(),
                                DemandedElts, DemandedLHS, DemandedRHS,
                                /*AllowUndefElts=*/false))
      return false;
    if (!DemandedLHS.isZero() &&
        !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS,
                                          PoisonOnly, Depth + 1))
      return false;
    if (!DemandedRHS.isZero() &&
        !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS,
                                          PoisonOnly, Depth + 1))
      return false;
    return true;
  }

  // TODO: Search for noundef attributes from library functions.

  // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.

  default:
    // Allow the target to implement this method for its nodes.
    if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
      return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
          Op, DemandedElts, *this, PoisonOnly, Depth);
    break;
  }

  // If Op can't create undef/poison and none of its operands are undef/poison
  // then Op is never undef/poison.
  // NOTE: TargetNodes can handle this in themselves in
  // isGuaranteedNotToBeUndefOrPoisonForTargetNode or let
  // TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode handle it.
  return !canCreateUndefOrPoison(Op, PoisonOnly, /*ConsiderFlags*/ true,
                                 Depth) &&
         all_of(Op->ops(), [&](SDValue V) {
           return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
         });
}

bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, bool PoisonOnly,
                                          bool ConsiderFlags,
                                          unsigned Depth) const {
  // TODO: Assume we don't know anything for now.
  EVT VT = Op.getValueType();
  if (VT.isScalableVector())
    return true;

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return canCreateUndefOrPoison(Op, DemandedElts, PoisonOnly, ConsiderFlags,
                                Depth);
}

bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
                                          bool PoisonOnly, bool ConsiderFlags,
                                          unsigned Depth) const {
  // TODO: Assume we don't know anything for now.
  EVT VT = Op.getValueType();
  if (VT.isScalableVector())
    return true;

  if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
    return true;

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::FREEZE:
  case ISD::CONCAT_VECTORS:
  case ISD::INSERT_SUBVECTOR:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::AND:
  case ISD::XOR:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::FSHL:
  case ISD::FSHR:
  case ISD::BSWAP:
  case ISD::CTPOP:
  case ISD::BITREVERSE:
  case ISD::PARITY:
  case ISD::SIGN_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::BITCAST:
  case ISD::BUILD_VECTOR:
  case ISD::BUILD_PAIR:
    return false;

  case ISD::SELECT_CC:
  case ISD::SETCC: {
    // Integer setcc cannot create undef or poison.
    if (Op.getOperand(0).getValueType().isInteger())
      return false;

    // FP compares are more complicated. They can create poison for NaN and
    // infinity inputs based on options and flags. The options and flags also
    // cause special no-NaN condition codes to be used. Those condition codes
    // may be preserved even if the no-NaN flag is dropped somewhere.
    unsigned CCOp = Opcode == ISD::SETCC ? 2 : 4;
    ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get();
    if (((unsigned)CCCode & 0x10U))
      return true;

    const TargetOptions &Options = getTarget().Options;
    return Options.NoNaNsFPMath || Options.NoInfsFPMath;
  }

  case ISD::OR:
  case ISD::ZERO_EXTEND:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    // No poison except from flags (which is handled above).
    return false;

  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    // If the max shift amount isn't in range, then the shift can
    // create poison.
    return !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedElts,
                                             PoisonOnly, Depth + 1) ||
           !getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1);
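
  // e.g. (srl i32 X, C): unless every possible C is provably at most 31,
  // getValidMaximumShiftAmount fails and the shift is conservatively treated
  // as able to create poison.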

  case ISD::SCALAR_TO_VECTOR:
    // Check if we demand any upper (undef) elements.
    return !PoisonOnly && DemandedElts.ugt(1);

  case ISD::INSERT_VECTOR_ELT:
  case ISD::EXTRACT_VECTOR_ELT: {
    // Ensure that the element index is in bounds.
    EVT VecVT = Op.getOperand(0).getValueType();
    SDValue Idx = Op.getOperand(Opcode == ISD::INSERT_VECTOR_ELT ? 2 : 1);
    if (isGuaranteedNotToBeUndefOrPoison(Idx, DemandedElts, PoisonOnly,
                                         Depth + 1)) {
      KnownBits KnownIdx = computeKnownBits(Idx, Depth + 1);
      return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
    }
    return true;
  }

  case ISD::VECTOR_SHUFFLE: {
    // Check for any demanded shuffle element that is undef.
    auto *SVN = cast<ShuffleVectorSDNode>(Op);
    for (auto [Idx, Elt] : enumerate(SVN->getMask()))
      if (Elt < 0 && DemandedElts[Idx])
        return true;
    return false;
  }

  default:
    // Allow the target to implement this method for its nodes.
    if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
      return TLI->canCreateUndefOrPoisonForTargetNode(
          Op, DemandedElts, *this, PoisonOnly, ConsiderFlags, Depth);
    break;
  }

  // Be conservative and return true.
  return true;
}

bool SelectionDAG::isADDLike(SDValue Op, bool NoWrap) const {
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::OR)
    return Op->getFlags().hasDisjoint() ||
           haveNoCommonBitsSet(Op.getOperand(0), Op.getOperand(1));
  if (Opcode == ISD::XOR)
    return !NoWrap && isMinSignedConstant(Op.getOperand(1));
  return false;
}
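
// e.g. (or X, 1) behaves exactly like (add X, 1) once bit 0 of X is known
// zero, and (xor X, 0x80000000) on i32 equals (add X, 0x80000000) modulo
// wrap, because adding the sign bit cannot carry into lower bits; hence the
// !NoWrap restriction on the XOR form.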

bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  return Op.getNumOperands() == 2 && isa<ConstantSDNode>(Op.getOperand(1)) &&
         (Op.getOpcode() == ISD::ADD || isADDLike(Op));
}

bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
    return true;

  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
    return !C->getValueAPF().isNaN() ||
           (SNaN && !C->getValueAPF().isSignaling());
  }

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FTAN:
  case ISD::FASIN:
  case ISD::FACOS:
  case ISD::FATAN:
  case ISD::FSINH:
  case ISD::FCOSH:
  case ISD::FTANH:
  case ISD::FMA:
  case ISD::FMAD: {
    if (SNaN)
      return true;
    // TODO: Need isKnownNeverInfinity
    return false;
  }
  case ISD::FCANONICALIZE:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FEXP10:
  case ISD::FTRUNC:
  case ISD::FFLOOR:
  case ISD::FCEIL:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FRINT:
  case ISD::LRINT:
  case ISD::LLRINT:
  case ISD::FNEARBYINT:
  case ISD::FLDEXP: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCOPYSIGN: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SELECT:
    return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  case ISD::FP_EXTEND:
  case ISD::FP_ROUND: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return true;
  case ISD::FSQRT: // Needs the operand known non-negative.
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FPOWI:
  case ISD::FPOW: {
    if (SNaN)
      return true;
    // TODO: Refine on operand
    return false;
  }
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Only one needs to be known not-NaN, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
           (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
  }
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM: {
    // TODO: Does this quiet or return the original NaN as-is?
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::BUILD_VECTOR: {
    for (const SDValue &Opnd : Op->ops())
      if (!isKnownNeverNaN(Opnd, SNaN, Depth + 1))
        return false;
    return true;
  }
  default:
    if (Opcode >= ISD::BUILTIN_OP_END ||
        Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN ||
        Opcode == ISD::INTRINSIC_VOID) {
      return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
    }

    return false;
  }
}

bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
  assert(Op.getValueType().isFloatingPoint() &&
         "Floating point type expected");

  // If the value is a constant, we can obviously see if it is a zero or not.
  return ISD::matchUnaryFpPredicate(
      Op, [](ConstantFPSDNode *C) { return !C->isZero(); });
}

bool SelectionDAG::isKnownNeverZero(SDValue Op, unsigned Depth) const {
  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  assert(!Op.getValueType().isFloatingPoint() &&
         "Floating point types unsupported - use isKnownNeverZeroFloat");

  // If the value is a constant, we can obviously see if it is a zero or not.
  if (ISD::matchUnaryPredicate(Op,
                               [](ConstantSDNode *C) { return !C->isZero(); }))
    return true;

  // TODO: Recognize more cases here. Most of the cases are also incomplete to
  // some degree.
  switch (Op.getOpcode()) {
  default:
    break;

  case ISD::OR:
    return isKnownNeverZero(Op.getOperand(1), Depth + 1) ||
           isKnownNeverZero(Op.getOperand(0), Depth + 1);

  case ISD::VSELECT:
  case ISD::SELECT:
    return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
           isKnownNeverZero(Op.getOperand(2), Depth + 1);

  case ISD::SHL: {
    if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
      return isKnownNeverZero(Op.getOperand(0), Depth + 1);
    KnownBits ValKnown = computeKnownBits(Op.getOperand(0), Depth + 1);
    // 1 << X is never zero.
    if (ValKnown.One[0])
      return true;
    // If even the maximum shift amount leaves at least one known one bit in
    // range, the result is non-zero.
    APInt MaxCnt = computeKnownBits(Op.getOperand(1), Depth + 1).getMaxValue();
    if (MaxCnt.ult(ValKnown.getBitWidth()) &&
        !ValKnown.One.shl(MaxCnt).isZero())
      return true;
    break;
  }
  case ISD::UADDSAT:
  case ISD::UMAX:
    return isKnownNeverZero(Op.getOperand(1), Depth + 1) ||
           isKnownNeverZero(Op.getOperand(0), Depth + 1);

  // For smax (smin), if either operand is known strictly positive (negative),
  // the result is non-zero regardless of the other operand.
  case ISD::SMAX: {
    KnownBits Op1 = computeKnownBits(Op.getOperand(1), Depth + 1);
    if (Op1.isStrictlyPositive())
      return true;

    KnownBits Op0 = computeKnownBits(Op.getOperand(0), Depth + 1);
    if (Op0.isStrictlyPositive())
      return true;

    if (Op1.isNonZero() && Op0.isNonZero())
      return true;

    return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
           isKnownNeverZero(Op.getOperand(0), Depth + 1);
  }
  case ISD::SMIN: {
    KnownBits Op1 = computeKnownBits(Op.getOperand(1), Depth + 1);
    if (Op1.isNegative())
      return true;

    KnownBits Op0 = computeKnownBits(Op.getOperand(0), Depth + 1);
    if (Op0.isNegative())
      return true;

    if (Op1.isNonZero() && Op0.isNonZero())
      return true;

    return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
           isKnownNeverZero(Op.getOperand(0), Depth + 1);
  }
  case ISD::UMIN:
    return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
           isKnownNeverZero(Op.getOperand(0), Depth + 1);

  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTPOP:
  case ISD::ABS:
    return isKnownNeverZero(Op.getOperand(0), Depth + 1);

  case ISD::SRA:
  case ISD::SRL: {
    if (Op->getFlags().hasExact())
      return isKnownNeverZero(Op.getOperand(0), Depth + 1);
    KnownBits ValKnown = computeKnownBits(Op.getOperand(0), Depth + 1);
    if (ValKnown.isNegative())
      return true;
    // If even the maximum shift amount leaves at least one known one bit in
    // range, the result is non-zero.
    APInt MaxCnt = computeKnownBits(Op.getOperand(1), Depth + 1).getMaxValue();
    if (MaxCnt.ult(ValKnown.getBitWidth()) &&
        !ValKnown.One.lshr(MaxCnt).isZero())
      return true;
    break;
  }
  case ISD::UDIV:
  case ISD::SDIV:
    // div exact can only produce a zero if the dividend is zero.
    // TODO: For udiv this is also true if Op1 u<= Op0
    if (Op->getFlags().hasExact())
      return isKnownNeverZero(Op.getOperand(0), Depth + 1);
    break;

  case ISD::ADD:
    if (Op->getFlags().hasNoUnsignedWrap())
      if (isKnownNeverZero(Op.getOperand(1), Depth + 1) ||
          isKnownNeverZero(Op.getOperand(0), Depth + 1))
        return true;
    // TODO: There are a lot more cases we can prove for add.
    break;

  case ISD::SUB: {
    if (isNullConstant(Op.getOperand(0)))
      return isKnownNeverZero(Op.getOperand(1), Depth + 1);

    std::optional<bool> ne =
        KnownBits::ne(computeKnownBits(Op.getOperand(0), Depth + 1),
                      computeKnownBits(Op.getOperand(1), Depth + 1));
    return ne && *ne;
  }

  case ISD::MUL:
    if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
      if (isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
          isKnownNeverZero(Op.getOperand(0), Depth + 1))
        return true;
    break;

  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
    return isKnownNeverZero(Op.getOperand(0), Depth + 1);
  case ISD::VSCALE: {
    const Function &F = getMachineFunction().getFunction();
    const APInt &Multiplier = Op.getConstantOperandAPInt(0);
    ConstantRange CR =
        getVScaleRange(&F, Op.getScalarValueSizeInBits()).multiply(Multiplier);
    if (!CR.contains(APInt(CR.getBitWidth(), 0)))
      return true;
    break;
  }
  }

  return computeKnownBits(Op, Depth).isNonZero();
}
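
// e.g. for (sub X, Y), KnownBits::ne can prove X != Y without knowing either
// value: if X is known odd and Y known even, bit 0 differs and the
// subtraction is non-zero.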

bool SelectionDAG::cannotBeOrderedNegativeFP(SDValue Op) const {
  if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Op, true))
    return !C1->isNegative();

  return Op.getOpcode() == ISD::FABS;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // For negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

// Only bits set in Mask must be negated, other bits may be arbitrary.
SDValue llvm::getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs) {
  if (isBitwiseNot(V, AllowUndefs))
    return V.getOperand(0);

  // Handle any_extend (not (truncate X)) pattern, where Mask only sets
  // bits in the non-extended part.
  ConstantSDNode *MaskC = isConstOrConstSplat(Mask);
  if (!MaskC || V.getOpcode() != ISD::ANY_EXTEND)
    return SDValue();
  SDValue ExtArg = V.getOperand(0);
  if (ExtArg.getScalarValueSizeInBits() >=
          MaskC->getAPIntValue().getActiveBits() &&
      isBitwiseNot(ExtArg, AllowUndefs) &&
      ExtArg.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      ExtArg.getOperand(0).getOperand(0).getValueType() == V.getValueType())
    return ExtArg.getOperand(0).getOperand(0);
  return SDValue();
}

static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B) {
  // Match masked merge pattern (X & ~M) op (Y & M)
  // Including degenerate case (X & ~M) op M
  auto MatchNoCommonBitsPattern = [&](SDValue Not, SDValue Mask,
                                      SDValue Other) {
    if (SDValue NotOperand =
            getBitwiseNotOperand(Not, Mask, /* AllowUndefs */ true)) {
      if (NotOperand->getOpcode() == ISD::ZERO_EXTEND ||
          NotOperand->getOpcode() == ISD::TRUNCATE)
        NotOperand = NotOperand->getOperand(0);

      if (Other == NotOperand)
        return true;
      if (Other->getOpcode() == ISD::AND)
        return NotOperand == Other->getOperand(0) ||
               NotOperand == Other->getOperand(1);
    }
    return false;
  };

  if (A->getOpcode() == ISD::ZERO_EXTEND || A->getOpcode() == ISD::TRUNCATE)
    A = A->getOperand(0);

  if (B->getOpcode() == ISD::ZERO_EXTEND || B->getOpcode() == ISD::TRUNCATE)
    B = B->getOperand(0);

  if (A->getOpcode() == ISD::AND)
    return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) ||
           MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B);
  return false;
}
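
// e.g. in the masked merge (X & ~M) | (Y & M): every bit cleared by ~M on
// one side is exactly a bit selected by M on the other, so the two AND
// results can never both have the same bit set.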

// FIXME: unify with llvm::haveNoCommonBitsSet.
bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");
  if (haveNoCommonBitsSetCommutative(A, B) ||
      haveNoCommonBitsSetCommutative(B, A))
    return true;
  return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
                                        computeKnownBits(B));
}

static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
                               SelectionDAG &DAG) {
  if (cast<ConstantSDNode>(Step)->isZero())
    return DAG.getConstant(0, DL, VT);

  return SDValue();
}

static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
                                ArrayRef<SDValue> Ops,
                                SelectionDAG &DAG) {
  int NumOps = Ops.size();
  assert(NumOps != 0 && "Can't build an empty vector!");
  assert(!VT.isScalableVector() &&
         "BUILD_VECTOR cannot be used with scalable types");
  assert(VT.getVectorNumElements() == (unsigned)NumOps &&
         "Incorrect element count in BUILD_VECTOR!");

  // BUILD_VECTOR of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // A BUILD_VECTOR of sequential extracts from the same source vector of the
  // same type is an identity of that source.
  SDValue IdentitySrc;
  bool IsIdentity = true;
  for (int i = 0; i != NumOps; ++i) {
    if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Ops[i].getOperand(0).getValueType() != VT ||
        (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
        Ops[i].getConstantOperandAPInt(1) != i) {
      IsIdentity = false;
      break;
    }
    IdentitySrc = Ops[i].getOperand(0);
  }
  if (IsIdentity)
    return IdentitySrc;

  return SDValue();
}
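
// e.g. (build_vector (extract_elt X, 0), (extract_elt X, 1),
// (extract_elt X, 2), (extract_elt X, 3)) over a v4i32 X folds to X itself.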

/// Try to simplify vector concatenation to an input value, undef, or build
/// vector.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
                                  ArrayRef<SDValue> Ops,
                                  SelectionDAG &DAG) {
  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
  assert(llvm::all_of(Ops,
                      [Ops](SDValue Op) {
                        return Ops[0].getValueType() == Op.getValueType();
                      }) &&
         "Concatenation of vectors with inconsistent value types!");
  assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
             VT.getVectorElementCount() &&
         "Incorrect element count in vector concatenation!");

  if (Ops.size() == 1)
    return Ops[0];

  // Concat of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // Scan the operands and look for extract operations from a single source
  // that correspond to insertion at the same location via this concatenation:
  // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
  SDValue IdentitySrc;
  bool IsIdentity = true;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    SDValue Op = Ops[i];
    unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op.getOperand(0).getValueType() != VT ||
        (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
        Op.getConstantOperandVal(1) != IdentityIndex) {
      IsIdentity = false;
      break;
    }
    assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
           "Unexpected identity source vector for concat of extracts");
    IdentitySrc = Op.getOperand(0);
  }
  if (IsIdentity) {
    assert(IdentitySrc && "Failed to set source vector of extracts");
    return IdentitySrc;
  }

  // The code below this point is only designed to work for fixed width
  // vectors, so we bail out for now.
  if (VT.isScalableVector())
    return SDValue();

  // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
  // simplified to one big BUILD_VECTOR.
  // FIXME: Add support for SCALAR_TO_VECTOR as well.
  EVT SVT = VT.getScalarType();
  SmallVector<SDValue, 16> Elts;
  for (SDValue Op : Ops) {
    EVT OpVT = Op.getValueType();
    if (Op.isUndef())
      Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
    else if (Op.getOpcode() == ISD::BUILD_VECTOR)
      Elts.append(Op->op_begin(), Op->op_end());
    else
      return SDValue();
  }

  // BUILD_VECTOR requires all inputs to be of the same type, find the
  // maximum type and extend them all.
  for (SDValue Op : Elts)
    SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);

  if (SVT.bitsGT(VT.getScalarType())) {
    for (SDValue &Op : Elts) {
      if (Op.isUndef())
        Op = DAG.getUNDEF(SVT);
      else
        Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
                 ? DAG.getZExtOrTrunc(Op, DL, SVT)
                 : DAG.getSExtOrTrunc(Op, DL, SVT);
    }
  }

  SDValue V = DAG.getBuildVector(VT, DL, Elts);
  NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
  return V;
}

/// Gets or creates the specified node.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
  SDVTList VTs = getVTList(VT);
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, std::nullopt);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  CSEMap.InsertNode(N, IP);

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
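
// Note: the FoldingSet lookup above is what gives the DAG its value
// numbering; requesting the same (Opcode, VT) node twice yields the original
// SDNode rather than a duplicate.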

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VT, N1, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, const SDNodeFlags Flags) {
  assert(N1.getOpcode() != ISD::DELETED_NODE && "Operand is DELETED_NODE!");

  // Constant fold unary operations with a vector integer or float operand.
  switch (Opcode) {
  default:
    // FIXME: Entirely reasonable to perform folding of other unary
    // operations here as the need arises.
    break;
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FFLOOR:
  case ISD::FP_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_FP16:
  case ISD::FP_TO_BF16:
  case ISD::TRUNCATE:
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FP16_TO_FP:
  case ISD::BF16_TO_FP:
  case ISD::BITCAST:
  case ISD::ABS:
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::STEP_VECTOR: {
    SDValue Ops = {N1};
    if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops))
      return Fold;
  }
  }
5949
5950 unsigned OpOpcode = N1.getNode()->getOpcode();
5951 switch (Opcode) {
5952 case ISD::STEP_VECTOR:
5953 assert(VT.isScalableVector() &&
5954 "STEP_VECTOR can only be used with scalable types");
5955 assert(OpOpcode == ISD::TargetConstant &&
5956 VT.getVectorElementType() == N1.getValueType() &&
5957 "Unexpected step operand");
5958 break;
5959 case ISD::FREEZE:
5960 assert(VT == N1.getValueType() && "Unexpected VT!");
5961 if (isGuaranteedNotToBeUndefOrPoison(N1, /*PoisonOnly*/ false,
5962 /*Depth*/ 1))
5963 return N1;
5964 break;
5965 case ISD::TokenFactor:
5966 case ISD::MERGE_VALUES:
5967 case ISD::CONCAT_VECTORS:
5968 return N1; // Factor, merge or concat of one node? No need.
5969 case ISD::BUILD_VECTOR: {
5970 // Attempt to simplify BUILD_VECTOR.
5971 SDValue Ops[] = {N1};
5972 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5973 return V;
5974 break;
5975 }
5976 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
5977 case ISD::FP_EXTEND:
5978 assert(VT.isFloatingPoint() && N1.getValueType().isFloatingPoint() &&
5979 "Invalid FP cast!");
5980 if (N1.getValueType() == VT) return N1; // noop conversion.
5981 assert((!VT.isVector() || VT.getVectorElementCount() ==
5982 N1.getValueType().getVectorElementCount()) &&
5983 "Vector element count mismatch!");
5984 assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!");
5985 if (N1.isUndef())
5986 return getUNDEF(VT);
5987 break;
5988 case ISD::FP_TO_SINT:
5989 case ISD::FP_TO_UINT:
5990 if (N1.isUndef())
5991 return getUNDEF(VT);
5992 break;
5993 case ISD::SINT_TO_FP:
5994 case ISD::UINT_TO_FP:
5995 // [us]itofp(undef) = 0, because the result value is bounded.
5996 if (N1.isUndef())
5997 return getConstantFP(0.0, DL, VT);
5998 break;
5999 case ISD::SIGN_EXTEND:
6000 assert(VT.isInteger() && N1.getValueType().isInteger() &&
6001 "Invalid SIGN_EXTEND!");
6002 assert(VT.isVector() == N1.getValueType().isVector() &&
6003 "SIGN_EXTEND result type type should be vector iff the operand "
6004 "type is vector!");
6005 if (N1.getValueType() == VT) return N1; // noop extension
6006 assert((!VT.isVector() || VT.getVectorElementCount() ==
6007 N1.getValueType().getVectorElementCount()) &&
6008 "Vector element count mismatch!");
6009 assert(N1.getValueType().bitsLT(VT) && "Invalid sext node, dst < src!");
6010 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) {
6011 SDNodeFlags Flags;
6012 if (OpOpcode == ISD::ZERO_EXTEND)
6013 Flags.setNonNeg(N1->getFlags().hasNonNeg());
6014 return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
6015 }
6016 if (OpOpcode == ISD::UNDEF)
6017 // sext(undef) = 0, because the top bits will all be the same.
6018 return getConstant(0, DL, VT);
6019 break;
6020 case ISD::ZERO_EXTEND:
6021 assert(VT.isInteger() && N1.getValueType().isInteger() &&
6022 "Invalid ZERO_EXTEND!");
6023 assert(VT.isVector() == N1.getValueType().isVector() &&
6024 "ZERO_EXTEND result type type should be vector iff the operand "
6025 "type is vector!");
6026 if (N1.getValueType() == VT) return N1; // noop extension
6027 assert((!VT.isVector() || VT.getVectorElementCount() ==
6028 N1.getValueType().getVectorElementCount()) &&
6029 "Vector element count mismatch!");
6030 assert(N1.getValueType().bitsLT(VT) && "Invalid zext node, dst < src!");
6031 if (OpOpcode == ISD::ZERO_EXTEND) { // (zext (zext x)) -> (zext x)
6032 SDNodeFlags Flags;
6033 Flags.setNonNeg(N1->getFlags().hasNonNeg());
6034 return getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
6035 }
6036 if (OpOpcode == ISD::UNDEF)
6037 // zext(undef) = 0, because the top bits will be zero.
6038 return getConstant(0, DL, VT);
6039
6040 // Skip unnecessary zext_inreg pattern:
6041 // (zext (trunc x)) -> x iff the upper bits are known zero.
6042 // TODO: Remove (zext (trunc (and x, c))) exception which some targets
6043 // use to recognise zext_inreg patterns.
6044 if (OpOpcode == ISD::TRUNCATE) {
6045 SDValue OpOp = N1.getOperand(0);
6046 if (OpOp.getValueType() == VT) {
6047 if (OpOp.getOpcode() != ISD::AND) {
6048 APInt HiBits = APInt::getBitsSetFrom(VT.getScalarSizeInBits(),
6049 N1.getScalarValueSizeInBits());
6050 if (MaskedValueIsZero(OpOp, HiBits)) {
6051 transferDbgValues(N1, OpOp);
6052 return OpOp;
6053 }
6054 }
6055 }
6056 }
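    // Illustrative sketch (editorial note, not upstream code): with %x : i32
    // whose top 24 bits are known zero,
    //   (zext (trunc %x to i8) to i32) --> %x
    // since MaskedValueIsZero proves the bits dropped by the truncate were
    // already zero; debug values on the zext are transferred onto %x.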
6057 break;
6058 case ISD::ANY_EXTEND:
6059 assert(VT.isInteger() && N1.getValueType().isInteger() &&
6060 "Invalid ANY_EXTEND!");
6061 assert(VT.isVector() == N1.getValueType().isVector() &&
6062 "ANY_EXTEND result type type should be vector iff the operand "
6063 "type is vector!");
6064 if (N1.getValueType() == VT) return N1; // noop extension
6065 assert((!VT.isVector() || VT.getVectorElementCount() ==
6066 N1.getValueType().getVectorElementCount()) &&
6067 "Vector element count mismatch!");
6068 assert(N1.getValueType().bitsLT(VT) && "Invalid anyext node, dst < src!");
6069
6070 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
6071 OpOpcode == ISD::ANY_EXTEND) {
6072 SDNodeFlags Flags;
6073 if (OpOpcode == ISD::ZERO_EXTEND)
6074 Flags.setNonNeg(N1->getFlags().hasNonNeg());
6075 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
6076 return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
6077 }
6078 if (OpOpcode == ISD::UNDEF)
6079 return getUNDEF(VT);
6080
6081 // (ext (trunc x)) -> x
6082 if (OpOpcode == ISD::TRUNCATE) {
6083 SDValue OpOp = N1.getOperand(0);
6084 if (OpOp.getValueType() == VT) {
6085 transferDbgValues(N1, OpOp);
6086 return OpOp;
6087 }
6088 }
6089 break;
6090 case ISD::TRUNCATE:
6091 assert(VT.isInteger() && N1.getValueType().isInteger() &&
6092 "Invalid TRUNCATE!");
6093 assert(VT.isVector() == N1.getValueType().isVector() &&
6094 "TRUNCATE result type type should be vector iff the operand "
6095 "type is vector!");
6096 if (N1.getValueType() == VT) return N1; // noop truncate
6097 assert((!VT.isVector() || VT.getVectorElementCount() ==
6098 N1.getValueType().getVectorElementCount()) &&
6099 "Vector element count mismatch!");
6100 assert(N1.getValueType().bitsGT(VT) && "Invalid truncate node, src < dst!");
6101 if (OpOpcode == ISD::TRUNCATE)
6102 return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
6103 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
6104 OpOpcode == ISD::ANY_EXTEND) {
6105 // If the source is smaller than the dest, we still need an extend.
6106 if (N1.getOperand(0).getValueType().getScalarType().bitsLT(
6107 VT.getScalarType()))
6108 return getNode(OpOpcode, DL, VT, N1.getOperand(0));
6109 if (N1.getOperand(0).getValueType().bitsGT(VT))
6110 return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
6111 return N1.getOperand(0);
6112 }
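    // Illustrative sketch (editorial note): for (trunc (zext i8 %x to i64) to
    // i32) the source i8 is still narrower than i32, so this re-emits
    // (zext i8 %x to i32); for (trunc (zext i32 %x to i64) to i32) the
    // pre-extension value already matches VT and is returned directly.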
6113 if (OpOpcode == ISD::UNDEF)
6114 return getUNDEF(VT);
6115 if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
6116 return getVScale(DL, VT,
6117 N1.getConstantOperandAPInt(0).trunc(VT.getSizeInBits()));
6118 break;
6119 case ISD::ANY_EXTEND_VECTOR_INREG:
6120 case ISD::ZERO_EXTEND_VECTOR_INREG:
6121 case ISD::SIGN_EXTEND_VECTOR_INREG:
6122 assert(VT.isVector() && "This DAG node is restricted to vector types.");
6123 assert(N1.getValueType().bitsLE(VT) &&
6124 "The input must be the same size or smaller than the result.");
6125 assert(VT.getVectorMinNumElements() <
6126 N1.getValueType().getVectorMinNumElements() &&
6127 "The destination vector type must have fewer lanes than the input.");
6128 break;
6129 case ISD::ABS:
6130 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
6131 if (OpOpcode == ISD::UNDEF)
6132 return getConstant(0, DL, VT);
6133 break;
6134 case ISD::BSWAP:
6135 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
6136 assert((VT.getScalarSizeInBits() % 16 == 0) &&
6137 "BSWAP types must be a multiple of 16 bits!");
6138 if (OpOpcode == ISD::UNDEF)
6139 return getUNDEF(VT);
6140 // bswap(bswap(X)) -> X.
6141 if (OpOpcode == ISD::BSWAP)
6142 return N1.getOperand(0);
6143 break;
6144 case ISD::BITREVERSE:
6145 assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
6146 if (OpOpcode == ISD::UNDEF)
6147 return getUNDEF(VT);
6148 break;
6149 case ISD::BITCAST:
6150 assert(VT.getSizeInBits() == N1.getValueSizeInBits() &&
6151 "Cannot BITCAST between types of different sizes!");
6152 if (VT == N1.getValueType()) return N1; // noop conversion.
6153 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
6154 return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
6155 if (OpOpcode == ISD::UNDEF)
6156 return getUNDEF(VT);
6157 break;
6158 case ISD::SCALAR_TO_VECTOR:
6159 assert(VT.isVector() && !N1.getValueType().isVector() &&
6160 (VT.getVectorElementType() == N1.getValueType() ||
6161 (VT.getVectorElementType().isInteger() &&
6162 N1.getValueType().isInteger() &&
6163 VT.getVectorElementType().bitsLE(N1.getValueType()))) &&
6164 "Illegal SCALAR_TO_VECTOR node!");
6165 if (OpOpcode == ISD::UNDEF)
6166 return getUNDEF(VT);
6167 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
6168 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
6169 isa<ConstantSDNode>(N1.getOperand(1)) &&
6170 N1.getConstantOperandVal(1) == 0 &&
6171 N1.getOperand(0).getValueType() == VT)
6172 return N1.getOperand(0);
6173 break;
6174 case ISD::FNEG:
6175 // Negation of an unknown bag of bits is still completely undefined.
6176 if (OpOpcode == ISD::UNDEF)
6177 return getUNDEF(VT);
6178
6179 if (OpOpcode == ISD::FNEG) // --X -> X
6180 return N1.getOperand(0);
6181 break;
6182 case ISD::FABS:
6183 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
6184 return getNode(ISD::FABS, DL, VT, N1.getOperand(0));
6185 break;
6186 case ISD::VSCALE:
6187 assert(VT == N1.getValueType() && "Unexpected VT!");
6188 break;
6189 case ISD::CTPOP:
6190 if (N1.getValueType().getScalarType() == MVT::i1)
6191 return N1;
6192 break;
6193 case ISD::CTLZ:
6194 case ISD::CTTZ:
6195 if (N1.getValueType().getScalarType() == MVT::i1)
6196 return getNOT(DL, N1, N1.getValueType());
6197 break;
6198 case ISD::VECREDUCE_ADD:
6199 if (N1.getValueType().getScalarType() == MVT::i1)
6200 return getNode(ISD::VECREDUCE_XOR, DL, VT, N1);
6201 break;
6202 case ISD::VECREDUCE_SMIN:
6203 case ISD::VECREDUCE_UMAX:
6204 if (N1.getValueType().getScalarType() == MVT::i1)
6205 return getNode(ISD::VECREDUCE_OR, DL, VT, N1);
6206 break;
6207 case ISD::VECREDUCE_SMAX:
6208 case ISD::VECREDUCE_UMIN:
6209 if (N1.getValueType().getScalarType() == MVT::i1)
6210 return getNode(ISD::VECREDUCE_AND, DL, VT, N1);
6211 break;
6212 case ISD::SPLAT_VECTOR:
6213 assert(VT.isVector() && "Wrong return type!");
6214 // FIXME: Hexagon uses i32 scalar for a floating point zero vector so allow
6215 // that for now.
6216 assert((VT.getVectorElementType() == N1.getValueType() ||
6217 (VT.isFloatingPoint() && N1.getValueType() == MVT::i32) ||
6218 (VT.getVectorElementType().isInteger() &&
6219 N1.getValueType().isInteger() &&
6220 VT.getVectorElementType().bitsLE(N1.getValueType()))) &&
6221 "Wrong operand type!");
6222 break;
6223 }
6224
6225 SDNode *N;
6226 SDVTList VTs = getVTList(VT);
6227 SDValue Ops[] = {N1};
6228 if (VT != MVT::Glue) { // Don't CSE glue producing nodes
6229 FoldingSetNodeID ID;
6230 AddNodeIDNode(ID, Opcode, VTs, Ops);
6231 void *IP = nullptr;
6232 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6233 E->intersectFlagsWith(Flags);
6234 return SDValue(E, 0);
6235 }
6236
6237 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6238 N->setFlags(Flags);
6239 createOperands(N, Ops);
6240 CSEMap.InsertNode(N, IP);
6241 } else {
6242 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6243 createOperands(N, Ops);
6244 }
6245
6246 InsertNode(N);
6247 SDValue V = SDValue(N, 0);
6248 NewSDValueDbgMsg(V, "Creating new node: ", this);
6249 return V;
6250 }
6251
6252 static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
6253 const APInt &C2) {
6254 switch (Opcode) {
6255 case ISD::ADD: return C1 + C2;
6256 case ISD::SUB: return C1 - C2;
6257 case ISD::MUL: return C1 * C2;
6258 case ISD::AND: return C1 & C2;
6259 case ISD::OR: return C1 | C2;
6260 case ISD::XOR: return C1 ^ C2;
6261 case ISD::SHL: return C1 << C2;
6262 case ISD::SRL: return C1.lshr(C2);
6263 case ISD::SRA: return C1.ashr(C2);
6264 case ISD::ROTL: return C1.rotl(C2);
6265 case ISD::ROTR: return C1.rotr(C2);
6266 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
6267 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
6268 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
6269 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
6270 case ISD::SADDSAT: return C1.sadd_sat(C2);
6271 case ISD::UADDSAT: return C1.uadd_sat(C2);
6272 case ISD::SSUBSAT: return C1.ssub_sat(C2);
6273 case ISD::USUBSAT: return C1.usub_sat(C2);
6274 case ISD::SSHLSAT: return C1.sshl_sat(C2);
6275 case ISD::USHLSAT: return C1.ushl_sat(C2);
6276 case ISD::UDIV:
6277 if (!C2.getBoolValue())
6278 break;
6279 return C1.udiv(C2);
6280 case ISD::UREM:
6281 if (!C2.getBoolValue())
6282 break;
6283 return C1.urem(C2);
6284 case ISD::SDIV:
6285 if (!C2.getBoolValue())
6286 break;
6287 return C1.sdiv(C2);
6288 case ISD::SREM:
6289 if (!C2.getBoolValue())
6290 break;
6291 return C1.srem(C2);
6292 case ISD::AVGFLOORS:
6293 return APIntOps::avgFloorS(C1, C2);
6294 case ISD::AVGFLOORU:
6295 return APIntOps::avgFloorU(C1, C2);
6296 case ISD::AVGCEILS:
6297 return APIntOps::avgCeilS(C1, C2);
6298 case ISD::AVGCEILU:
6299 return APIntOps::avgCeilU(C1, C2);
6300 case ISD::ABDS:
6301 return APIntOps::abds(C1, C2);
6302 case ISD::ABDU:
6303 return APIntOps::abdu(C1, C2);
6304 case ISD::MULHS:
6305 return APIntOps::mulhs(C1, C2);
6306 case ISD::MULHU:
6307 return APIntOps::mulhu(C1, C2);
6308 }
6309 return std::nullopt;
6310 }
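// Worked example (editorial note): FoldValue(ISD::SRA, APInt(8, 0xF0),
// APInt(8, 2)) yields 0xFC, i.e. -16 ashr 2 == -4 in i8, while the
// division/remainder cases return std::nullopt on a zero divisor so the
// caller does not fold undefined behavior into a constant.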
6311 // Handle constant folding with UNDEF.
6312 // TODO: Handle more cases.
6313 static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1,
6314 bool IsUndef1, const APInt &C2,
6315 bool IsUndef2) {
6316 if (!(IsUndef1 || IsUndef2))
6317 return FoldValue(Opcode, C1, C2);
6318
6319 // Fold and(x, undef) -> 0
6320 // Fold mul(x, undef) -> 0
6321 if (Opcode == ISD::AND || Opcode == ISD::MUL)
6322 return APInt::getZero(C1.getBitWidth());
6323
6324 return std::nullopt;
6325 }
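// Illustrative (editorial note): FoldValueWithUndef(ISD::AND, C1,
// /*IsUndef1=*/false, C2, /*IsUndef2=*/true) returns
// APInt::getZero(C1.getBitWidth()), matching the and(x, undef) -> 0 fold
// above; every other opcode with an undef operand yields std::nullopt.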
6326
6327 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
6328 const GlobalAddressSDNode *GA,
6329 const SDNode *N2) {
6330 if (GA->getOpcode() != ISD::GlobalAddress)
6331 return SDValue();
6332 if (!TLI->isOffsetFoldingLegal(GA))
6333 return SDValue();
6334 auto *C2 = dyn_cast<ConstantSDNode>(N2);
6335 if (!C2)
6336 return SDValue();
6337 int64_t Offset = C2->getSExtValue();
6338 switch (Opcode) {
6339 case ISD::ADD: break;
6340 case ISD::SUB: Offset = -uint64_t(Offset); break;
6341 default: return SDValue();
6342 }
6343 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
6344 GA->getOffset() + uint64_t(Offset));
6345 }
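// Illustrative (editorial note): assuming TLI->isOffsetFoldingLegal(GA), an
// (add (GlobalAddress @g + 8), 4) folds to GlobalAddress @g + 12, and the
// ISD::SUB case negates the constant first, so (sub ..., 4) gives @g + 4.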
6346
6347 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
6348 switch (Opcode) {
6349 case ISD::SDIV:
6350 case ISD::UDIV:
6351 case ISD::SREM:
6352 case ISD::UREM: {
6353 // If a divisor is zero/undef or any element of a divisor vector is
6354 // zero/undef, the whole op is undef.
6355 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
6356 SDValue Divisor = Ops[1];
6357 if (Divisor.isUndef() || isNullConstant(Divisor))
6358 return true;
6359
6360 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
6361 llvm::any_of(Divisor->op_values(),
6362 [](SDValue V) { return V.isUndef() ||
6363 isNullConstant(V); });
6364 // TODO: Handle signed overflow.
6365 }
6366 // TODO: Handle oversized shifts.
6367 default:
6368 return false;
6369 }
6370 }
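// Illustrative (editorial note): isUndef(ISD::UDIV, {X, D}) reports true
// when D is undef, the constant 0, or a constant BUILD_VECTOR with any
// zero/undef lane, e.g. <i32 4, i32 0>, letting the caller fold the whole
// op to UNDEF.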
6371
6372 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
6373 EVT VT, ArrayRef<SDValue> Ops,
6374 SDNodeFlags Flags) {
6375 // If the opcode is a target-specific ISD node, there's nothing we can
6376 // do here and the operand rules may not line up with the below, so
6377 // bail early.
6378 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
6379 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
6380 // foldCONCAT_VECTORS in getNode before this is called.
6381 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
6382 return SDValue();
6383
6384 unsigned NumOps = Ops.size();
6385 if (NumOps == 0)
6386 return SDValue();
6387
6388 if (isUndef(Opcode, Ops))
6389 return getUNDEF(VT);
6390
6391 // Handle unary special cases.
6392 if (NumOps == 1) {
6393 SDValue N1 = Ops[0];
6394
6395 // Constant fold unary operations with an integer constant operand. Even
6396 // opaque constant will be folded, because the folding of unary operations
6397 // doesn't create new constants with different values. Nevertheless, the
6398 // opaque flag is preserved during folding to prevent future folding with
6399 // other constants.
6400 if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
6401 const APInt &Val = C->getAPIntValue();
6402 switch (Opcode) {
6403 case ISD::SIGN_EXTEND:
6404 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
6405 C->isTargetOpcode(), C->isOpaque());
6406 case ISD::TRUNCATE:
6407 if (C->isOpaque())
6408 break;
6409 [[fallthrough]];
6410 case ISD::ZERO_EXTEND:
6411 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
6412 C->isTargetOpcode(), C->isOpaque());
6413 case ISD::ANY_EXTEND:
6414 // Some targets like RISCV prefer to sign extend some types.
6415 if (TLI->isSExtCheaperThanZExt(N1.getValueType(), VT))
6416 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
6417 C->isTargetOpcode(), C->isOpaque());
6418 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
6419 C->isTargetOpcode(), C->isOpaque());
6420 case ISD::ABS:
6421 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
6422 C->isOpaque());
6423 case ISD::BITREVERSE:
6424 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
6425 C->isOpaque());
6426 case ISD::BSWAP:
6427 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
6428 C->isOpaque());
6429 case ISD::CTPOP:
6430 return getConstant(Val.popcount(), DL, VT, C->isTargetOpcode(),
6431 C->isOpaque());
6432 case ISD::CTLZ:
6433 case ISD::CTLZ_ZERO_UNDEF:
6434 return getConstant(Val.countl_zero(), DL, VT, C->isTargetOpcode(),
6435 C->isOpaque());
6436 case ISD::CTTZ:
6437 case ISD::CTTZ_ZERO_UNDEF:
6438 return getConstant(Val.countr_zero(), DL, VT, C->isTargetOpcode(),
6439 C->isOpaque());
6440 case ISD::UINT_TO_FP:
6441 case ISD::SINT_TO_FP: {
6442 APFloat apf(EVTToAPFloatSemantics(VT),
6443 APInt::getZero(VT.getSizeInBits()));
6444 (void)apf.convertFromAPInt(Val, Opcode == ISD::SINT_TO_FP,
6445 APFloat::rmNearestTiesToEven);
6446 return getConstantFP(apf, DL, VT);
6447 }
6448 case ISD::FP16_TO_FP:
6449 case ISD::BF16_TO_FP: {
6450 bool Ignored;
6451 APFloat FPV(Opcode == ISD::FP16_TO_FP ? APFloat::IEEEhalf()
6452 : APFloat::BFloat(),
6453 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
6454
6455 // This can return overflow, underflow, or inexact; we don't care.
6456 // FIXME need to be more flexible about rounding mode.
6457 (void)FPV.convert(EVTToAPFloatSemantics(VT),
6458 APFloat::rmNearestTiesToEven, &Ignored);
6459 return getConstantFP(FPV, DL, VT);
6460 }
6461 case ISD::STEP_VECTOR:
6462 if (SDValue V = FoldSTEP_VECTOR(DL, VT, N1, *this))
6463 return V;
6464 break;
6465 case ISD::BITCAST:
6466 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
6467 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
6468 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
6469 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
6470 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
6471 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
6472 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
6473 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
6474 break;
6475 }
6476 }
6477
6478 // Constant fold unary operations with a floating point constant operand.
6479 if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) {
6480 APFloat V = C->getValueAPF(); // make copy
6481 switch (Opcode) {
6482 case ISD::FNEG:
6483 V.changeSign();
6484 return getConstantFP(V, DL, VT);
6485 case ISD::FABS:
6486 V.clearSign();
6487 return getConstantFP(V, DL, VT);
6488 case ISD::FCEIL: {
6489 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
6490 if (fs == APFloat::opOK || fs == APFloat::opInexact)
6491 return getConstantFP(V, DL, VT);
6492 return SDValue();
6493 }
6494 case ISD::FTRUNC: {
6495 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
6496 if (fs == APFloat::opOK || fs == APFloat::opInexact)
6497 return getConstantFP(V, DL, VT);
6498 return SDValue();
6499 }
6500 case ISD::FFLOOR: {
6501 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
6502 if (fs == APFloat::opOK || fs == APFloat::opInexact)
6503 return getConstantFP(V, DL, VT);
6504 return SDValue();
6505 }
6506 case ISD::FP_EXTEND: {
6507 bool ignored;
6508 // This can return overflow, underflow, or inexact; we don't care.
6509 // FIXME need to be more flexible about rounding mode.
6510 (void)V.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
6511 &ignored);
6512 return getConstantFP(V, DL, VT);
6513 }
6514 case ISD::FP_TO_SINT:
6515 case ISD::FP_TO_UINT: {
6516 bool ignored;
6517 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
6518 // FIXME need to be more flexible about rounding mode.
6519 APFloat::opStatus s =
6520 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
6521 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
6522 break;
6523 return getConstant(IntVal, DL, VT);
6524 }
6525 case ISD::FP_TO_FP16:
6526 case ISD::FP_TO_BF16: {
6527 bool Ignored;
6528 // This can return overflow, underflow, or inexact; we don't care.
6529 // FIXME need to be more flexible about rounding mode.
6530 (void)V.convert(Opcode == ISD::FP_TO_FP16 ? APFloat::IEEEhalf()
6531 : APFloat::BFloat(),
6532 APFloat::rmNearestTiesToEven, &Ignored);
6533 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
6534 }
6535 case ISD::BITCAST:
6536 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
6537 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
6538 VT);
6539 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
6540 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
6541 VT);
6542 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
6543 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL,
6544 VT);
6545 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
6546 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
6547 break;
6548 }
6549 }
6550
6551 // Early-out if we failed to constant fold a bitcast.
6552 if (Opcode == ISD::BITCAST)
6553 return SDValue();
6554 }
6555
6556 // Handle binops special cases.
6557 if (NumOps == 2) {
6558 if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops))
6559 return CFP;
6560
6561 if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
6562 if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
6563 if (C1->isOpaque() || C2->isOpaque())
6564 return SDValue();
6565
6566 std::optional<APInt> FoldAttempt =
6567 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
6568 if (!FoldAttempt)
6569 return SDValue();
6570
6571 SDValue Folded = getConstant(*FoldAttempt, DL, VT);
6572 assert((!Folded || !VT.isVector()) &&
6573 "Can't fold vectors ops with scalar operands");
6574 return Folded;
6575 }
6576 }
6577
6578 // fold (add Sym, c) -> Sym+c
6579 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[0]))
6580 return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode());
6581 if (TLI->isCommutativeBinOp(Opcode))
6582 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[1]))
6583 return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode());
6584 }
6585
6586 // This is for vector folding only from here on.
6587 if (!VT.isVector())
6588 return SDValue();
6589
6590 ElementCount NumElts = VT.getVectorElementCount();
6591
6592 // See if we can fold through any bitcasted integer ops.
6593 if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() &&
6594 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
6595 (Ops[0].getOpcode() == ISD::BITCAST ||
6596 Ops[1].getOpcode() == ISD::BITCAST)) {
6597 SDValue N1 = peekThroughBitcasts(Ops[0]);
6598 SDValue N2 = peekThroughBitcasts(Ops[1]);
6599 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
6600 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
6601 if (BV1 && BV2 && N1.getValueType().isInteger() &&
6602 N2.getValueType().isInteger()) {
6603 bool IsLE = getDataLayout().isLittleEndian();
6604 unsigned EltBits = VT.getScalarSizeInBits();
6605 SmallVector<APInt> RawBits1, RawBits2;
6606 BitVector UndefElts1, UndefElts2;
6607 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
6608 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
6609 SmallVector<APInt> RawBits;
6610 for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
6611 std::optional<APInt> Fold = FoldValueWithUndef(
6612 Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);
6613 if (!Fold)
6614 break;
6615 RawBits.push_back(*Fold);
6616 }
6617 if (RawBits.size() == NumElts.getFixedValue()) {
6618 // We have constant folded, but we might need to cast this again back
6619 // to the original (possibly legalized) type.
6620 EVT BVVT, BVEltVT;
6621 if (N1.getValueType() == VT) {
6622 BVVT = N1.getValueType();
6623 BVEltVT = BV1->getOperand(0).getValueType();
6624 } else {
6625 BVVT = N2.getValueType();
6626 BVEltVT = BV2->getOperand(0).getValueType();
6627 }
6628 unsigned BVEltBits = BVEltVT.getSizeInBits();
6629 SmallVector<APInt> DstBits;
6630 BitVector DstUndefs;
6631 BuildVectorSDNode::recastRawBits(IsLE, BVVT.getScalarSizeInBits(),
6632 DstBits, RawBits, DstUndefs,
6633 BitVector(RawBits.size(), false));
6634 SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT));
6635 for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
6636 if (DstUndefs[I])
6637 continue;
6638 Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT);
6639 }
6640 return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
6641 }
6642 }
6643 }
6644 }
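  // Illustrative (editorial note): for v2i32 (xor (bitcast v4i16 A),
  // (bitcast v4i16 B)) with constant A and B, the lanes of each are
  // repacked into two 32-bit APInts, XORed lane-wise, recast back to four
  // 16-bit lanes, rebuilt as a v4i16 BUILD_VECTOR, and finally bitcast to
  // the original v2i32 type.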
6645
6646 // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
6647 // (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
6648 if ((Opcode == ISD::MUL || Opcode == ISD::SHL) &&
6649 Ops[0].getOpcode() == ISD::STEP_VECTOR) {
6650 APInt RHSVal;
6651 if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) {
6652 APInt NewStep = Opcode == ISD::MUL
6653 ? Ops[0].getConstantOperandAPInt(0) * RHSVal
6654 : Ops[0].getConstantOperandAPInt(0) << RHSVal;
6655 return getStepVector(DL, VT, NewStep);
6656 }
6657 }
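  // Illustrative (editorial note): (mul (step_vector 3), splat 2) becomes
  // (step_vector 6), since lane i holds 3*i and scaling by 2 gives 6*i;
  // likewise (shl (step_vector 3), splat 1) becomes (step_vector 6).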
6658
6659 auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
6660 return !Op.getValueType().isVector() ||
6661 Op.getValueType().getVectorElementCount() == NumElts;
6662 };
6663
6664 auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
6665 return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
6666 Op.getOpcode() == ISD::BUILD_VECTOR ||
6667 Op.getOpcode() == ISD::SPLAT_VECTOR;
6668 };
6669
6670 // All operands must be vector types with the same number of elements as
6671 // the result type and must be either UNDEF or a build/splat vector
6672 // or UNDEF scalars.
6673 if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
6674 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
6675 return SDValue();
6676
6677 // If we are comparing vectors, then the result needs to be a i1 boolean that
6678 // is then extended back to the legal result type depending on how booleans
6679 // are represented.
6680 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
6681 ISD::NodeType ExtendCode =
6682 (Opcode == ISD::SETCC && SVT != VT.getScalarType())
6683 ? TargetLowering::getExtendForContent(TLI->getBooleanContents(VT))
6684 : ISD::SIGN_EXTEND;
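  // Illustrative (editorial note): on a target reporting
  // ZeroOrNegativeOneBooleanContent, a folded vector SETCC lane producing
  // i1 true is widened with SIGN_EXTEND so it becomes all-ones (e.g. -1 in
  // i32) in the legal element type.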
6685
6686 // Find legal integer scalar type for constant promotion and
6687 // ensure that its scalar size is at least as large as source.
6688 EVT LegalSVT = VT.getScalarType();
6689 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
6690 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
6691 if (LegalSVT.bitsLT(VT.getScalarType()))
6692 return SDValue();
6693 }
6694
6695 // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
6696 // only have one operand to check. For fixed-length vector types we may have
6697 // a combination of BUILD_VECTOR and SPLAT_VECTOR.
6698 unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
6699
6700 // Constant fold each scalar lane separately.
6701 SmallVector<SDValue, 4> ScalarResults;
6702 for (unsigned I = 0; I != NumVectorElts; I++) {
6703 SmallVector<SDValue, 4> ScalarOps;
6704 for (SDValue Op : Ops) {
6705 EVT InSVT = Op.getValueType().getScalarType();
6706 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
6707 Op.getOpcode() != ISD::SPLAT_VECTOR) {
6708 if (Op.isUndef())
6709 ScalarOps.push_back(getUNDEF(InSVT));
6710 else
6711 ScalarOps.push_back(Op);
6712 continue;
6713 }
6714
6715 SDValue ScalarOp =
6716 Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
6717 EVT ScalarVT = ScalarOp.getValueType();
6718
6719 // Build vector (integer) scalar operands may need implicit
6720 // truncation - do this before constant folding.
6721 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) {
6722 // Don't create illegally-typed nodes unless they're constants or undef
6723 // - if we fail to constant fold we can't guarantee the (dead) nodes
6724 // we're creating will be cleaned up before being visited for
6725 // legalization.
6726 if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() &&
6727 !isa<ConstantSDNode>(ScalarOp) &&
6728 TLI->getTypeAction(*getContext(), InSVT) !=
6729 TargetLowering::TypeLegal)
6730 return SDValue();
6731 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
6732 }
6733
6734 ScalarOps.push_back(ScalarOp);
6735 }
6736
6737 // Constant fold the scalar operands.
6738 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
6739
6740 // Legalize the (integer) scalar constant if necessary.
6741 if (LegalSVT != SVT)
6742 ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult);
6743
6744 // Scalar folding only succeeded if the result is a constant or UNDEF.
6745 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
6746 ScalarResult.getOpcode() != ISD::ConstantFP)
6747 return SDValue();
6748 ScalarResults.push_back(ScalarResult);
6749 }
6750
6751 SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
6752 : getBuildVector(VT, DL, ScalarResults);
6753 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
6754 return V;
6755 }
6756
6757 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
6758 EVT VT, ArrayRef<SDValue> Ops) {
6759 // TODO: Add support for unary/ternary fp opcodes.
6760 if (Ops.size() != 2)
6761 return SDValue();
6762
6763 // TODO: We don't do any constant folding for strict FP opcodes here, but we
6764 // should. That will require dealing with a potentially non-default
6765 // rounding mode, checking the "opStatus" return value from the APFloat
6766 // math calculations, and possibly other variations.
6767 SDValue N1 = Ops[0];
6768 SDValue N2 = Ops[1];
6769 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false);
6770 ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false);
6771 if (N1CFP && N2CFP) {
6772 APFloat C1 = N1CFP->getValueAPF(); // make copy
6773 const APFloat &C2 = N2CFP->getValueAPF();
6774 switch (Opcode) {
6775 case ISD::FADD:
6776 C1.add(C2, APFloat::rmNearestTiesToEven);
6777 return getConstantFP(C1, DL, VT);
6778 case ISD::FSUB:
6779 C1.subtract(C2, APFloat::rmNearestTiesToEven);
6780 return getConstantFP(C1, DL, VT);
6781 case ISD::FMUL:
6782 C1.multiply(C2, APFloat::rmNearestTiesToEven);
6783 return getConstantFP(C1, DL, VT);
6784 case ISD::FDIV:
6785 C1.divide(C2, APFloat::rmNearestTiesToEven);
6786 return getConstantFP(C1, DL, VT);
6787 case ISD::FREM:
6788 C1.mod(C2);
6789 return getConstantFP(C1, DL, VT);
6790 case ISD::FCOPYSIGN:
6791 C1.copySign(C2);
6792 return getConstantFP(C1, DL, VT);
6793 case ISD::FMINNUM:
6794 return getConstantFP(minnum(C1, C2), DL, VT);
6795 case ISD::FMAXNUM:
6796 return getConstantFP(maxnum(C1, C2), DL, VT);
6797 case ISD::FMINIMUM:
6798 return getConstantFP(minimum(C1, C2), DL, VT);
6799 case ISD::FMAXIMUM:
6800 return getConstantFP(maximum(C1, C2), DL, VT);
6801 default: break;
6802 }
6803 }
6804 if (N1CFP && Opcode == ISD::FP_ROUND) {
6805 APFloat C1 = N1CFP->getValueAPF(); // make copy
6806 bool Unused;
6807 // This can return overflow, underflow, or inexact; we don't care.
6808 // FIXME need to be more flexible about rounding mode.
6809 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
6810 &Unused);
6811 return getConstantFP(C1, DL, VT);
6812 }
6813
6814 switch (Opcode) {
6815 case ISD::FSUB:
6816 // -0.0 - undef --> undef (consistent with "fneg undef")
6817 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
6818 if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
6819 return getUNDEF(VT);
6820 [[fallthrough]];
6821
6822 case ISD::FADD:
6823 case ISD::FMUL:
6824 case ISD::FDIV:
6825 case ISD::FREM:
6826 // If both operands are undef, the result is undef. If one operand is undef,
6827 // the result is NaN. This should match the behavior of the IR optimizer.
6828 if (N1.isUndef() && N2.isUndef())
6829 return getUNDEF(VT);
6830 if (N1.isUndef() || N2.isUndef())
6831 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
6832 }
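  // Illustrative (editorial note): (fadd undef, undef) folds to undef,
  // while (fadd %x, undef) folds to NaN, mirroring how the IR-level
  // optimizer treats these cases.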
6833 return SDValue();
6834 }
6835
6836 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
6837 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
6838
6839 // There's no need to assert on a byte-aligned pointer. All pointers are at
6840 // least byte aligned.
6841 if (A == Align(1))
6842 return Val;
6843
6844 SDVTList VTs = getVTList(Val.getValueType());
6845 FoldingSetNodeID ID;
6846 AddNodeIDNode(ID, ISD::AssertAlign, VTs, {Val});
6847 ID.AddInteger(A.value());
6848
6849 void *IP = nullptr;
6850 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6851 return SDValue(E, 0);
6852
6853 auto *N =
6854 newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, A);
6855 createOperands(N, {Val});
6856
6857 CSEMap.InsertNode(N, IP);
6858 InsertNode(N);
6859
6860 SDValue V(N, 0);
6861 NewSDValueDbgMsg(V, "Creating new node: ", this);
6862 return V;
6863 }
6864
6865 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6866 SDValue N1, SDValue N2) {
6867 SDNodeFlags Flags;
6868 if (Inserter)
6869 Flags = Inserter->getFlags();
6870 return getNode(Opcode, DL, VT, N1, N2, Flags);
6871 }
6872
6873 void SelectionDAG::canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1,
6874 SDValue &N2) const {
6875 if (!TLI->isCommutativeBinOp(Opcode))
6876 return;
6877
6878 // Canonicalize:
6879 // binop(const, nonconst) -> binop(nonconst, const)
6880 SDNode *N1C = isConstantIntBuildVectorOrConstantInt(N1);
6881 SDNode *N2C = isConstantIntBuildVectorOrConstantInt(N2);
6882 SDNode *N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
6883 SDNode *N2CFP = isConstantFPBuildVectorOrConstantFP(N2);
6884 if ((N1C && !N2C) || (N1CFP && !N2CFP))
6885 std::swap(N1, N2);
6886
6887 // Canonicalize:
6888 // binop(splat(x), step_vector) -> binop(step_vector, splat(x))
6889 else if (N1.getOpcode() == ISD::SPLAT_VECTOR &&
6890 N2.getOpcode() == ISD::STEP_VECTOR)
6891 std::swap(N1, N2);
6892 }
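// Illustrative usage (editorial note): for a commutative opcode,
// getNode(ISD::ADD, DL, VT, C, X) is canonicalized to add(X, C), and
// add(splat(s), step_vector(c)) to add(step_vector(c), splat(s)), so later
// folds only need to match the constant on the RHS and STEP_VECTOR on the
// LHS.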
6893
6894 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6895 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
6896 assert(N1.getOpcode() != ISD::DELETED_NODE &&
6897 N2.getOpcode() != ISD::DELETED_NODE &&
6898 "Operand is DELETED_NODE!");
6899
6900 canonicalizeCommutativeBinop(Opcode, N1, N2);
6901
6902 auto *N1C = dyn_cast<ConstantSDNode>(N1);
6903 auto *N2C = dyn_cast<ConstantSDNode>(N2);
6904
6905 // Don't allow undefs in vector splats - we might be returning N2 when folding
6906 // to zero etc.
6907 ConstantSDNode *N2CV =
6908 isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true);
6909
6910 switch (Opcode) {
6911 default: break;
6912 case ISD::TokenFactor:
6913 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
6914 N2.getValueType() == MVT::Other && "Invalid token factor!");
6915 // Fold trivial token factors.
6916 if (N1.getOpcode() == ISD::EntryToken) return N2;
6917 if (N2.getOpcode() == ISD::EntryToken) return N1;
6918 if (N1 == N2) return N1;
6919 break;
6920 case ISD::BUILD_VECTOR: {
6921 // Attempt to simplify BUILD_VECTOR.
6922 SDValue Ops[] = {N1, N2};
6923 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
6924 return V;
6925 break;
6926 }
6927 case ISD::CONCAT_VECTORS: {
6928 SDValue Ops[] = {N1, N2};
6929 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
6930 return V;
6931 break;
6932 }
6933 case ISD::AND:
6934 assert(VT.isInteger() && "This operator does not apply to FP types!");
6935 assert(N1.getValueType() == N2.getValueType() &&
6936 N1.getValueType() == VT && "Binary operator types must match!");
6937 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
6938 // worth handling here.
6939 if (N2CV && N2CV->isZero())
6940 return N2;
6941 if (N2CV && N2CV->isAllOnes()) // X & -1 -> X
6942 return N1;
6943 break;
6944 case ISD::OR:
6945 case ISD::XOR:
6946 case ISD::ADD:
6947 case ISD::SUB:
6948 assert(VT.isInteger() && "This operator does not apply to FP types!");
6949 assert(N1.getValueType() == N2.getValueType() &&
6950 N1.getValueType() == VT && "Binary operator types must match!");
6951 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
6952 // it's worth handling here.
6953 if (N2CV && N2CV->isZero())
6954 return N1;
6955 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
6956 VT.getVectorElementType() == MVT::i1)
6957 return getNode(ISD::XOR, DL, VT, N1, N2);
6958 break;
6959 case ISD::MUL:
6960 assert(VT.isInteger() && "This operator does not apply to FP types!");
6961 assert(N1.getValueType() == N2.getValueType() &&
6962 N1.getValueType() == VT && "Binary operator types must match!");
6963 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
6964 return getNode(ISD::AND, DL, VT, N1, N2);
6965 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
6966 const APInt &MulImm = N1->getConstantOperandAPInt(0);
6967 const APInt &N2CImm = N2C->getAPIntValue();
6968 return getVScale(DL, VT, MulImm * N2CImm);
6969 }
6970 break;
6971 case ISD::UDIV:
6972 case ISD::UREM:
6973 case ISD::MULHU:
6974 case ISD::MULHS:
6975 case ISD::SDIV:
6976 case ISD::SREM:
6977 case ISD::SADDSAT:
6978 case ISD::SSUBSAT:
6979 case ISD::UADDSAT:
6980 case ISD::USUBSAT:
6981 assert(VT.isInteger() && "This operator does not apply to FP types!");
6982 assert(N1.getValueType() == N2.getValueType() &&
6983 N1.getValueType() == VT && "Binary operator types must match!");
6984 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
6985 // fold (add_sat x, y) -> (or x, y) for bool types.
6986 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
6987 return getNode(ISD::OR, DL, VT, N1, N2);
6988 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
6989 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
6990 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
6991 }
6992 break;
6993 case ISD::SCMP:
6994 case ISD::UCMP:
6995 assert(N1.getValueType() == N2.getValueType() &&
6996 "Types of operands of UCMP/SCMP must match");
6997 assert(N1.getValueType().isVector() == VT.isVector() &&
6998 "Operands and return type of must both be scalars or vectors");
6999 if (VT.isVector())
7000 assert(VT.getVectorElementCount() ==
7001 N1.getValueType().getVectorElementCount() &&
7002 "Result and operands must have the same number of elements");
7003 break;
7004 case ISD::AVGFLOORS:
7005 case ISD::AVGFLOORU:
7006 case ISD::AVGCEILS:
7007 case ISD::AVGCEILU:
7008 assert(VT.isInteger() && "This operator does not apply to FP types!");
7009 assert(N1.getValueType() == N2.getValueType() &&
7010 N1.getValueType() == VT && "Binary operator types must match!");
7011 break;
7012 case ISD::ABDS:
7013 case ISD::ABDU:
7014 assert(VT.isInteger() && "This operator does not apply to FP types!");
7015 assert(N1.getValueType() == N2.getValueType() &&
7016 N1.getValueType() == VT && "Binary operator types must match!");
7017 break;
7018 case ISD::SMIN:
7019 case ISD::UMAX:
7020 assert(VT.isInteger() && "This operator does not apply to FP types!");
7021 assert(N1.getValueType() == N2.getValueType() &&
7022 N1.getValueType() == VT && "Binary operator types must match!");
7023 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
7024 return getNode(ISD::OR, DL, VT, N1, N2);
7025 break;
7026 case ISD::SMAX:
7027 case ISD::UMIN:
7028 assert(VT.isInteger() && "This operator does not apply to FP types!");
7029 assert(N1.getValueType() == N2.getValueType() &&
7030 N1.getValueType() == VT && "Binary operator types must match!");
7031 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
7032 return getNode(ISD::AND, DL, VT, N1, N2);
7033 break;
7034 case ISD::FADD:
7035 case ISD::FSUB:
7036 case ISD::FMUL:
7037 case ISD::FDIV:
7038 case ISD::FREM:
7039 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
7040 assert(N1.getValueType() == N2.getValueType() &&
7041 N1.getValueType() == VT && "Binary operator types must match!");
7042 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
7043 return V;
7044 break;
7045 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
7046 assert(N1.getValueType() == VT &&
7047 N1.getValueType().isFloatingPoint() &&
7048 N2.getValueType().isFloatingPoint() &&
7049 "Invalid FCOPYSIGN!");
7050 break;
7051 case ISD::SHL:
7052 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
7053 const APInt &MulImm = N1->getConstantOperandAPInt(0);
7054 const APInt &ShiftImm = N2C->getAPIntValue();
7055 return getVScale(DL, VT, MulImm << ShiftImm);
7056 }
7057 [[fallthrough]];
7058 case ISD::SRA:
7059 case ISD::SRL:
7060 if (SDValue V = simplifyShift(N1, N2))
7061 return V;
7062 [[fallthrough]];
7063 case ISD::ROTL:
7064 case ISD::ROTR:
7065 assert(VT == N1.getValueType() &&
7066 "Shift operators return type must be the same as their first arg");
7067 assert(VT.isInteger() && N2.getValueType().isInteger() &&
7068 "Shifts only work on integers");
7069 assert((!VT.isVector() || VT == N2.getValueType()) &&
7070 "Vector shift amounts must be in the same as their first arg");
7071 // Verify that the shift amount VT is big enough to hold valid shift
7072 // amounts. This catches things like trying to shift an i1024 value by an
7073 // i8, which is easy to fall into in generic code that uses
7074 // TLI.getShiftAmount().
7075 assert(N2.getValueType().getScalarSizeInBits() >=
7076 Log2_32_Ceil(VT.getScalarSizeInBits()) &&
7077 "Invalid use of small shift amount with oversized value!");
7078
7079 // Always fold shifts of i1 values so the code generator doesn't need to
7080 // handle them. Since we know the size of the shift has to be less than the
7081 // size of the value, the shift/rotate count is guaranteed to be zero.
7082 if (VT == MVT::i1)
7083 return N1;
7084 if (N2CV && N2CV->isZero())
7085 return N1;
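    // Illustrative (editorial note): (srl i1 %x, %amt) folds to %x because
    // the only shift amount smaller than the 1-bit value width is 0; the
    // same applies to any shift/rotate by a constant-zero amount.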
7086 break;
7087 case ISD::FP_ROUND:
7088 assert(VT.isFloatingPoint() &&
7089 N1.getValueType().isFloatingPoint() &&
7090 VT.bitsLE(N1.getValueType()) &&
7091 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
7092 "Invalid FP_ROUND!");
7093 if (N1.getValueType() == VT) return N1; // noop conversion.
7094 break;
7095 case ISD::AssertSext:
7096 case ISD::AssertZext: {
7097 EVT EVT = cast<VTSDNode>(N2)->getVT();
7098 assert(VT == N1.getValueType() && "Not an inreg extend!");
7099 assert(VT.isInteger() && EVT.isInteger() &&
7100 "Cannot *_EXTEND_INREG FP types");
7101 assert(!EVT.isVector() &&
7102 "AssertSExt/AssertZExt type should be the vector element type "
7103 "rather than the vector type!");
7104 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
7105 if (VT.getScalarType() == EVT) return N1; // noop assertion.
7106 break;
7107 }
7108 case ISD::SIGN_EXTEND_INREG: {
7109 EVT EVT = cast<VTSDNode>(N2)->getVT();
7110 assert(VT == N1.getValueType() && "Not an inreg extend!");
7111 assert(VT.isInteger() && EVT.isInteger() &&
7112 "Cannot *_EXTEND_INREG FP types");
7113 assert(EVT.isVector() == VT.isVector() &&
7114 "SIGN_EXTEND_INREG type should be vector iff the operand "
7115 "type is vector!");
7116 assert((!EVT.isVector() ||
7117 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
7118 "Vector element counts must match in SIGN_EXTEND_INREG");
7119 assert(EVT.bitsLE(VT) && "Not extending!");
7120 if (EVT == VT) return N1; // Not actually extending
7121
7122 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
7123 unsigned FromBits = EVT.getScalarSizeInBits();
7124 Val <<= Val.getBitWidth() - FromBits;
7125 Val.ashrInPlace(Val.getBitWidth() - FromBits);
7126 return getConstant(Val, DL, ConstantVT);
7127 };
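    // Worked example (editorial note): with Val = APInt(32, 0x80) and
    // EVT = i8, FromBits = 8, so Val <<= 24 gives 0x80000000 and
    // ashrInPlace(24) gives 0xFFFFFF80, i.e. the i8 value -128
    // sign-extended in place within the i32 constant.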
7128
7129 if (N1C) {
7130 const APInt &Val = N1C->getAPIntValue();
7131 return SignExtendInReg(Val, VT);
7132 }
7133
7134 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
7135 SmallVector<SDValue, 8> Ops;
7136 llvm::EVT OpVT = N1.getOperand(0).getValueType();
7137 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7138 SDValue Op = N1.getOperand(i);
7139 if (Op.isUndef()) {
7140 Ops.push_back(getUNDEF(OpVT));
7141 continue;
7142 }
7143 ConstantSDNode *C = cast<ConstantSDNode>(Op);
7144 APInt Val = C->getAPIntValue();
7145 Ops.push_back(SignExtendInReg(Val, OpVT));
7146 }
7147 return getBuildVector(VT, DL, Ops);
7148 }
7149
7150 if (N1.getOpcode() == ISD::SPLAT_VECTOR &&
7151 isa<ConstantSDNode>(N1.getOperand(0)))
7152 return getNode(
7153 ISD::SPLAT_VECTOR, DL, VT,
7154 SignExtendInReg(N1.getConstantOperandAPInt(0),
7155 N1.getOperand(0).getValueType()));
7156 break;
7157 }
7158 case ISD::FP_TO_SINT_SAT:
7159 case ISD::FP_TO_UINT_SAT: {
7160 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
7161 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
7162 assert(N1.getValueType().isVector() == VT.isVector() &&
7163 "FP_TO_*INT_SAT type should be vector iff the operand type is "
7164 "vector!");
7165 assert((!VT.isVector() || VT.getVectorElementCount() ==
7166 N1.getValueType().getVectorElementCount()) &&
7167 "Vector element counts must match in FP_TO_*INT_SAT");
7168 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
7169 "Type to saturate to must be a scalar.");
7170 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
7171 "Not extending!");
7172 break;
7173 }
7174 case ISD::EXTRACT_VECTOR_ELT:
7175 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
7176 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
7177 element type of the vector.");
7178
7179 // Extract from an undefined value or using an undefined index is undefined.
7180 if (N1.isUndef() || N2.isUndef())
7181 return getUNDEF(VT);
7182
7183 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
7184 // vectors. For scalable vectors we will provide appropriate support for
7185 // dealing with arbitrary indices.
7186 if (N2C && N1.getValueType().isFixedLengthVector() &&
7187 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
7188 return getUNDEF(VT);
7189
7190 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
7191 // expanding copies of large vectors from registers. This only works for
7192 // fixed length vectors, since we need to know the exact number of
7193 // elements.
7194 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
7195 N1.getOperand(0).getValueType().isFixedLengthVector()) {
7196 unsigned Factor =
7197 N1.getOperand(0).getValueType().getVectorNumElements();
7198 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
7199 N1.getOperand(N2C->getZExtValue() / Factor),
7200 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
7201 }
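    // Illustrative (editorial note): extracting lane 5 of
    // (concat_vectors v4i32 A, v4i32 B) has Factor = 4, so it becomes
    // (extract_vector_elt B, 1), i.e. operand 5/4 at index 5%4.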
7202
7203 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
7204 // lowering is expanding large vector constants.
7205 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
7206 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
7207 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
7208 N1.getValueType().isFixedLengthVector()) &&
7209 "BUILD_VECTOR used for scalable vectors");
7210 unsigned Index =
7211 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
7212 SDValue Elt = N1.getOperand(Index);
7213
7214 if (VT != Elt.getValueType())
7215 // If the vector element type is not legal, the BUILD_VECTOR operands
7216 // are promoted and implicitly truncated, and the result implicitly
7217 // extended. Make that explicit here.
7218 Elt = getAnyExtOrTrunc(Elt, DL, VT);
7219
7220 return Elt;
7221 }
7222
7223 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
7224 // operations are lowered to scalars.
7225 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
7226 // If the indices are the same, return the inserted element else
7227 // if the indices are known different, extract the element from
7228 // the original vector.
7229 SDValue N1Op2 = N1.getOperand(2);
7230 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
7231
7232 if (N1Op2C && N2C) {
7233 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
7234 if (VT == N1.getOperand(1).getValueType())
7235 return N1.getOperand(1);
7236 if (VT.isFloatingPoint()) {
7237 assert(VT.getSizeInBits() > N1.getOperand(1).getValueType().getSizeInBits());
7238 return getFPExtendOrRound(N1.getOperand(1), DL, VT);
7239 }
7240 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
7241 }
7242 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
7243 }
7244 }
7245
7246 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
7247 // when vector types are scalarized and v1iX is legal.
7248 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
7249 // Here we are completely ignoring the extract element index (N2),
7250 // which is fine for fixed width vectors, since any index other than 0
7251 // is undefined anyway. However, this cannot be ignored for scalable
7252 // vectors - in theory we could support this, but we don't want to do this
7253 // without a profitability check.
7254 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7255 N1.getValueType().isFixedLengthVector() &&
7256 N1.getValueType().getVectorNumElements() == 1) {
7257 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
7258 N1.getOperand(1));
7259 }
7260 break;
7261 case ISD::EXTRACT_ELEMENT:
7262 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
7263 assert(!N1.getValueType().isVector() && !VT.isVector() &&
7264 (N1.getValueType().isInteger() == VT.isInteger()) &&
7265 N1.getValueType() != VT &&
7266 "Wrong types for EXTRACT_ELEMENT!");
7267
7268 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
7269 // 64-bit integers into 32-bit parts. Instead of building the extract of
7270 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
7271 if (N1.getOpcode() == ISD::BUILD_PAIR)
7272 return N1.getOperand(N2C->getZExtValue());
7273
7274 // EXTRACT_ELEMENT of a constant int is also very common.
7275 if (N1C) {
7276 unsigned ElementSize = VT.getSizeInBits();
7277 unsigned Shift = ElementSize * N2C->getZExtValue();
7278 const APInt &Val = N1C->getAPIntValue();
7279 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
7280 }
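    // Worked example (editorial note): EXTRACT_ELEMENT of the i64 constant
    // 0x123456789ABCDEF0 with index 1 and VT = i32 extracts bits [32, 64),
    // yielding 0x12345678.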
7281 break;
7282 case ISD::EXTRACT_SUBVECTOR: {
7283 EVT N1VT = N1.getValueType();
7284 assert(VT.isVector() && N1VT.isVector() &&
7285 "Extract subvector VTs must be vectors!");
7286 assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
7287 "Extract subvector VTs must have the same element type!");
7288 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
7289 "Cannot extract a scalable vector from a fixed length vector!");
7290 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
7291 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
7292 "Extract subvector must be from larger vector to smaller vector!");
7293 assert(N2C && "Extract subvector index must be a constant");
7294 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
7295 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
7296 N1VT.getVectorMinNumElements()) &&
7297 "Extract subvector overflow!");
7298 assert(N2C->getAPIntValue().getBitWidth() ==
7299 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
7300 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
7301
7302 // Trivial extraction.
7303 if (VT == N1VT)
7304 return N1;
7305
7306 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
7307 if (N1.isUndef())
7308 return getUNDEF(VT);
7309
7310 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
7311 // the concat have the same type as the extract.
7312 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
7313 VT == N1.getOperand(0).getValueType()) {
7314 unsigned Factor = VT.getVectorMinNumElements();
7315 return N1.getOperand(N2C->getZExtValue() / Factor);
7316 }
7317
7318 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
7319 // during shuffle legalization.
7320 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
7321 VT == N1.getOperand(1).getValueType())
7322 return N1.getOperand(1);
7323 break;
7324 }
7325 }
7326
7327 // Perform trivial constant folding.
7328 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}, Flags))
7329 return SV;
7330
7331 // Canonicalize an UNDEF to the RHS, even over a constant.
7332 if (N1.isUndef()) {
7333 if (TLI->isCommutativeBinOp(Opcode)) {
7334 std::swap(N1, N2);
7335 } else {
7336 switch (Opcode) {
7337 case ISD::SUB:
7338 return getUNDEF(VT); // fold op(undef, arg2) -> undef
7339 case ISD::SIGN_EXTEND_INREG:
7340 case ISD::UDIV:
7341 case ISD::SDIV:
7342 case ISD::UREM:
7343 case ISD::SREM:
7344 case ISD::SSUBSAT:
7345 case ISD::USUBSAT:
7346 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
7347 }
7348 }
7349 }
7350
7351 // Fold a bunch of operators when the RHS is undef.
7352 if (N2.isUndef()) {
7353 switch (Opcode) {
7354 case ISD::XOR:
7355 if (N1.isUndef())
7356 // Handle undef ^ undef -> 0 special case. This is a common
7357 // idiom (misuse).
7358 return getConstant(0, DL, VT);
7359 [[fallthrough]];
7360 case ISD::ADD:
7361 case ISD::SUB:
7362 case ISD::UDIV:
7363 case ISD::SDIV:
7364 case ISD::UREM:
7365 case ISD::SREM:
7366 return getUNDEF(VT); // fold op(arg1, undef) -> undef
7367 case ISD::MUL:
7368 case ISD::AND:
7369 case ISD::SSUBSAT:
7370 case ISD::USUBSAT:
7371 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
7372 case ISD::OR:
7373 case ISD::SADDSAT:
7374 case ISD::UADDSAT:
7375 return getAllOnesConstant(DL, VT);
7376 }
7377 }
7378
7379 // Memoize this node if possible.
7380 SDNode *N;
7381 SDVTList VTs = getVTList(VT);
7382 SDValue Ops[] = {N1, N2};
7383 if (VT != MVT::Glue) {
7384 FoldingSetNodeID ID;
7385 AddNodeIDNode(ID, Opcode, VTs, Ops);
7386 void *IP = nullptr;
7387 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7388 E->intersectFlagsWith(Flags);
7389 return SDValue(E, 0);
7390 }
7391
7392 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7393 N->setFlags(Flags);
7394 createOperands(N, Ops);
7395 CSEMap.InsertNode(N, IP);
7396 } else {
7397 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7398 createOperands(N, Ops);
7399 }
7400
7401 InsertNode(N);
7402 SDValue V = SDValue(N, 0);
7403 NewSDValueDbgMsg(V, "Creating new node: ", this);
7404 return V;
7405 }
7406
7407 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7408 SDValue N1, SDValue N2, SDValue N3) {
7409 SDNodeFlags Flags;
7410 if (Inserter)
7411 Flags = Inserter->getFlags();
7412 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
7413 }
7414
7415 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7416 SDValue N1, SDValue N2, SDValue N3,
7417 const SDNodeFlags Flags) {
7418 assert(N1.getOpcode() != ISD::DELETED_NODE &&
7419 N2.getOpcode() != ISD::DELETED_NODE &&
7420 N3.getOpcode() != ISD::DELETED_NODE &&
7421 "Operand is DELETED_NODE!");
7422 // Perform various simplifications.
7423 switch (Opcode) {
7424 case ISD::FMA:
7425 case ISD::FMAD: {
7426 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
7427 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
7428 N3.getValueType() == VT && "FMA types must match!");
7429 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
7430 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
7431 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
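// Constant fold. Note the rounding difference: FMAD rounds after the
// multiply and again after the add, while FMA rounds only once.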
7432 if (N1CFP && N2CFP && N3CFP) {
7433 APFloat V1 = N1CFP->getValueAPF();
7434 const APFloat &V2 = N2CFP->getValueAPF();
7435 const APFloat &V3 = N3CFP->getValueAPF();
7436 if (Opcode == ISD::FMAD) {
7437 V1.multiply(V2, APFloat::rmNearestTiesToEven);
7438 V1.add(V3, APFloat::rmNearestTiesToEven);
7439 } else
7440 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
7441 return getConstantFP(V1, DL, VT);
7442 }
7443 break;
7444 }
7445 case ISD::BUILD_VECTOR: {
7446 // Attempt to simplify BUILD_VECTOR.
7447 SDValue Ops[] = {N1, N2, N3};
7448 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7449 return V;
7450 break;
7451 }
7452 case ISD::CONCAT_VECTORS: {
7453 SDValue Ops[] = {N1, N2, N3};
7454 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
7455 return V;
7456 break;
7457 }
7458 case ISD::SETCC: {
7459 assert(VT.isInteger() && "SETCC result type must be an integer!");
7460 assert(N1.getValueType() == N2.getValueType() &&
7461 "SETCC operands must have the same type!");
7462 assert(VT.isVector() == N1.getValueType().isVector() &&
7463 "SETCC type should be vector iff the operand type is vector!");
7464 assert((!VT.isVector() || VT.getVectorElementCount() ==
7465 N1.getValueType().getVectorElementCount()) &&
7466 "SETCC vector element counts must match!");
7467 // Use FoldSetCC to simplify SETCC's.
7468 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
7469 return V;
7470 // Vector constant folding.
7471 SDValue Ops[] = {N1, N2, N3};
7472 if (SDValue V = FoldConstantArithmetic(Opcode, DL, VT, Ops)) {
7473 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
7474 return V;
7475 }
7476 break;
7477 }
7478 case ISD::SELECT:
7479 case ISD::VSELECT:
7480 if (SDValue V = simplifySelect(N1, N2, N3))
7481 return V;
7482 break;
7483 case ISD::VECTOR_SHUFFLE:
7484 llvm_unreachable("should use getVectorShuffle constructor!");
7485 case ISD::VECTOR_SPLICE: {
7486 if (cast<ConstantSDNode>(N3)->isZero())
7487 return N1;
7488 break;
7489 }
7490 case ISD::INSERT_VECTOR_ELT: {
7491 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
7492 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
7493 // for scalable vectors where we will generate appropriate code to
7494 // deal with out-of-bounds cases correctly.
7495 if (N3C && N1.getValueType().isFixedLengthVector() &&
7496 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
7497 return getUNDEF(VT);
7498
7499 // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
7500 if (N3.isUndef())
7501 return getUNDEF(VT);
7502
7503 // If the inserted element is an UNDEF, just use the input vector.
7504 if (N2.isUndef())
7505 return N1;
7506
7507 break;
7508 }
7509 case ISD::INSERT_SUBVECTOR: {
7510 // Inserting undef into undef is still undef.
7511 if (N1.isUndef() && N2.isUndef())
7512 return getUNDEF(VT);
7513
7514 EVT N2VT = N2.getValueType();
7515 assert(VT == N1.getValueType() &&
7516 "Dest and insert subvector source types must match!");
7517 assert(VT.isVector() && N2VT.isVector() &&
7518 "Insert subvector VTs must be vectors!");
7519 assert(VT.getVectorElementType() == N2VT.getVectorElementType() &&
7520 "Insert subvector VTs must have the same element type!");
7521 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
7522 "Cannot insert a scalable vector into a fixed length vector!");
7523 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
7524 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
7525 "Insert subvector must be from smaller vector to larger vector!");
7526 assert(isa<ConstantSDNode>(N3) &&
7527 "Insert subvector index must be constant");
7528 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
7529 (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <=
7530 VT.getVectorMinNumElements()) &&
7531 "Insert subvector overflow!");
7532 assert(N3->getAsAPIntVal().getBitWidth() ==
7533 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
7534 "Constant index for INSERT_SUBVECTOR has an invalid size");
7535
7536 // Trivial insertion.
7537 if (VT == N2VT)
7538 return N2;
7539
7540 // If this is an insert of an extracted vector into an undef vector, we
7541 // can just use the input to the extract.
7542 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7543 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
7544 return N2.getOperand(0);
7545 break;
7546 }
7547 case ISD::BITCAST:
7548 // Fold bit_convert nodes from a type to themselves.
7549 if (N1.getValueType() == VT)
7550 return N1;
7551 break;
7552 case ISD::VP_TRUNCATE:
7553 case ISD::VP_SIGN_EXTEND:
7554 case ISD::VP_ZERO_EXTEND:
7555 // Don't create noop casts.
7556 if (N1.getValueType() == VT)
7557 return N1;
7558 break;
7559 case ISD::VECTOR_COMPRESS: {
7560 [[maybe_unused]] EVT VecVT = N1.getValueType();
7561 [[maybe_unused]] EVT MaskVT = N2.getValueType();
7562 [[maybe_unused]] EVT PassthruVT = N3.getValueType();
7563 assert(VT == VecVT && "Vector and result type don't match.");
7564 assert(VecVT.isVector() && MaskVT.isVector() && PassthruVT.isVector() &&
7565 "All inputs must be vectors.");
7566 assert(VecVT == PassthruVT && "Vector and passthru types don't match.");
7567 assert(VecVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
7568 "Vector and mask must have same number of elements.");
7569
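// If the source vector or the mask is undef, the compressed lanes are
// unspecified, so the passthru operand is a valid result.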
7570 if (N1.isUndef() || N2.isUndef())
7571 return N3;
7572
7573 break;
7574 }
7575 }
7576
7577 // Memoize node if it doesn't produce a glue result.
7578 SDNode *N;
7579 SDVTList VTs = getVTList(VT);
7580 SDValue Ops[] = {N1, N2, N3};
7581 if (VT != MVT::Glue) {
7582 FoldingSetNodeID ID;
7583 AddNodeIDNode(ID, Opcode, VTs, Ops);
7584 void *IP = nullptr;
7585 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7586 E->intersectFlagsWith(Flags);
7587 return SDValue(E, 0);
7588 }
7589
7590 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7591 N->setFlags(Flags);
7592 createOperands(N, Ops);
7593 CSEMap.InsertNode(N, IP);
7594 } else {
7595 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7596 createOperands(N, Ops);
7597 }
7598
7599 InsertNode(N);
7600 SDValue V = SDValue(N, 0);
7601 NewSDValueDbgMsg(V, "Creating new node: ", this);
7602 return V;
7603 }
7604
7605 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7606 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7607 SDValue Ops[] = { N1, N2, N3, N4 };
7608 return getNode(Opcode, DL, VT, Ops);
7609 }
7610
7611 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7612 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7613 SDValue N5) {
7614 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7615 return getNode(Opcode, DL, VT, Ops);
7616 }
7617
7618 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
7619 /// the incoming stack arguments to be loaded from the stack.
7620 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
7621 SmallVector<SDValue, 8> ArgChains;
7622
7623 // Include the original chain at the beginning of the list. When this is
7624 // used by target LowerCall hooks, this helps the legalizer find the
7625 // CALLSEQ_BEGIN node.
7626 ArgChains.push_back(Chain);
7627
7628 // Add a chain value for each stack argument.
7629 for (SDNode *U : getEntryNode().getNode()->uses())
7630 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
7631 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
7632 if (FI->getIndex() < 0)
7633 ArgChains.push_back(SDValue(L, 1));
7634
7635 // Build a tokenfactor for all the chains.
7636 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
7637 }
7638
7639 /// getMemsetValue - Vectorized representation of the memset value
7640 /// operand.
7641 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
7642 const SDLoc &dl) {
7643 assert(!Value.isUndef());
7644
7645 unsigned NumBits = VT.getScalarSizeInBits();
7646 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
7647 assert(C->getAPIntValue().getBitWidth() == 8);
7648 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
7649 if (VT.isInteger()) {
7650 bool IsOpaque = VT.getSizeInBits() > 64 ||
7651 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
7652 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
7653 }
7654 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
7655 VT);
7656 }
7657
7658 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
7659 EVT IntVT = VT.getScalarType();
7660 if (!IntVT.isInteger())
7661 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
7662
7663 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
7664 if (NumBits > 8) {
7665 // Use a multiplication with 0x010101... to extend the input to the
7666 // required length.
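// e.g. for i32: 0xAB * 0x01010101 --> 0xABABABAB.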
7667 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
7668 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
7669 DAG.getConstant(Magic, dl, IntVT));
7670 }
7671
7672 if (VT != Value.getValueType() && !VT.isInteger())
7673 Value = DAG.getBitcast(VT.getScalarType(), Value);
7674 if (VT != Value.getValueType())
7675 Value = DAG.getSplatBuildVector(VT, dl, Value);
7676
7677 return Value;
7678 }
7679
7680 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
7681 /// when a memcpy is turned into a memset because the source is a constant
7682 /// string pointer.
7683 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
7684 const TargetLowering &TLI,
7685 const ConstantDataArraySlice &Slice) {
7686 // Handle vector with all elements zero.
7687 if (Slice.Array == nullptr) {
7688 if (VT.isInteger())
7689 return DAG.getConstant(0, dl, VT);
7690 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
7691 return DAG.getConstantFP(0.0, dl, VT);
7692 if (VT.isVector()) {
7693 unsigned NumElts = VT.getVectorNumElements();
7694 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
7695 return DAG.getNode(ISD::BITCAST, dl, VT,
7696 DAG.getConstant(0, dl,
7697 EVT::getVectorVT(*DAG.getContext(),
7698 EltVT, NumElts)));
7699 }
7700 llvm_unreachable("Expected type!");
7701 }
7702
7703 assert(!VT.isVector() && "Can't handle vector type here!");
7704 unsigned NumVTBits = VT.getSizeInBits();
7705 unsigned NumVTBytes = NumVTBits / 8;
7706 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
7707
7708 APInt Val(NumVTBits, 0);
7709 if (DAG.getDataLayout().isLittleEndian()) {
7710 for (unsigned i = 0; i != NumBytes; ++i)
7711 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
7712 } else {
7713 for (unsigned i = 0; i != NumBytes; ++i)
7714 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
7715 }
7716
7717 // If the "cost" of materializing the integer immediate is less than the cost
7718 // of a load, then it is cost effective to turn the load into the immediate.
7719 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
7720 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
7721 return DAG.getConstant(Val, dl, VT);
7722 return SDValue();
7723 }
7724
7725 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
7726 const SDLoc &DL,
7727 const SDNodeFlags Flags) {
7728 EVT VT = Base.getValueType();
7729 SDValue Index;
7730
7731 if (Offset.isScalable())
7732 Index = getVScale(DL, Base.getValueType(),
7733 APInt(Base.getValueSizeInBits().getFixedValue(),
7734 Offset.getKnownMinValue()));
7735 else
7736 Index = getConstant(Offset.getFixedValue(), DL, VT);
7737
7738 return getMemBasePlusOffset(Base, Index, DL, Flags);
7739 }
7740
7741 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
7742 const SDLoc &DL,
7743 const SDNodeFlags Flags) {
7744 assert(Offset.getValueType().isInteger());
7745 EVT BasePtrVT = Ptr.getValueType();
7746 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
7747 }
7748
7749 /// Returns true if memcpy source is constant data.
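/// This matches either a plain GlobalAddress or (add GlobalAddress,
/// Constant) and, on success, fills Slice from the global's initializer.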
7750 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
7751 uint64_t SrcDelta = 0;
7752 GlobalAddressSDNode *G = nullptr;
7753 if (Src.getOpcode() == ISD::GlobalAddress)
7754 G = cast<GlobalAddressSDNode>(Src);
7755 else if (Src.getOpcode() == ISD::ADD &&
7756 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
7757 Src.getOperand(1).getOpcode() == ISD::Constant) {
7758 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
7759 SrcDelta = Src.getConstantOperandVal(1);
7760 }
7761 if (!G)
7762 return false;
7763
7764 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
7765 SrcDelta + G->getOffset());
7766 }
7767
7768 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
7769 SelectionDAG &DAG) {
7770 // On Darwin, -Os means optimize for size without hurting performance, so
7771 // only really optimize for size when -Oz (MinSize) is used.
7772 if (MF.getTarget().getTargetTriple().isOSDarwin())
7773 return MF.getFunction().hasMinSize();
7774 return DAG.shouldOptForSize();
7775 }
7776
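/// Chain the loads and stores in [From, To): token-factor the load chains
/// together and reissue each store against that token, so every store in
/// the group depends on all of the group's loads.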
7777 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
7778 SmallVector<SDValue, 32> &OutChains, unsigned From,
7779 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
7780 SmallVector<SDValue, 16> &OutStoreChains) {
7781 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
7782 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
7783 SmallVector<SDValue, 16> GluedLoadChains;
7784 for (unsigned i = From; i < To; ++i) {
7785 OutChains.push_back(OutLoadChains[i]);
7786 GluedLoadChains.push_back(OutLoadChains[i]);
7787 }
7788
7789 // Chain for all loads.
7790 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7791 GluedLoadChains);
7792
7793 for (unsigned i = From; i < To; ++i) {
7794 StoreSDNode *ST = cast<StoreSDNode>(OutStoreChains[i]);
7795 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
7796 ST->getBasePtr(), ST->getMemoryVT(),
7797 ST->getMemOperand());
7798 OutChains.push_back(NewStore);
7799 }
7800 }
7801
7802 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
7803 SDValue Chain, SDValue Dst, SDValue Src,
7804 uint64_t Size, Align Alignment,
7805 bool isVol, bool AlwaysInline,
7806 MachinePointerInfo DstPtrInfo,
7807 MachinePointerInfo SrcPtrInfo,
7808 const AAMDNodes &AAInfo, AAResults *AA) {
7809 // Turn a memcpy of undef into a nop.
7810 // FIXME: We need to honor volatile even if Src is undef.
7811 if (Src.isUndef())
7812 return Chain;
7813
7814 // Expand memcpy to a series of load and store ops if the size operand falls
7815 // below a certain threshold.
7816 // TODO: In the AlwaysInline case, if the size is big then generate a loop
7817 // rather than a potentially huge number of loads and stores.
7818 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7819 const DataLayout &DL = DAG.getDataLayout();
7820 LLVMContext &C = *DAG.getContext();
7821 std::vector<EVT> MemOps;
7822 bool DstAlignCanChange = false;
7823 MachineFunction &MF = DAG.getMachineFunction();
7824 MachineFrameInfo &MFI = MF.getFrameInfo();
7825 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
7826 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
7827 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
7828 DstAlignCanChange = true;
7829 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
7830 if (!SrcAlign || Alignment > *SrcAlign)
7831 SrcAlign = Alignment;
7832 assert(SrcAlign && "SrcAlign must be set");
7833 ConstantDataArraySlice Slice;
7834 // If marked as volatile, perform a copy even when marked as constant.
7835 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
7836 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
7837 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
7838 const MemOp Op = isZeroConstant
7839 ? MemOp::Set(Size, DstAlignCanChange, Alignment,
7840 /*IsZeroMemset*/ true, isVol)
7841 : MemOp::Copy(Size, DstAlignCanChange, Alignment,
7842 *SrcAlign, isVol, CopyFromConstant);
7843 if (!TLI.findOptimalMemOpLowering(
7844 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
7845 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
7846 return SDValue();
7847
7848 if (DstAlignCanChange) {
7849 Type *Ty = MemOps[0].getTypeForEVT(C);
7850 Align NewAlign = DL.getABITypeAlign(Ty);
7851
7852 // Don't promote to an alignment that would require dynamic stack
7853 // realignment which may conflict with optimizations such as tail call
7854 // optimization.
7855 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
7856 if (!TRI->hasStackRealignment(MF))
7857 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
7858 NewAlign = NewAlign.previous();
7859
7860 if (NewAlign > Alignment) {
7861 // Give the stack frame object a larger alignment if needed.
7862 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
7863 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
7864 Alignment = NewAlign;
7865 }
7866 }
7867
7868 // Prepare AAInfo for loads/stores after lowering this memcpy.
7869 AAMDNodes NewAAInfo = AAInfo;
7870 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
7871
7872 const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V);
7873 bool isConstant =
7874 AA && SrcVal &&
7875 AA->pointsToConstantMemory(MemoryLocation(SrcVal, Size, AAInfo));
7876
7877 MachineMemOperand::Flags MMOFlags =
7878 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
7879 SmallVector<SDValue, 16> OutLoadChains;
7880 SmallVector<SDValue, 16> OutStoreChains;
7881 SmallVector<SDValue, 32> OutChains;
7882 unsigned NumMemOps = MemOps.size();
7883 uint64_t SrcOff = 0, DstOff = 0;
7884 for (unsigned i = 0; i != NumMemOps; ++i) {
7885 EVT VT = MemOps[i];
7886 unsigned VTSize = VT.getSizeInBits() / 8;
7887 SDValue Value, Store;
7888
7889 if (VTSize > Size) {
7890 // Issuing an unaligned load / store pair that overlaps with the previous
7891 // pair. Adjust the offset accordingly.
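// e.g. a 7-byte copy lowered as two i32 ops uses offsets 0 and 3.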
7892 assert(i == NumMemOps-1 && i != 0);
7893 SrcOff -= VTSize - Size;
7894 DstOff -= VTSize - Size;
7895 }
7896
7897 if (CopyFromConstant &&
7898 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
7899 // It's unlikely a store of a vector immediate can be done in a single
7900 // instruction. It would require a load from a constant pool first.
7901 // We only handle zero vectors here.
7902 // FIXME: Handle other cases where store of vector immediate is done in
7903 // a single instruction.
7904 ConstantDataArraySlice SubSlice;
7905 if (SrcOff < Slice.Length) {
7906 SubSlice = Slice;
7907 SubSlice.move(SrcOff);
7908 } else {
7909 // This is an out-of-bounds access and hence UB. Pretend we read zero.
7910 SubSlice.Array = nullptr;
7911 SubSlice.Offset = 0;
7912 SubSlice.Length = VTSize;
7913 }
7914 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
7915 if (Value.getNode()) {
7916 Store = DAG.getStore(
7917 Chain, dl, Value,
7918 DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
7919 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
7920 OutChains.push_back(Store);
7921 }
7922 }
7923
7924 if (!Store.getNode()) {
7925 // The type might not be legal for the target. This should only happen
7926 // if the type is smaller than a legal type, as on PPC, so the right
7927 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
7928 // to Load/Store if NVT==VT.
7929 // FIXME: Does the case above also need this?
7930 EVT NVT = TLI.getTypeToTransformTo(C, VT);
7931 assert(NVT.bitsGE(VT));
7932
7933 bool isDereferenceable =
7934 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
7935 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
7936 if (isDereferenceable)
7937 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
7938 if (isConstant)
7939 SrcMMOFlags |= MachineMemOperand::MOInvariant;
7940
7941 Value = DAG.getExtLoad(
7942 ISD::EXTLOAD, dl, NVT, Chain,
7943 DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl),
7944 SrcPtrInfo.getWithOffset(SrcOff), VT,
7945 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
7946 OutLoadChains.push_back(Value.getValue(1));
7947
7948 Store = DAG.getTruncStore(
7949 Chain, dl, Value,
7950 DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
7951 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
7952 OutStoreChains.push_back(Store);
7953 }
7954 SrcOff += VTSize;
7955 DstOff += VTSize;
7956 Size -= VTSize;
7957 }
7958
7959 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
7960 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
7961 unsigned NumLdStInMemcpy = OutStoreChains.size();
7962
7963 if (NumLdStInMemcpy) {
7964 // The memcpy may have been converted to a memset if it copies constant
7965 // data. In that case there are only stores, no loads, and in the
7966 // absence of loads there is nothing to gang up.
7967 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
7968 // If the target does not care, just leave the order as is.
7969 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
7970 OutChains.push_back(OutLoadChains[i]);
7971 OutChains.push_back(OutStoreChains[i]);
7972 }
7973 } else {
7974 // Ld/St less than/equal limit set by target.
7975 if (NumLdStInMemcpy <= GluedLdStLimit) {
7976 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
7977 NumLdStInMemcpy, OutLoadChains,
7978 OutStoreChains);
7979 } else {
7980 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
7981 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
7982 unsigned GlueIter = 0;
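// Gang up the tail first; e.g. 10 ops with a limit of 4 are chained as
// [6,10), then [2,6), with the residual [0,2) handled below.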
7983
7984 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
7985 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
7986 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
7987
7988 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
7989 OutLoadChains, OutStoreChains);
7990 GlueIter += GluedLdStLimit;
7991 }
7992
7993 // Residual ld/st.
7994 if (RemainingLdStInMemcpy) {
7995 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
7996 RemainingLdStInMemcpy, OutLoadChains,
7997 OutStoreChains);
7998 }
7999 }
8000 }
8001 }
8002 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
8003 }
8004
8005 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
8006 SDValue Chain, SDValue Dst, SDValue Src,
8007 uint64_t Size, Align Alignment,
8008 bool isVol, bool AlwaysInline,
8009 MachinePointerInfo DstPtrInfo,
8010 MachinePointerInfo SrcPtrInfo,
8011 const AAMDNodes &AAInfo) {
8012 // Turn a memmove of undef into a nop.
8013 // FIXME: We need to honor volatile even if Src is undef.
8014 if (Src.isUndef())
8015 return Chain;
8016
8017 // Expand memmove to a series of load and store ops if the size operand falls
8018 // below a certain threshold.
8019 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8020 const DataLayout &DL = DAG.getDataLayout();
8021 LLVMContext &C = *DAG.getContext();
8022 std::vector<EVT> MemOps;
8023 bool DstAlignCanChange = false;
8024 MachineFunction &MF = DAG.getMachineFunction();
8025 MachineFrameInfo &MFI = MF.getFrameInfo();
8026 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
8027 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
8028 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
8029 DstAlignCanChange = true;
8030 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
8031 if (!SrcAlign || Alignment > *SrcAlign)
8032 SrcAlign = Alignment;
8033 assert(SrcAlign && "SrcAlign must be set");
8034 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
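// Note: the copy is modelled as volatile below, which disallows the
// overlapping-access trick used for memcpy; overlap is unsafe when the
// source and destination may alias.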
8035 if (!TLI.findOptimalMemOpLowering(
8036 MemOps, Limit,
8037 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
8038 /*IsVolatile*/ true),
8039 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
8040 MF.getFunction().getAttributes()))
8041 return SDValue();
8042
8043 if (DstAlignCanChange) {
8044 Type *Ty = MemOps[0].getTypeForEVT(C);
8045 Align NewAlign = DL.getABITypeAlign(Ty);
8046
8047 // Don't promote to an alignment that would require dynamic stack
8048 // realignment which may conflict with optimizations such as tail call
8049 // optimization.
8050 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
8051 if (!TRI->hasStackRealignment(MF))
8052 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
8053 NewAlign = NewAlign.previous();
8054
8055 if (NewAlign > Alignment) {
8056 // Give the stack frame object a larger alignment if needed.
8057 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
8058 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
8059 Alignment = NewAlign;
8060 }
8061 }
8062
8063 // Prepare AAInfo for loads/stores after lowering this memmove.
8064 AAMDNodes NewAAInfo = AAInfo;
8065 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
8066
8067 MachineMemOperand::Flags MMOFlags =
8068 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
8069 uint64_t SrcOff = 0, DstOff = 0;
8070 SmallVector<SDValue, 8> LoadValues;
8071 SmallVector<SDValue, 8> LoadChains;
8072 SmallVector<SDValue, 8> OutChains;
8073 unsigned NumMemOps = MemOps.size();
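// Unlike memcpy, all loads must be emitted before any store, because the
// source and destination regions may overlap.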
8074 for (unsigned i = 0; i < NumMemOps; i++) {
8075 EVT VT = MemOps[i];
8076 unsigned VTSize = VT.getSizeInBits() / 8;
8077 SDValue Value;
8078
8079 bool isDereferenceable =
8080 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
8081 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
8082 if (isDereferenceable)
8083 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
8084
8085 Value = DAG.getLoad(
8086 VT, dl, Chain,
8087 DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl),
8088 SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
8089 LoadValues.push_back(Value);
8090 LoadChains.push_back(Value.getValue(1));
8091 SrcOff += VTSize;
8092 }
8093 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
8094 OutChains.clear();
8095 for (unsigned i = 0; i < NumMemOps; i++) {
8096 EVT VT = MemOps[i];
8097 unsigned VTSize = VT.getSizeInBits() / 8;
8098 SDValue Store;
8099
8100 Store = DAG.getStore(
8101 Chain, dl, LoadValues[i],
8102 DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
8103 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
8104 OutChains.push_back(Store);
8105 DstOff += VTSize;
8106 }
8107
8108 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
8109 }
8110
8111 /// Lower the call to 'memset' intrinsic function into a series of store
8112 /// operations.
8113 ///
8114 /// \param DAG Selection DAG where lowered code is placed.
8115 /// \param dl Link to corresponding IR location.
8116 /// \param Chain Control flow dependency.
8117 /// \param Dst Pointer to destination memory location.
8118 /// \param Src Value of byte to write into the memory.
8119 /// \param Size Number of bytes to write.
8120 /// \param Alignment Alignment of the destination in bytes.
8121 /// \param isVol True if destination is volatile.
8122 /// \param AlwaysInline Makes sure no function call is generated.
8123 /// \param DstPtrInfo IR information on the memory pointer.
8124 /// \returns New head in the control flow if lowering was successful,
8125 /// an empty SDValue otherwise.
8126 ///
8127 /// The function tries to replace the 'llvm.memset' intrinsic with several
8128 /// store operations and value-calculation code. This is usually profitable
8129 /// for small memory sizes or when the semantics require inlining.
8130 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
8131 SDValue Chain, SDValue Dst, SDValue Src,
8132 uint64_t Size, Align Alignment, bool isVol,
8133 bool AlwaysInline, MachinePointerInfo DstPtrInfo,
8134 const AAMDNodes &AAInfo) {
8135 // Turn a memset of undef into a nop.
8136 // FIXME: We need to honor volatile even if Src is undef.
8137 if (Src.isUndef())
8138 return Chain;
8139
8140 // Expand memset to a series of store ops if the size operand
8141 // falls below a certain threshold.
8142 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8143 std::vector<EVT> MemOps;
8144 bool DstAlignCanChange = false;
8145 MachineFunction &MF = DAG.getMachineFunction();
8146 MachineFrameInfo &MFI = MF.getFrameInfo();
8147 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
8148 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
8149 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
8150 DstAlignCanChange = true;
8151 bool IsZeroVal = isNullConstant(Src);
8152 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemset(OptSize);
8153
8154 if (!TLI.findOptimalMemOpLowering(
8155 MemOps, Limit,
8156 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
8157 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
8158 return SDValue();
8159
8160 if (DstAlignCanChange) {
8161 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
8162 const DataLayout &DL = DAG.getDataLayout();
8163 Align NewAlign = DL.getABITypeAlign(Ty);
8164
8165 // Don't promote to an alignment that would require dynamic stack
8166 // realignment which may conflict with optimizations such as tail call
8167 // optimization.
8168 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
8169 if (!TRI->hasStackRealignment(MF))
8170 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
8171 NewAlign = NewAlign.previous();
8172
8173 if (NewAlign > Alignment) {
8174 // Give the stack frame object a larger alignment if needed.
8175 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
8176 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
8177 Alignment = NewAlign;
8178 }
8179 }
8180
8181 SmallVector<SDValue, 8> OutChains;
8182 uint64_t DstOff = 0;
8183 unsigned NumMemOps = MemOps.size();
8184
8185 // Find the largest store and generate the bit pattern for it.
8186 EVT LargestVT = MemOps[0];
8187 for (unsigned i = 1; i < NumMemOps; i++)
8188 if (MemOps[i].bitsGT(LargestVT))
8189 LargestVT = MemOps[i];
8190 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
8191
8192 // Prepare AAInfo for loads/stores after lowering this memset.
8193 AAMDNodes NewAAInfo = AAInfo;
8194 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
8195
8196 for (unsigned i = 0; i < NumMemOps; i++) {
8197 EVT VT = MemOps[i];
8198 unsigned VTSize = VT.getSizeInBits() / 8;
8199 if (VTSize > Size) {
8200 // Issuing an unaligned load / store pair that overlaps with the previous
8201 // pair. Adjust the offset accordingly.
8202 assert(i == NumMemOps-1 && i != 0);
8203 DstOff -= VTSize - Size;
8204 }
8205
8206 // If this store is smaller than the largest store see whether we can get
8207 // the smaller value for free with a truncate or extract vector element and
8208 // then store.
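// e.g. an i64 splat of 0xABAB...AB can feed an i32 store via TRUNCATE,
// and a vector splat can feed a scalar store via EXTRACT_VECTOR_ELT.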
8209 SDValue Value = MemSetValue;
8210 if (VT.bitsLT(LargestVT)) {
8211 unsigned Index;
8212 unsigned NElts = LargestVT.getSizeInBits() / VT.getSizeInBits();
8213 EVT SVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), NElts);
8214 if (!LargestVT.isVector() && !VT.isVector() &&
8215 TLI.isTruncateFree(LargestVT, VT))
8216 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
8217 else if (LargestVT.isVector() && !VT.isVector() &&
8218 TLI.shallExtractConstSplatVectorElementToStore(
8219 LargestVT.getTypeForEVT(*DAG.getContext()),
8220 VT.getSizeInBits(), Index) &&
8221 TLI.isTypeLegal(SVT) &&
8222 LargestVT.getSizeInBits() == SVT.getSizeInBits()) {
8223 // Target which can combine store(extractelement VectorTy, Idx) can get
8224 // the smaller value for free.
8225 SDValue TailValue = DAG.getNode(ISD::BITCAST, dl, SVT, MemSetValue);
8226 Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, TailValue,
8227 DAG.getVectorIdxConstant(Index, dl));
8228 } else
8229 Value = getMemsetValue(Src, VT, DAG, dl);
8230 }
8231 assert(Value.getValueType() == VT && "Value with wrong type.");
8232 SDValue Store = DAG.getStore(
8233 Chain, dl, Value,
8234 DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
8235 DstPtrInfo.getWithOffset(DstOff), Alignment,
8236 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone,
8237 NewAAInfo);
8238 OutChains.push_back(Store);
8239 DstOff += VT.getSizeInBits() / 8;
8240 Size -= VTSize;
8241 }
8242
8243 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
8244 }
8245
8246 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
8247 unsigned AS) {
8248 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
8249 // pointer operands can be losslessly bitcasted to pointers of address space 0.
8250 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
8251 report_fatal_error("cannot lower memory intrinsic in address space " +
8252 Twine(AS));
8253 }
8254 }
8255
8256 SDValue SelectionDAG::getMemcpy(
8257 SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size,
8258 Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI,
8259 std::optional<bool> OverrideTailCall, MachinePointerInfo DstPtrInfo,
8260 MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, AAResults *AA) {
8261 // Check to see if we should lower the memcpy to loads and stores first.
8262 // For cases within the target-specified limits, this is the best choice.
8263 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
8264 if (ConstantSize) {
8265 // Memcpy with size zero? Just return the original chain.
8266 if (ConstantSize->isZero())
8267 return Chain;
8268
8269 SDValue Result = getMemcpyLoadsAndStores(
8270 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
8271 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8272 if (Result.getNode())
8273 return Result;
8274 }
8275
8276 // Then check to see if we should lower the memcpy with target-specific
8277 // code. If the target chooses to do this, this is the next best.
8278 if (TSI) {
8279 SDValue Result = TSI->EmitTargetCodeForMemcpy(
8280 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
8281 DstPtrInfo, SrcPtrInfo);
8282 if (Result.getNode())
8283 return Result;
8284 }
8285
8286 // If we really need inline code and the target declined to provide it,
8287 // use a (potentially long) sequence of loads and stores.
8288 if (AlwaysInline) {
8289 assert(ConstantSize && "AlwaysInline requires a constant size!");
8290 return getMemcpyLoadsAndStores(
8291 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
8292 isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8293 }
8294
8295 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
8296 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
8297
8298 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
8299 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
8300 // respect volatile, so they may do things like read or write memory
8301 // beyond the given memory regions. But fixing this isn't easy, and most
8302 // people don't care.
8303
8304 // Emit a library call.
8305 TargetLowering::ArgListTy Args;
8306 TargetLowering::ArgListEntry Entry;
8307 Entry.Ty = PointerType::getUnqual(*getContext());
8308 Entry.Node = Dst; Args.push_back(Entry);
8309 Entry.Node = Src; Args.push_back(Entry);
8310
8311 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
8312 Entry.Node = Size; Args.push_back(Entry);
8313 // FIXME: pass in SDLoc
8314 TargetLowering::CallLoweringInfo CLI(*this);
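// Use an explicit tail-call override when provided; otherwise only tail
// call when the call sits in tail position, accounting for whether the
// libcall returns its first argument as memcpy does.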
8315 bool IsTailCall = false;
8316 if (OverrideTailCall.has_value()) {
8317 IsTailCall = *OverrideTailCall;
8318 } else {
8319 bool LowersToMemcpy =
8320 TLI->getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy");
8321 bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI);
8322 IsTailCall = CI && CI->isTailCall() &&
8323 isInTailCallPosition(*CI, getTarget(),
8324 ReturnsFirstArg && LowersToMemcpy);
8325 }
8326
8327 CLI.setDebugLoc(dl)
8328 .setChain(Chain)
8329 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
8330 Dst.getValueType().getTypeForEVT(*getContext()),
8331 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
8332 TLI->getPointerTy(getDataLayout())),
8333 std::move(Args))
8334 .setDiscardResult()
8335 .setTailCall(IsTailCall);
8336
8337 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
8338 return CallResult.second;
8339 }
8340
8341 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
8342 SDValue Dst, SDValue Src, SDValue Size,
8343 Type *SizeTy, unsigned ElemSz,
8344 bool isTailCall,
8345 MachinePointerInfo DstPtrInfo,
8346 MachinePointerInfo SrcPtrInfo) {
8347 // Emit a library call.
8348 TargetLowering::ArgListTy Args;
8349 TargetLowering::ArgListEntry Entry;
8350 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
8351 Entry.Node = Dst;
8352 Args.push_back(Entry);
8353
8354 Entry.Node = Src;
8355 Args.push_back(Entry);
8356
8357 Entry.Ty = SizeTy;
8358 Entry.Node = Size;
8359 Args.push_back(Entry);
8360
8361 RTLIB::Libcall LibraryCall =
8362 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
8363 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8364 report_fatal_error("Unsupported element size");
8365
8366 TargetLowering::CallLoweringInfo CLI(*this);
8367 CLI.setDebugLoc(dl)
8368 .setChain(Chain)
8369 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
8370 Type::getVoidTy(*getContext()),
8371 getExternalSymbol(TLI->getLibcallName(LibraryCall),
8372 TLI->getPointerTy(getDataLayout())),
8373 std::move(Args))
8374 .setDiscardResult()
8375 .setTailCall(isTailCall);
8376
8377 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
8378 return CallResult.second;
8379 }
8380
8381 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
8382 SDValue Src, SDValue Size, Align Alignment,
8383 bool isVol, const CallInst *CI,
8384 std::optional<bool> OverrideTailCall,
8385 MachinePointerInfo DstPtrInfo,
8386 MachinePointerInfo SrcPtrInfo,
8387 const AAMDNodes &AAInfo, AAResults *AA) {
8388 // Check to see if we should lower the memmove to loads and stores first.
8389 // For cases within the target-specified limits, this is the best choice.
8390 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
8391 if (ConstantSize) {
8392 // Memmove with size zero? Just return the original chain.
8393 if (ConstantSize->isZero())
8394 return Chain;
8395
8396 SDValue Result = getMemmoveLoadsAndStores(
8397 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
8398 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
8399 if (Result.getNode())
8400 return Result;
8401 }
8402
8403 // Then check to see if we should lower the memmove with target-specific
8404 // code. If the target chooses to do this, this is the next best.
8405 if (TSI) {
8406 SDValue Result =
8407 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
8408 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
8409 if (Result.getNode())
8410 return Result;
8411 }
8412
8413 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
8414 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
8415
8416 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
8417 // not be safe. See memcpy above for more details.
8418
8419 // Emit a library call.
8420 TargetLowering::ArgListTy Args;
8421 TargetLowering::ArgListEntry Entry;
8422 Entry.Ty = PointerType::getUnqual(*getContext());
8423 Entry.Node = Dst; Args.push_back(Entry);
8424 Entry.Node = Src; Args.push_back(Entry);
8425
8426 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
8427 Entry.Node = Size; Args.push_back(Entry);
8428 // FIXME: pass in SDLoc
8429 TargetLowering::CallLoweringInfo CLI(*this);
8430
8431 bool IsTailCall = false;
8432 if (OverrideTailCall.has_value()) {
8433 IsTailCall = *OverrideTailCall;
8434 } else {
8435 bool LowersToMemmove =
8436 TLI->getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove");
8437 bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI);
8438 IsTailCall = CI && CI->isTailCall() &&
8439 isInTailCallPosition(*CI, getTarget(),
8440 ReturnsFirstArg && LowersToMemmove);
8441 }
8442
8443 CLI.setDebugLoc(dl)
8444 .setChain(Chain)
8445 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
8446 Dst.getValueType().getTypeForEVT(*getContext()),
8447 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
8448 TLI->getPointerTy(getDataLayout())),
8449 std::move(Args))
8450 .setDiscardResult()
8451 .setTailCall(IsTailCall);
8452
8453 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
8454 return CallResult.second;
8455 }
8456
8457 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
8458 SDValue Dst, SDValue Src, SDValue Size,
8459 Type *SizeTy, unsigned ElemSz,
8460 bool isTailCall,
8461 MachinePointerInfo DstPtrInfo,
8462 MachinePointerInfo SrcPtrInfo) {
8463 // Emit a library call.
8464 TargetLowering::ArgListTy Args;
8465 TargetLowering::ArgListEntry Entry;
8466 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
8467 Entry.Node = Dst;
8468 Args.push_back(Entry);
8469
8470 Entry.Node = Src;
8471 Args.push_back(Entry);
8472
8473 Entry.Ty = SizeTy;
8474 Entry.Node = Size;
8475 Args.push_back(Entry);
8476
8477 RTLIB::Libcall LibraryCall =
8478 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
8479 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8480 report_fatal_error("Unsupported element size");
8481
8482 TargetLowering::CallLoweringInfo CLI(*this);
8483 CLI.setDebugLoc(dl)
8484 .setChain(Chain)
8485 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
8486 Type::getVoidTy(*getContext()),
8487 getExternalSymbol(TLI->getLibcallName(LibraryCall),
8488 TLI->getPointerTy(getDataLayout())),
8489 std::move(Args))
8490 .setDiscardResult()
8491 .setTailCall(isTailCall);
8492
8493 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
8494 return CallResult.second;
8495 }
8496
8497 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
8498 SDValue Src, SDValue Size, Align Alignment,
8499 bool isVol, bool AlwaysInline,
8500 const CallInst *CI,
8501 MachinePointerInfo DstPtrInfo,
8502 const AAMDNodes &AAInfo) {
8503 // Check to see if we should lower the memset to stores first.
8504 // For cases within the target-specified limits, this is the best choice.
8505 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
8506 if (ConstantSize) {
8507 // Memset with size zero? Just return the original chain.
8508 if (ConstantSize->isZero())
8509 return Chain;
8510
8511 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
8512 ConstantSize->getZExtValue(), Alignment,
8513 isVol, false, DstPtrInfo, AAInfo);
8514
8515 if (Result.getNode())
8516 return Result;
8517 }
8518
8519 // Then check to see if we should lower the memset with target-specific
8520 // code. If the target chooses to do this, this is the next best.
8521 if (TSI) {
8522 SDValue Result = TSI->EmitTargetCodeForMemset(
8523 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
8524 if (Result.getNode())
8525 return Result;
8526 }
8527
8528 // If we really need inline code and the target declined to provide it,
8529 // use a (potentially long) sequence of loads and stores.
8530 if (AlwaysInline) {
8531 assert(ConstantSize && "AlwaysInline requires a constant size!");
8532 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
8533 ConstantSize->getZExtValue(), Alignment,
8534 isVol, true, DstPtrInfo, AAInfo);
8535 assert(Result &&
8536 "getMemsetStores must return a valid sequence when AlwaysInline");
8537 return Result;
8538 }
8539
8540 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
8541
8542 // Emit a library call.
8543 auto &Ctx = *getContext();
8544 const auto &DL = getDataLayout();
8545
8546 TargetLowering::CallLoweringInfo CLI(*this);
8547 // FIXME: pass in SDLoc
8548 CLI.setDebugLoc(dl).setChain(Chain);
8549
8550 const char *BzeroName = getTargetLoweringInfo().getLibcallName(RTLIB::BZERO);
8551
8552 // Helper function to create an Entry from Node and Type.
8553 const auto CreateEntry = [](SDValue Node, Type *Ty) {
8554 TargetLowering::ArgListEntry Entry;
8555 Entry.Node = Node;
8556 Entry.Ty = Ty;
8557 return Entry;
8558 };
8559
8560 bool UseBZero = isNullConstant(Src) && BzeroName;
8561 // If zeroing out and bzero is present, use it.
8562 if (UseBZero) {
8563 TargetLowering::ArgListTy Args;
8564 Args.push_back(CreateEntry(Dst, PointerType::getUnqual(Ctx)));
8565 Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx)));
8566 CLI.setLibCallee(
8567 TLI->getLibcallCallingConv(RTLIB::BZERO), Type::getVoidTy(Ctx),
8568 getExternalSymbol(BzeroName, TLI->getPointerTy(DL)), std::move(Args));
8569 } else {
8570 TargetLowering::ArgListTy Args;
8571 Args.push_back(CreateEntry(Dst, PointerType::getUnqual(Ctx)));
8572 Args.push_back(CreateEntry(Src, Src.getValueType().getTypeForEVT(Ctx)));
8573 Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx)));
8574 CLI.setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
8575 Dst.getValueType().getTypeForEVT(Ctx),
8576 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
8577 TLI->getPointerTy(DL)),
8578 std::move(Args));
8579 }
8580 bool LowersToMemset =
8581 TLI->getLibcallName(RTLIB::MEMSET) == StringRef("memset");
8582 // If we're going to use bzero, make sure not to tail call unless the
8583 // subsequent return doesn't need a value, as bzero, unlike memset, does
8584 // not return its first argument.
8585 bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI) && !UseBZero;
8586 bool IsTailCall =
8587 CI && CI->isTailCall() &&
8588 isInTailCallPosition(*CI, getTarget(), ReturnsFirstArg && LowersToMemset);
8589 CLI.setDiscardResult().setTailCall(IsTailCall);
8590
8591 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
8592 return CallResult.second;
8593 }
8594
8595 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
8596 SDValue Dst, SDValue Value, SDValue Size,
8597 Type *SizeTy, unsigned ElemSz,
8598 bool isTailCall,
8599 MachinePointerInfo DstPtrInfo) {
8600 // Emit a library call.
8601 TargetLowering::ArgListTy Args;
8602 TargetLowering::ArgListEntry Entry;
8603 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
8604 Entry.Node = Dst;
8605 Args.push_back(Entry);
8606
8607 Entry.Ty = Type::getInt8Ty(*getContext());
8608 Entry.Node = Value;
8609 Args.push_back(Entry);
8610
8611 Entry.Ty = SizeTy;
8612 Entry.Node = Size;
8613 Args.push_back(Entry);
8614
8615 RTLIB::Libcall LibraryCall =
8616 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
8617 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8618 report_fatal_error("Unsupported element size");
8619
8620 TargetLowering::CallLoweringInfo CLI(*this);
8621 CLI.setDebugLoc(dl)
8622 .setChain(Chain)
8623 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
8624 Type::getVoidTy(*getContext()),
8625 getExternalSymbol(TLI->getLibcallName(LibraryCall),
8626 TLI->getPointerTy(getDataLayout())),
8627 std::move(Args))
8628 .setDiscardResult()
8629 .setTailCall(isTailCall);
8630
8631 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
8632 return CallResult.second;
8633 }
8634
8635 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
8636 SDVTList VTList, ArrayRef<SDValue> Ops,
8637 MachineMemOperand *MMO) {
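// Include the memory VT, address space and MMO flags in the CSE key so
// that atomics with different memory semantics are not unified.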
8638 FoldingSetNodeID ID;
8639 ID.AddInteger(MemVT.getRawBits());
8640 AddNodeIDNode(ID, Opcode, VTList, Ops);
8641 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8642 ID.AddInteger(MMO->getFlags());
8643 void* IP = nullptr;
8644 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8645 cast<AtomicSDNode>(E)->refineAlignment(MMO);
8646 return SDValue(E, 0);
8647 }
8648
8649 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
8650 VTList, MemVT, MMO);
8651 createOperands(N, Ops);
8652
8653 CSEMap.InsertNode(N, IP);
8654 InsertNode(N);
8655 return SDValue(N, 0);
8656 }
8657
8658 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
8659 EVT MemVT, SDVTList VTs, SDValue Chain,
8660 SDValue Ptr, SDValue Cmp, SDValue Swp,
8661 MachineMemOperand *MMO) {
8662 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
8663 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
8664 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
8665
8666 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
8667 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8668 }
8669
8670 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
8671 SDValue Chain, SDValue Ptr, SDValue Val,
8672 MachineMemOperand *MMO) {
8673 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
8674 Opcode == ISD::ATOMIC_LOAD_SUB ||
8675 Opcode == ISD::ATOMIC_LOAD_AND ||
8676 Opcode == ISD::ATOMIC_LOAD_CLR ||
8677 Opcode == ISD::ATOMIC_LOAD_OR ||
8678 Opcode == ISD::ATOMIC_LOAD_XOR ||
8679 Opcode == ISD::ATOMIC_LOAD_NAND ||
8680 Opcode == ISD::ATOMIC_LOAD_MIN ||
8681 Opcode == ISD::ATOMIC_LOAD_MAX ||
8682 Opcode == ISD::ATOMIC_LOAD_UMIN ||
8683 Opcode == ISD::ATOMIC_LOAD_UMAX ||
8684 Opcode == ISD::ATOMIC_LOAD_FADD ||
8685 Opcode == ISD::ATOMIC_LOAD_FSUB ||
8686 Opcode == ISD::ATOMIC_LOAD_FMAX ||
8687 Opcode == ISD::ATOMIC_LOAD_FMIN ||
8688 Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
8689 Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
8690 Opcode == ISD::ATOMIC_SWAP ||
8691 Opcode == ISD::ATOMIC_STORE) &&
8692 "Invalid Atomic Op");
8693
8694 EVT VT = Val.getValueType();
8695
8696 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
8697 getVTList(VT, MVT::Other);
8698 SDValue Ops[] = {Chain, Ptr, Val};
8699 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8700 }
8701
8702 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
8703 EVT VT, SDValue Chain, SDValue Ptr,
8704 MachineMemOperand *MMO) {
8705 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
8706
8707 SDVTList VTs = getVTList(VT, MVT::Other);
8708 SDValue Ops[] = {Chain, Ptr};
8709 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8710 }
8711
8712 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
8713 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
8714 if (Ops.size() == 1)
8715 return Ops[0];
8716
8717 SmallVector<EVT, 4> VTs;
8718 VTs.reserve(Ops.size());
8719 for (const SDValue &Op : Ops)
8720 VTs.push_back(Op.getValueType());
8721 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
8722 }
8723
8724 SDValue SelectionDAG::getMemIntrinsicNode(
8725 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
8726 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
8727 MachineMemOperand::Flags Flags, LocationSize Size,
8728 const AAMDNodes &AAInfo) {
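// A known-zero size carries no information; use the store size of the
// memory VT instead.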
8729 if (Size.hasValue() && !Size.getValue())
8730 Size = LocationSize::precise(MemVT.getStoreSize());
8731
8732 MachineFunction &MF = getMachineFunction();
8733 MachineMemOperand *MMO =
8734 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
8735
8736 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
8737 }
8738
8739 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
8740 SDVTList VTList,
8741 ArrayRef<SDValue> Ops, EVT MemVT,
8742 MachineMemOperand *MMO) {
8743 assert((Opcode == ISD::INTRINSIC_VOID ||
8744 Opcode == ISD::INTRINSIC_W_CHAIN ||
8745 Opcode == ISD::PREFETCH ||
8746 (Opcode <= (unsigned)std::numeric_limits<int>::max() &&
8747 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
8748 "Opcode is not a memory-accessing opcode!");
8749
8750 // Memoize the node unless it returns a glue result.
8751 MemIntrinsicSDNode *N;
8752 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
8753 FoldingSetNodeID ID;
8754 AddNodeIDNode(ID, Opcode, VTList, Ops);
8755 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
8756 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
8757 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8758 ID.AddInteger(MMO->getFlags());
8759 ID.AddInteger(MemVT.getRawBits());
8760 void *IP = nullptr;
8761 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8762 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
8763 return SDValue(E, 0);
8764 }
8765
8766 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
8767 VTList, MemVT, MMO);
8768 createOperands(N, Ops);
8769
8770 CSEMap.InsertNode(N, IP);
8771 } else {
8772 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
8773 VTList, MemVT, MMO);
8774 createOperands(N, Ops);
8775 }
8776 InsertNode(N);
8777 SDValue V(N, 0);
8778 NewSDValueDbgMsg(V, "Creating new node: ", this);
8779 return V;
8780 }
8781
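/// Illustrative sketch (FI is an assumed frame index for an 8-byte stack
/// slot):
/// \code
///   // Mark the beginning of the stack object's lifetime.
///   Chain = DAG.getLifetimeNode(/*IsStart=*/true, DL, Chain, FI,
///                               /*Size=*/8, /*Offset=*/0);
/// \endcode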
8782 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
8783 SDValue Chain, int FrameIndex,
8784 int64_t Size, int64_t Offset) {
8785 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
8786 const auto VTs = getVTList(MVT::Other);
8787 SDValue Ops[2] = {
8788 Chain,
8789 getFrameIndex(FrameIndex,
8790 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
8791 true)};
8792
8793 FoldingSetNodeID ID;
8794 AddNodeIDNode(ID, Opcode, VTs, Ops);
8795 ID.AddInteger(FrameIndex);
8796 ID.AddInteger(Size);
8797 ID.AddInteger(Offset);
8798 void *IP = nullptr;
8799 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
8800 return SDValue(E, 0);
8801
8802 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
8803 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
8804 createOperands(N, Ops);
8805 CSEMap.InsertNode(N, IP);
8806 InsertNode(N);
8807 SDValue V(N, 0);
8808 NewSDValueDbgMsg(V, "Creating new node: ", this);
8809 return V;
8810 }
8811
8812 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
8813 uint64_t Guid, uint64_t Index,
8814 uint32_t Attr) {
8815 const unsigned Opcode = ISD::PSEUDO_PROBE;
8816 const auto VTs = getVTList(MVT::Other);
8817 SDValue Ops[] = {Chain};
8818 FoldingSetNodeID ID;
8819 AddNodeIDNode(ID, Opcode, VTs, Ops);
8820 ID.AddInteger(Guid);
8821 ID.AddInteger(Index);
8822 void *IP = nullptr;
8823 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
8824 return SDValue(E, 0);
8825
8826 auto *N = newSDNode<PseudoProbeSDNode>(
8827 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
8828 createOperands(N, Ops);
8829 CSEMap.InsertNode(N, IP);
8830 InsertNode(N);
8831 SDValue V(N, 0);
8832 NewSDValueDbgMsg(V, "Creating new node: ", this);
8833 return V;
8834 }
8835
8836 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
8837 /// MachinePointerInfo record from it. This is particularly useful because the
8838 /// code generator has many cases where it doesn't bother passing in a
8839 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
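/// For example (illustrative), a pointer of the form
/// (add FrameIndex:i64<1>, Constant:i64<16>) is mapped to
/// MachinePointerInfo::getFixedStack(MF, /*FI=*/1, /*Offset=*/16).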
8840 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
8841 SelectionDAG &DAG, SDValue Ptr,
8842 int64_t Offset = 0) {
8843 // If this is FI+Offset, we can model it.
8844 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
8845 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
8846 FI->getIndex(), Offset);
8847
8848 // If this is (FI+Offset1)+Offset2, we can model it.
8849 if (Ptr.getOpcode() != ISD::ADD ||
8850 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
8851 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
8852 return Info;
8853
8854 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
8855 return MachinePointerInfo::getFixedStack(
8856 DAG.getMachineFunction(), FI,
8857 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
8858 }
8859
8860 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
8861 /// MachinePointerInfo record from it. This is particularly useful because the
8862 /// code generator has many cases where it doesn't bother passing in a
8863 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
8864 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
8865 SelectionDAG &DAG, SDValue Ptr,
8866 SDValue OffsetOp) {
8867 // If the 'Offset' value isn't a constant, we can't handle this.
8868 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
8869 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
8870 if (OffsetOp.isUndef())
8871 return InferPointerInfo(Info, DAG, Ptr);
8872 return Info;
8873 }
8874
8875 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
8876 EVT VT, const SDLoc &dl, SDValue Chain,
8877 SDValue Ptr, SDValue Offset,
8878 MachinePointerInfo PtrInfo, EVT MemVT,
8879 Align Alignment,
8880 MachineMemOperand::Flags MMOFlags,
8881 const AAMDNodes &AAInfo, const MDNode *Ranges) {
8882 assert(Chain.getValueType() == MVT::Other &&
8883 "Invalid chain type");
8884
8885 MMOFlags |= MachineMemOperand::MOLoad;
8886 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
8887 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
8888 // clients.
8889 if (PtrInfo.V.isNull())
8890 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
8891
8892 LocationSize Size = LocationSize::precise(MemVT.getStoreSize());
8893 MachineFunction &MF = getMachineFunction();
8894 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
8895 Alignment, AAInfo, Ranges);
8896 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
8897 }
8898
8899 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
8900 EVT VT, const SDLoc &dl, SDValue Chain,
8901 SDValue Ptr, SDValue Offset, EVT MemVT,
8902 MachineMemOperand *MMO) {
8903 if (VT == MemVT) {
8904 ExtType = ISD::NON_EXTLOAD;
8905 } else if (ExtType == ISD::NON_EXTLOAD) {
8906 assert(VT == MemVT && "Non-extending load from different memory type!");
8907 } else {
8908 // Extending load.
8909 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
8910 "Should only be an extending load, not truncating!");
8911 assert(VT.isInteger() == MemVT.isInteger() &&
8912 "Cannot convert from FP to Int or Int -> FP!");
8913 assert(VT.isVector() == MemVT.isVector() &&
8914 "Cannot use an ext load to convert to or from a vector!");
8915 assert((!VT.isVector() ||
8916 VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
8917 "Cannot use an ext load to change the number of vector elements!");
8918 }
8919
8920 bool Indexed = AM != ISD::UNINDEXED;
8921 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
8922
8923 SDVTList VTs = Indexed ?
8924 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
8925 SDValue Ops[] = { Chain, Ptr, Offset };
8926 FoldingSetNodeID ID;
8927 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
8928 ID.AddInteger(MemVT.getRawBits());
8929 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
8930 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
8931 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8932 ID.AddInteger(MMO->getFlags());
8933 void *IP = nullptr;
8934 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8935 cast<LoadSDNode>(E)->refineAlignment(MMO);
8936 return SDValue(E, 0);
8937 }
8938 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
8939 ExtType, MemVT, MMO);
8940 createOperands(N, Ops);
8941
8942 CSEMap.InsertNode(N, IP);
8943 InsertNode(N);
8944 SDValue V(N, 0);
8945 NewSDValueDbgMsg(V, "Creating new node: ", this);
8946 return V;
8947 }
8948
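/// A minimal sketch of the common unindexed, non-extending case handled by
/// the convenience overload below (illustrative; Chain, Ptr and PtrInfo are
/// assumed from the caller):
/// \code
///   SDValue Load = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);
///   SDValue Value = Load.getValue(0);  // the loaded value
///   Chain = Load.getValue(1);          // the output chain
/// \endcode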
8949 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
8950 SDValue Ptr, MachinePointerInfo PtrInfo,
8951 MaybeAlign Alignment,
8952 MachineMemOperand::Flags MMOFlags,
8953 const AAMDNodes &AAInfo, const MDNode *Ranges) {
8954 SDValue Undef = getUNDEF(Ptr.getValueType());
8955 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
8956 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
8957 }
8958
8959 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
8960 SDValue Ptr, MachineMemOperand *MMO) {
8961 SDValue Undef = getUNDEF(Ptr.getValueType());
8962 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
8963 VT, MMO);
8964 }
8965
8966 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
8967 EVT VT, SDValue Chain, SDValue Ptr,
8968 MachinePointerInfo PtrInfo, EVT MemVT,
8969 MaybeAlign Alignment,
8970 MachineMemOperand::Flags MMOFlags,
8971 const AAMDNodes &AAInfo) {
8972 SDValue Undef = getUNDEF(Ptr.getValueType());
8973 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
8974 MemVT, Alignment, MMOFlags, AAInfo);
8975 }
8976
8977 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
8978 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
8979 MachineMemOperand *MMO) {
8980 SDValue Undef = getUNDEF(Ptr.getValueType());
8981 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
8982 MemVT, MMO);
8983 }
8984
8985 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
8986 SDValue Base, SDValue Offset,
8987 ISD::MemIndexedMode AM) {
8988 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
8989 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
8990 // Don't propagate the invariant or dereferenceable flags.
8991 auto MMOFlags =
8992 LD->getMemOperand()->getFlags() &
8993 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
8994 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
8995 LD->getChain(), Base, Offset, LD->getPointerInfo(),
8996 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
8997 }
8998
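/// Illustrative sketch (Chain, Val, Ptr and PtrInfo assumed from the caller;
/// the alignment is an arbitrary example value):
/// \code
///   // An unindexed, non-truncating store; the only result is the chain.
///   Chain = DAG.getStore(Chain, DL, Val, Ptr, PtrInfo, Align(4));
/// \endcode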
8999 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
9000 SDValue Ptr, MachinePointerInfo PtrInfo,
9001 Align Alignment,
9002 MachineMemOperand::Flags MMOFlags,
9003 const AAMDNodes &AAInfo) {
9004 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9005
9006 MMOFlags |= MachineMemOperand::MOStore;
9007 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
9008
9009 if (PtrInfo.V.isNull())
9010 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
9011
9012 MachineFunction &MF = getMachineFunction();
9013 LocationSize Size = LocationSize::precise(Val.getValueType().getStoreSize());
9014 MachineMemOperand *MMO =
9015 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
9016 return getStore(Chain, dl, Val, Ptr, MMO);
9017 }
9018
9019 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
9020 SDValue Ptr, MachineMemOperand *MMO) {
9021 assert(Chain.getValueType() == MVT::Other &&
9022 "Invalid chain type");
9023 EVT VT = Val.getValueType();
9024 SDVTList VTs = getVTList(MVT::Other);
9025 SDValue Undef = getUNDEF(Ptr.getValueType());
9026 SDValue Ops[] = { Chain, Val, Ptr, Undef };
9027 FoldingSetNodeID ID;
9028 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
9029 ID.AddInteger(VT.getRawBits());
9030 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9031 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
9032 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9033 ID.AddInteger(MMO->getFlags());
9034 void *IP = nullptr;
9035 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9036 cast<StoreSDNode>(E)->refineAlignment(MMO);
9037 return SDValue(E, 0);
9038 }
9039 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9040 ISD::UNINDEXED, false, VT, MMO);
9041 createOperands(N, Ops);
9042
9043 CSEMap.InsertNode(N, IP);
9044 InsertNode(N);
9045 SDValue V(N, 0);
9046 NewSDValueDbgMsg(V, "Creating new node: ", this);
9047 return V;
9048 }
9049
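/// Illustrative sketch (Val is an assumed i32 SDValue; only its low 8 bits
/// reach memory):
/// \code
///   Chain = DAG.getTruncStore(Chain, DL, Val, Ptr, PtrInfo, MVT::i8,
///                             Align(1));
/// \endcode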
9050 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
9051 SDValue Ptr, MachinePointerInfo PtrInfo,
9052 EVT SVT, Align Alignment,
9053 MachineMemOperand::Flags MMOFlags,
9054 const AAMDNodes &AAInfo) {
9055 assert(Chain.getValueType() == MVT::Other &&
9056 "Invalid chain type");
9057
9058 MMOFlags |= MachineMemOperand::MOStore;
9059 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
9060
9061 if (PtrInfo.V.isNull())
9062 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
9063
9064 MachineFunction &MF = getMachineFunction();
9065 MachineMemOperand *MMO = MF.getMachineMemOperand(
9066 PtrInfo, MMOFlags, LocationSize::precise(SVT.getStoreSize()), Alignment,
9067 AAInfo);
9068 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
9069 }
9070
9071 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
9072 SDValue Ptr, EVT SVT,
9073 MachineMemOperand *MMO) {
9074 EVT VT = Val.getValueType();
9075
9076 assert(Chain.getValueType() == MVT::Other &&
9077 "Invalid chain type");
9078 if (VT == SVT)
9079 return getStore(Chain, dl, Val, Ptr, MMO);
9080
9081 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
9082 "Should only be a truncating store, not extending!");
9083 assert(VT.isInteger() == SVT.isInteger() &&
9084 "Can't do FP-INT conversion!");
9085 assert(VT.isVector() == SVT.isVector() &&
9086 "Cannot use trunc store to convert to or from a vector!");
9087 assert((!VT.isVector() ||
9088 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
9089 "Cannot use trunc store to change the number of vector elements!");
9090
9091 SDVTList VTs = getVTList(MVT::Other);
9092 SDValue Undef = getUNDEF(Ptr.getValueType());
9093 SDValue Ops[] = { Chain, Val, Ptr, Undef };
9094 FoldingSetNodeID ID;
9095 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
9096 ID.AddInteger(SVT.getRawBits());
9097 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
9098 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
9099 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9100 ID.AddInteger(MMO->getFlags());
9101 void *IP = nullptr;
9102 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9103 cast<StoreSDNode>(E)->refineAlignment(MMO);
9104 return SDValue(E, 0);
9105 }
9106 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9107 ISD::UNINDEXED, true, SVT, MMO);
9108 createOperands(N, Ops);
9109
9110 CSEMap.InsertNode(N, IP);
9111 InsertNode(N);
9112 SDValue V(N, 0);
9113 NewSDValueDbgMsg(V, "Creating new node: ", this);
9114 return V;
9115 }
9116
9117 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
9118 SDValue Base, SDValue Offset,
9119 ISD::MemIndexedMode AM) {
9120 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
9121 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
9122 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
9123 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
9124 FoldingSetNodeID ID;
9125 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
9126 ID.AddInteger(ST->getMemoryVT().getRawBits());
9127 ID.AddInteger(ST->getRawSubclassData());
9128 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
9129 ID.AddInteger(ST->getMemOperand()->getFlags());
9130 void *IP = nullptr;
9131 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
9132 return SDValue(E, 0);
9133
9134 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
9135 ST->isTruncatingStore(), ST->getMemoryVT(),
9136 ST->getMemOperand());
9137 createOperands(N, Ops);
9138
9139 CSEMap.InsertNode(N, IP);
9140 InsertNode(N);
9141 SDValue V(N, 0);
9142 NewSDValueDbgMsg(V, "Creating new node: ", this);
9143 return V;
9144 }
9145
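/// A hedged sketch of a VP load built through the overloads below
/// (illustrative; Mask, EVL and MMO are assumed from a typical VP lowering
/// context):
/// \code
///   // Load up to EVL lanes of a v4i32 under Mask; disabled lanes are undef.
///   SDValue VPLoad = DAG.getLoadVP(MVT::v4i32, DL, Chain, Ptr, Mask, EVL,
///                                  MMO, /*IsExpanding=*/false);
///   SDValue Value = VPLoad.getValue(0);
///   Chain = VPLoad.getValue(1);
/// \endcode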
9146 SDValue SelectionDAG::getLoadVP(
9147 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
9148 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL,
9149 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
9150 MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
9151 const MDNode *Ranges, bool IsExpanding) {
9152 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9153
9154 MMOFlags |= MachineMemOperand::MOLoad;
9155 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
9156 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
9157 // clients.
9158 if (PtrInfo.V.isNull())
9159 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
9160
9161 LocationSize Size = LocationSize::precise(MemVT.getStoreSize());
9162 MachineFunction &MF = getMachineFunction();
9163 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
9164 Alignment, AAInfo, Ranges);
9165 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
9166 MMO, IsExpanding);
9167 }
9168
9169 SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
9170 ISD::LoadExtType ExtType, EVT VT,
9171 const SDLoc &dl, SDValue Chain, SDValue Ptr,
9172 SDValue Offset, SDValue Mask, SDValue EVL,
9173 EVT MemVT, MachineMemOperand *MMO,
9174 bool IsExpanding) {
9175 bool Indexed = AM != ISD::UNINDEXED;
9176 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
9177
9178 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
9179 : getVTList(VT, MVT::Other);
9180 SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL};
9181 FoldingSetNodeID ID;
9182 AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops);
9183 ID.AddInteger(MemVT.getRawBits());
9184 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
9185 dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9186 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9187 ID.AddInteger(MMO->getFlags());
9188 void *IP = nullptr;
9189 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9190 cast<VPLoadSDNode>(E)->refineAlignment(MMO);
9191 return SDValue(E, 0);
9192 }
9193 auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
9194 ExtType, IsExpanding, MemVT, MMO);
9195 createOperands(N, Ops);
9196
9197 CSEMap.InsertNode(N, IP);
9198 InsertNode(N);
9199 SDValue V(N, 0);
9200 NewSDValueDbgMsg(V, "Creating new node: ", this);
9201 return V;
9202 }
9203
9204 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
9205 SDValue Ptr, SDValue Mask, SDValue EVL,
9206 MachinePointerInfo PtrInfo,
9207 MaybeAlign Alignment,
9208 MachineMemOperand::Flags MMOFlags,
9209 const AAMDNodes &AAInfo, const MDNode *Ranges,
9210 bool IsExpanding) {
9211 SDValue Undef = getUNDEF(Ptr.getValueType());
9212 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
9213 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
9214 IsExpanding);
9215 }
9216
9217 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
9218 SDValue Ptr, SDValue Mask, SDValue EVL,
9219 MachineMemOperand *MMO, bool IsExpanding) {
9220 SDValue Undef = getUNDEF(Ptr.getValueType());
9221 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
9222 Mask, EVL, VT, MMO, IsExpanding);
9223 }
9224
9225 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
9226 EVT VT, SDValue Chain, SDValue Ptr,
9227 SDValue Mask, SDValue EVL,
9228 MachinePointerInfo PtrInfo, EVT MemVT,
9229 MaybeAlign Alignment,
9230 MachineMemOperand::Flags MMOFlags,
9231 const AAMDNodes &AAInfo, bool IsExpanding) {
9232 SDValue Undef = getUNDEF(Ptr.getValueType());
9233 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
9234 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
9235 IsExpanding);
9236 }
9237
9238 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
9239 EVT VT, SDValue Chain, SDValue Ptr,
9240 SDValue Mask, SDValue EVL, EVT MemVT,
9241 MachineMemOperand *MMO, bool IsExpanding) {
9242 SDValue Undef = getUNDEF(Ptr.getValueType());
9243 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
9244 EVL, MemVT, MMO, IsExpanding);
9245 }
9246
9247 SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
9248 SDValue Base, SDValue Offset,
9249 ISD::MemIndexedMode AM) {
9250 auto *LD = cast<VPLoadSDNode>(OrigLoad);
9251 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
9252 // Don't propagate the invariant or dereferenceable flags.
9253 auto MMOFlags =
9254 LD->getMemOperand()->getFlags() &
9255 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
9256 return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
9257 LD->getChain(), Base, Offset, LD->getMask(),
9258 LD->getVectorLength(), LD->getPointerInfo(),
9259 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
9260 nullptr, LD->isExpandingLoad());
9261 }
9262
9263 SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
9264 SDValue Ptr, SDValue Offset, SDValue Mask,
9265 SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
9266 ISD::MemIndexedMode AM, bool IsTruncating,
9267 bool IsCompressing) {
9268 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9269 bool Indexed = AM != ISD::UNINDEXED;
9270 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
9271 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
9272 : getVTList(MVT::Other);
9273 SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
9274 FoldingSetNodeID ID;
9275 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
9276 ID.AddInteger(MemVT.getRawBits());
9277 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
9278 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9279 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9280 ID.AddInteger(MMO->getFlags());
9281 void *IP = nullptr;
9282 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9283 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9284 return SDValue(E, 0);
9285 }
9286 auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
9287 IsTruncating, IsCompressing, MemVT, MMO);
9288 createOperands(N, Ops);
9289
9290 CSEMap.InsertNode(N, IP);
9291 InsertNode(N);
9292 SDValue V(N, 0);
9293 NewSDValueDbgMsg(V, "Creating new node: ", this);
9294 return V;
9295 }
9296
9297 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
9298 SDValue Val, SDValue Ptr, SDValue Mask,
9299 SDValue EVL, MachinePointerInfo PtrInfo,
9300 EVT SVT, Align Alignment,
9301 MachineMemOperand::Flags MMOFlags,
9302 const AAMDNodes &AAInfo,
9303 bool IsCompressing) {
9304 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9305
9306 MMOFlags |= MachineMemOperand::MOStore;
9307 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
9308
9309 if (PtrInfo.V.isNull())
9310 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
9311
9312 MachineFunction &MF = getMachineFunction();
9313 MachineMemOperand *MMO = MF.getMachineMemOperand(
9314 PtrInfo, MMOFlags, LocationSize::precise(SVT.getStoreSize()), Alignment,
9315 AAInfo);
9316 return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
9317 IsCompressing);
9318 }
9319
9320 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
9321 SDValue Val, SDValue Ptr, SDValue Mask,
9322 SDValue EVL, EVT SVT,
9323 MachineMemOperand *MMO,
9324 bool IsCompressing) {
9325 EVT VT = Val.getValueType();
9326
9327 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9328 if (VT == SVT)
9329 return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask,
9330 EVL, VT, MMO, ISD::UNINDEXED,
9331 /*IsTruncating*/ false, IsCompressing);
9332
9333 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
9334 "Should only be a truncating store, not extending!");
9335 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
9336 assert(VT.isVector() == SVT.isVector() &&
9337 "Cannot use trunc store to convert to or from a vector!");
9338 assert((!VT.isVector() ||
9339 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
9340 "Cannot use trunc store to change the number of vector elements!");
9341
9342 SDVTList VTs = getVTList(MVT::Other);
9343 SDValue Undef = getUNDEF(Ptr.getValueType());
9344 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
9345 FoldingSetNodeID ID;
9346 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
9347 ID.AddInteger(SVT.getRawBits());
9348 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
9349 dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
9350 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9351 ID.AddInteger(MMO->getFlags());
9352 void *IP = nullptr;
9353 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9354 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9355 return SDValue(E, 0);
9356 }
9357 auto *N =
9358 newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9359 ISD::UNINDEXED, true, IsCompressing, SVT, MMO);
9360 createOperands(N, Ops);
9361
9362 CSEMap.InsertNode(N, IP);
9363 InsertNode(N);
9364 SDValue V(N, 0);
9365 NewSDValueDbgMsg(V, "Creating new node: ", this);
9366 return V;
9367 }
9368
9369 SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
9370 SDValue Base, SDValue Offset,
9371 ISD::MemIndexedMode AM) {
9372 auto *ST = cast<VPStoreSDNode>(OrigStore);
9373 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
9374 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
9375 SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
9376 Offset, ST->getMask(), ST->getVectorLength()};
9377 FoldingSetNodeID ID;
9378 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
9379 ID.AddInteger(ST->getMemoryVT().getRawBits());
9380 ID.AddInteger(ST->getRawSubclassData());
9381 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
9382 ID.AddInteger(ST->getMemOperand()->getFlags());
9383 void *IP = nullptr;
9384 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
9385 return SDValue(E, 0);
9386
9387 auto *N = newSDNode<VPStoreSDNode>(
9388 dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(),
9389 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
9390 createOperands(N, Ops);
9391
9392 CSEMap.InsertNode(N, IP);
9393 InsertNode(N);
9394 SDValue V(N, 0);
9395 NewSDValueDbgMsg(V, "Creating new node: ", this);
9396 return V;
9397 }
9398
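/// Illustrative sketch of the strided form (Stride is an assumed SDValue
/// giving the byte distance between consecutive elements):
/// \code
///   SDValue Strided = DAG.getStridedLoadVP(MVT::v4i32, DL, Chain, Ptr,
///                                          Stride, Mask, EVL, MMO,
///                                          /*IsExpanding=*/false);
/// \endcode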
9399 SDValue SelectionDAG::getStridedLoadVP(
9400 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
9401 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
9402 SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
9403 bool Indexed = AM != ISD::UNINDEXED;
9404 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
9405
9406 SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL};
9407 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
9408 : getVTList(VT, MVT::Other);
9409 FoldingSetNodeID ID;
9410 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_LOAD, VTs, Ops);
9411 ID.AddInteger(VT.getRawBits());
9412 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
9413 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9414 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9415
9416 void *IP = nullptr;
9417 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9418 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
9419 return SDValue(E, 0);
9420 }
9421
9422 auto *N =
9423 newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM,
9424 ExtType, IsExpanding, MemVT, MMO);
9425 createOperands(N, Ops);
9426 CSEMap.InsertNode(N, IP);
9427 InsertNode(N);
9428 SDValue V(N, 0);
9429 NewSDValueDbgMsg(V, "Creating new node: ", this);
9430 return V;
9431 }
9432
9433 SDValue SelectionDAG::getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain,
9434 SDValue Ptr, SDValue Stride,
9435 SDValue Mask, SDValue EVL,
9436 MachineMemOperand *MMO,
9437 bool IsExpanding) {
9438 SDValue Undef = getUNDEF(Ptr.getValueType());
9439 return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr,
9440 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
9441 }
9442
9443 SDValue SelectionDAG::getExtStridedLoadVP(
9444 ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain,
9445 SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT,
9446 MachineMemOperand *MMO, bool IsExpanding) {
9447 SDValue Undef = getUNDEF(Ptr.getValueType());
9448 return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef,
9449 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
9450 }
9451
9452 SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
9453 SDValue Val, SDValue Ptr,
9454 SDValue Offset, SDValue Stride,
9455 SDValue Mask, SDValue EVL, EVT MemVT,
9456 MachineMemOperand *MMO,
9457 ISD::MemIndexedMode AM,
9458 bool IsTruncating, bool IsCompressing) {
9459 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9460 bool Indexed = AM != ISD::UNINDEXED;
9461 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
9462 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
9463 : getVTList(MVT::Other);
9464 SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL};
9465 FoldingSetNodeID ID;
9466 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
9467 ID.AddInteger(MemVT.getRawBits());
9468 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9469 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9470 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9471 void *IP = nullptr;
9472 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9473 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9474 return SDValue(E, 0);
9475 }
9476 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
9477 VTs, AM, IsTruncating,
9478 IsCompressing, MemVT, MMO);
9479 createOperands(N, Ops);
9480
9481 CSEMap.InsertNode(N, IP);
9482 InsertNode(N);
9483 SDValue V(N, 0);
9484 NewSDValueDbgMsg(V, "Creating new node: ", this);
9485 return V;
9486 }
9487
9488 SDValue SelectionDAG::getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL,
9489 SDValue Val, SDValue Ptr,
9490 SDValue Stride, SDValue Mask,
9491 SDValue EVL, EVT SVT,
9492 MachineMemOperand *MMO,
9493 bool IsCompressing) {
9494 EVT VT = Val.getValueType();
9495
9496 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9497 if (VT == SVT)
9498 return getStridedStoreVP(Chain, DL, Val, Ptr, getUNDEF(Ptr.getValueType()),
9499 Stride, Mask, EVL, VT, MMO, ISD::UNINDEXED,
9500 /*IsTruncating*/ false, IsCompressing);
9501
9502 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
9503 "Should only be a truncating store, not extending!");
9504 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
9505 assert(VT.isVector() == SVT.isVector() &&
9506 "Cannot use trunc store to convert to or from a vector!");
9507 assert((!VT.isVector() ||
9508 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
9509 "Cannot use trunc store to change the number of vector elements!");
9510
9511 SDVTList VTs = getVTList(MVT::Other);
9512 SDValue Undef = getUNDEF(Ptr.getValueType());
9513 SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
9514 FoldingSetNodeID ID;
9515 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
9516 ID.AddInteger(SVT.getRawBits());
9517 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9518 DL.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
9519 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9520 void *IP = nullptr;
9521 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9522 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9523 return SDValue(E, 0);
9524 }
9525 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
9526 VTs, ISD::UNINDEXED, true,
9527 IsCompressing, SVT, MMO);
9528 createOperands(N, Ops);
9529
9530 CSEMap.InsertNode(N, IP);
9531 InsertNode(N);
9532 SDValue V(N, 0);
9533 NewSDValueDbgMsg(V, "Creating new node: ", this);
9534 return V;
9535 }
9536
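/// A hedged sketch of a VP gather (illustrative; the six operands follow the
/// order this function's callers use: chain, base pointer, index vector,
/// scale, mask and explicit vector length):
/// \code
///   SDValue Ops[] = {Chain, Base, Index, Scale, Mask, EVL};
///   SDValue Gather =
///       DAG.getGatherVP(DAG.getVTList(MVT::v4i32, MVT::Other), MVT::v4i32,
///                       DL, Ops, MMO, ISD::SIGNED_SCALED);
/// \endcode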
9537 SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
9538 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
9539 ISD::MemIndexType IndexType) {
9540 assert(Ops.size() == 6 && "Incompatible number of operands");
9541
9542 FoldingSetNodeID ID;
9543 AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops);
9544 ID.AddInteger(VT.getRawBits());
9545 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
9546 dl.getIROrder(), VTs, VT, MMO, IndexType));
9547 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9548 ID.AddInteger(MMO->getFlags());
9549 void *IP = nullptr;
9550 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9551 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
9552 return SDValue(E, 0);
9553 }
9554
9555 auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9556 VT, MMO, IndexType);
9557 createOperands(N, Ops);
9558
9559 assert(N->getMask().getValueType().getVectorElementCount() ==
9560 N->getValueType(0).getVectorElementCount() &&
9561 "Vector width mismatch between mask and data");
9562 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9563 N->getValueType(0).getVectorElementCount().isScalable() &&
9564 "Scalable flags of index and data do not match");
9565 assert(ElementCount::isKnownGE(
9566 N->getIndex().getValueType().getVectorElementCount(),
9567 N->getValueType(0).getVectorElementCount()) &&
9568 "Vector width mismatch between index and data");
9569 assert(isa<ConstantSDNode>(N->getScale()) &&
9570 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9571 "Scale should be a constant power of 2");
9572
9573 CSEMap.InsertNode(N, IP);
9574 InsertNode(N);
9575 SDValue V(N, 0);
9576 NewSDValueDbgMsg(V, "Creating new node: ", this);
9577 return V;
9578 }
9579
9580 SDValue SelectionDAG::getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl,
9581 ArrayRef<SDValue> Ops,
9582 MachineMemOperand *MMO,
9583 ISD::MemIndexType IndexType) {
9584 assert(Ops.size() == 7 && "Incompatible number of operands");
9585
9586 FoldingSetNodeID ID;
9587 AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops);
9588 ID.AddInteger(VT.getRawBits());
9589 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
9590 dl.getIROrder(), VTs, VT, MMO, IndexType));
9591 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9592 ID.AddInteger(MMO->getFlags());
9593 void *IP = nullptr;
9594 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9595 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
9596 return SDValue(E, 0);
9597 }
9598 auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9599 VT, MMO, IndexType);
9600 createOperands(N, Ops);
9601
9602 assert(N->getMask().getValueType().getVectorElementCount() ==
9603 N->getValue().getValueType().getVectorElementCount() &&
9604 "Vector width mismatch between mask and data");
9605 assert(
9606 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9607 N->getValue().getValueType().getVectorElementCount().isScalable() &&
9608 "Scalable flags of index and data do not match");
9609 assert(ElementCount::isKnownGE(
9610 N->getIndex().getValueType().getVectorElementCount(),
9611 N->getValue().getValueType().getVectorElementCount()) &&
9612 "Vector width mismatch between index and data");
9613 assert(isa<ConstantSDNode>(N->getScale()) &&
9614 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9615 "Scale should be a constant power of 2");
9616
9617 CSEMap.InsertNode(N, IP);
9618 InsertNode(N);
9619 SDValue V(N, 0);
9620 NewSDValueDbgMsg(V, "Creating new node: ", this);
9621 return V;
9622 }
9623
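/// Illustrative sketch of an unindexed, non-extending masked load (Mask and
/// PassThru are assumed from the caller; disabled lanes yield PassThru):
/// \code
///   SDValue MLoad = DAG.getMaskedLoad(
///       MVT::v4i32, DL, Chain, Base, DAG.getUNDEF(Base.getValueType()),
///       Mask, PassThru, MVT::v4i32, MMO, ISD::UNINDEXED, ISD::NON_EXTLOAD,
///       /*IsExpanding=*/false);
/// \endcode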
9624 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
9625 SDValue Base, SDValue Offset, SDValue Mask,
9626 SDValue PassThru, EVT MemVT,
9627 MachineMemOperand *MMO,
9628 ISD::MemIndexedMode AM,
9629 ISD::LoadExtType ExtTy, bool isExpanding) {
9630 bool Indexed = AM != ISD::UNINDEXED;
9631 assert((Indexed || Offset.isUndef()) &&
9632 "Unindexed masked load with an offset!");
9633 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
9634 : getVTList(VT, MVT::Other);
9635 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
9636 FoldingSetNodeID ID;
9637 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
9638 ID.AddInteger(MemVT.getRawBits());
9639 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
9640 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
9641 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9642 ID.AddInteger(MMO->getFlags());
9643 void *IP = nullptr;
9644 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9645 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
9646 return SDValue(E, 0);
9647 }
9648 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
9649 AM, ExtTy, isExpanding, MemVT, MMO);
9650 createOperands(N, Ops);
9651
9652 CSEMap.InsertNode(N, IP);
9653 InsertNode(N);
9654 SDValue V(N, 0);
9655 NewSDValueDbgMsg(V, "Creating new node: ", this);
9656 return V;
9657 }
9658
9659 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
9660 SDValue Base, SDValue Offset,
9661 ISD::MemIndexedMode AM) {
9662 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
9663 assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!");
9664 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
9665 Offset, LD->getMask(), LD->getPassThru(),
9666 LD->getMemoryVT(), LD->getMemOperand(), AM,
9667 LD->getExtensionType(), LD->isExpandingLoad());
9668 }
9669
9670 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
9671 SDValue Val, SDValue Base, SDValue Offset,
9672 SDValue Mask, EVT MemVT,
9673 MachineMemOperand *MMO,
9674 ISD::MemIndexedMode AM, bool IsTruncating,
9675 bool IsCompressing) {
9676 assert(Chain.getValueType() == MVT::Other &&
9677 "Invalid chain type");
9678 bool Indexed = AM != ISD::UNINDEXED;
9679 assert((Indexed || Offset.isUndef()) &&
9680 "Unindexed masked store with an offset!");
9681 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
9682 : getVTList(MVT::Other);
9683 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
9684 FoldingSetNodeID ID;
9685 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
9686 ID.AddInteger(MemVT.getRawBits());
9687 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
9688 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9689 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9690 ID.AddInteger(MMO->getFlags());
9691 void *IP = nullptr;
9692 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9693 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
9694 return SDValue(E, 0);
9695 }
9696 auto *N =
9697 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
9698 IsTruncating, IsCompressing, MemVT, MMO);
9699 createOperands(N, Ops);
9700
9701 CSEMap.InsertNode(N, IP);
9702 InsertNode(N);
9703 SDValue V(N, 0);
9704 NewSDValueDbgMsg(V, "Creating new node: ", this);
9705 return V;
9706 }
9707
9708 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
9709 SDValue Base, SDValue Offset,
9710 ISD::MemIndexedMode AM) {
9711 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
9712 assert(ST->getOffset().isUndef() &&
9713 "Masked store is already a indexed store!");
9714 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
9715 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
9716 AM, ST->isTruncatingStore(), ST->isCompressingStore());
9717 }
9718
9719 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
9720 ArrayRef<SDValue> Ops,
9721 MachineMemOperand *MMO,
9722 ISD::MemIndexType IndexType,
9723 ISD::LoadExtType ExtTy) {
9724 assert(Ops.size() == 6 && "Incompatible number of operands");
9725
9726 FoldingSetNodeID ID;
9727 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
9728 ID.AddInteger(MemVT.getRawBits());
9729 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
9730 dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
9731 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9732 ID.AddInteger(MMO->getFlags());
9733 void *IP = nullptr;
9734 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9735 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
9736 return SDValue(E, 0);
9737 }
9738
9739 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
9740 VTs, MemVT, MMO, IndexType, ExtTy);
9741 createOperands(N, Ops);
9742
9743 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
9744 "Incompatible type of the PassThru value in MaskedGatherSDNode");
9745 assert(N->getMask().getValueType().getVectorElementCount() ==
9746 N->getValueType(0).getVectorElementCount() &&
9747 "Vector width mismatch between mask and data");
9748 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9749 N->getValueType(0).getVectorElementCount().isScalable() &&
9750 "Scalable flags of index and data do not match");
9751 assert(ElementCount::isKnownGE(
9752 N->getIndex().getValueType().getVectorElementCount(),
9753 N->getValueType(0).getVectorElementCount()) &&
9754 "Vector width mismatch between index and data");
9755 assert(isa<ConstantSDNode>(N->getScale()) &&
9756 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9757 "Scale should be a constant power of 2");
9758
9759 CSEMap.InsertNode(N, IP);
9760 InsertNode(N);
9761 SDValue V(N, 0);
9762 NewSDValueDbgMsg(V, "Creating new node: ", this);
9763 return V;
9764 }
9765
9766 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
9767 ArrayRef<SDValue> Ops,
9768 MachineMemOperand *MMO,
9769 ISD::MemIndexType IndexType,
9770 bool IsTrunc) {
9771 assert(Ops.size() == 6 && "Incompatible number of operands");
9772
9773 FoldingSetNodeID ID;
9774 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
9775 ID.AddInteger(MemVT.getRawBits());
9776 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
9777 dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
9778 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9779 ID.AddInteger(MMO->getFlags());
9780 void *IP = nullptr;
9781 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9782 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
9783 return SDValue(E, 0);
9784 }
9785
9786 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
9787 VTs, MemVT, MMO, IndexType, IsTrunc);
9788 createOperands(N, Ops);
9789
9790 assert(N->getMask().getValueType().getVectorElementCount() ==
9791 N->getValue().getValueType().getVectorElementCount() &&
9792 "Vector width mismatch between mask and data");
9793 assert(
9794 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9795 N->getValue().getValueType().getVectorElementCount().isScalable() &&
9796 "Scalable flags of index and data do not match");
9797 assert(ElementCount::isKnownGE(
9798 N->getIndex().getValueType().getVectorElementCount(),
9799 N->getValue().getValueType().getVectorElementCount()) &&
9800 "Vector width mismatch between index and data");
9801 assert(isa<ConstantSDNode>(N->getScale()) &&
9802 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9803 "Scale should be a constant power of 2");
9804
9805 CSEMap.InsertNode(N, IP);
9806 InsertNode(N);
9807 SDValue V(N, 0);
9808 NewSDValueDbgMsg(V, "Creating new node: ", this);
9809 return V;
9810 }
9811
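/// A hedged sketch of a histogram update (illustrative; the seven operands
/// follow the order this function's callers use: chain, increment, mask,
/// base pointer, index vector, scale and intrinsic ID, and the i32 memory
/// type is an arbitrary example):
/// \code
///   SDValue Ops[] = {Chain, Inc, Mask, Base, Index, Scale, IntID};
///   SDValue Histogram = DAG.getMaskedHistogram(
///       DAG.getVTList(MVT::Other), /*MemVT=*/MVT::i32, DL, Ops, MMO,
///       ISD::SIGNED_SCALED);
/// \endcode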
9812 SDValue SelectionDAG::getMaskedHistogram(SDVTList VTs, EVT MemVT,
9813 const SDLoc &dl, ArrayRef<SDValue> Ops,
9814 MachineMemOperand *MMO,
9815 ISD::MemIndexType IndexType) {
9816 assert(Ops.size() == 7 && "Incompatible number of operands");
9817
9818 FoldingSetNodeID ID;
9819 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VECTOR_HISTOGRAM, VTs, Ops);
9820 ID.AddInteger(MemVT.getRawBits());
9821 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
9822 dl.getIROrder(), VTs, MemVT, MMO, IndexType));
9823 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9824 ID.AddInteger(MMO->getFlags());
9825 void *IP = nullptr;
9826 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
9827 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
9828 return SDValue(E, 0);
9829 }
9830
9831 auto *N = newSDNode<MaskedHistogramSDNode>(dl.getIROrder(), dl.getDebugLoc(),
9832 VTs, MemVT, MMO, IndexType);
9833 createOperands(N, Ops);
9834
9835 assert(N->getMask().getValueType().getVectorElementCount() ==
9836 N->getIndex().getValueType().getVectorElementCount() &&
9837 "Vector width mismatch between mask and data");
9838 assert(isa<ConstantSDNode>(N->getScale()) &&
9839 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9840 "Scale should be a constant power of 2");
9841 assert(N->getInc().getValueType().isInteger() && "Non-integer update value");
9842
9843 CSEMap.InsertNode(N, IP);
9844 InsertNode(N);
9845 SDValue V(N, 0);
9846 NewSDValueDbgMsg(V, "Creating new node: ", this);
9847 return V;
9848 }
9849
9850 SDValue SelectionDAG::getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr,
9851 EVT MemVT, MachineMemOperand *MMO) {
9852 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9853 SDVTList VTs = getVTList(MVT::Other);
9854 SDValue Ops[] = {Chain, Ptr};
9855 FoldingSetNodeID ID;
9856 AddNodeIDNode(ID, ISD::GET_FPENV_MEM, VTs, Ops);
9857 ID.AddInteger(MemVT.getRawBits());
9858 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
9859 ISD::GET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
9860 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9861 ID.AddInteger(MMO->getFlags());
9862 void *IP = nullptr;
9863 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
9864 return SDValue(E, 0);
9865
9866 auto *N = newSDNode<FPStateAccessSDNode>(ISD::GET_FPENV_MEM, dl.getIROrder(),
9867 dl.getDebugLoc(), VTs, MemVT, MMO);
9868 createOperands(N, Ops);
9869
9870 CSEMap.InsertNode(N, IP);
9871 InsertNode(N);
9872 SDValue V(N, 0);
9873 NewSDValueDbgMsg(V, "Creating new node: ", this);
9874 return V;
9875 }
9876
9877 SDValue SelectionDAG::getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr,
9878 EVT MemVT, MachineMemOperand *MMO) {
9879 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
9880 SDVTList VTs = getVTList(MVT::Other);
9881 SDValue Ops[] = {Chain, Ptr};
9882 FoldingSetNodeID ID;
9883 AddNodeIDNode(ID, ISD::SET_FPENV_MEM, VTs, Ops);
9884 ID.AddInteger(MemVT.getRawBits());
9885 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
9886 ISD::SET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
9887 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
9888 ID.AddInteger(MMO->getFlags());
9889 void *IP = nullptr;
9890 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
9891 return SDValue(E, 0);
9892
9893 auto *N = newSDNode<FPStateAccessSDNode>(ISD::SET_FPENV_MEM, dl.getIROrder(),
9894 dl.getDebugLoc(), VTs, MemVT, MMO);
9895 createOperands(N, Ops);
9896
9897 CSEMap.InsertNode(N, IP);
9898 InsertNode(N);
9899 SDValue V(N, 0);
9900 NewSDValueDbgMsg(V, "Creating new node: ", this);
9901 return V;
9902 }
9903
9904 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
9905 // select undef, T, F --> T (if T is a constant), otherwise F
9906 // select ?, undef, F --> F
9907 // select ?, T, undef --> T
9908 if (Cond.isUndef())
9909 return isConstantValueOfAnyType(T) ? T : F;
9910 if (T.isUndef())
9911 return F;
9912 if (F.isUndef())
9913 return T;
9914
9915 // select true, T, F --> T
9916 // select false, T, F --> F
9917 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
9918 return CondC->isZero() ? F : T;
9919
9920 // TODO: This should simplify VSELECT with a non-zero constant condition
9921 // using something like this (but check boolean contents to be complete?):
9922 if (ConstantSDNode *CondC = isConstOrConstSplat(Cond, /*AllowUndefs*/ false,
9923 /*AllowTruncation*/ true))
9924 if (CondC->isZero())
9925 return F;
9926
9927 // select ?, T, T --> T
9928 if (T == F)
9929 return T;
9930
9931 return SDValue();
9932 }
9933
9934 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
9935 // shift undef, Y --> 0 (can always assume that the undef value is 0)
9936 if (X.isUndef())
9937 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
9938 // shift X, undef --> undef (because it may shift by the bitwidth)
9939 if (Y.isUndef())
9940 return getUNDEF(X.getValueType());
9941
9942 // shift 0, Y --> 0
9943 // shift X, 0 --> X
9944 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
9945 return X;
9946
9947 // shift X, C >= bitwidth(X) --> undef
9948 // All vector elements must be too big (or undef) to avoid partial undefs.
9949 auto isShiftTooBig = [X](ConstantSDNode *Val) {
9950 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
9951 };
9952 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
9953 return getUNDEF(X.getValueType());
9954
9955 // shift i1/vXi1 X, Y --> X (any non-zero shift amount is undefined).
9956 if (X.getValueType().getScalarType() == MVT::i1)
9957 return X;
9958
9959 return SDValue();
9960 }
9961
9962 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
9963 SDNodeFlags Flags) {
9964 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
9965 // (an undef operand can be chosen to be NaN/Inf), then the result of this
9966 // operation is poison. That result can be relaxed to undef.
9967 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
9968 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
9969 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
9970 (YC && YC->getValueAPF().isNaN());
9971 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
9972 (YC && YC->getValueAPF().isInfinity());
9973
9974 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
9975 return getUNDEF(X.getValueType());
9976
9977 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
9978 return getUNDEF(X.getValueType());
9979
9980 if (!YC)
9981 return SDValue();
9982
9983 // X + -0.0 --> X
9984 if (Opcode == ISD::FADD)
9985 if (YC->getValueAPF().isNegZero())
9986 return X;
9987
9988 // X - +0.0 --> X
9989 if (Opcode == ISD::FSUB)
9990 if (YC->getValueAPF().isPosZero())
9991 return X;
9992
9993 // X * 1.0 --> X
9994 // X / 1.0 --> X
9995 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
9996 if (YC->getValueAPF().isExactlyValue(1.0))
9997 return X;
9998
9999 // X * 0.0 --> 0.0
10000 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
10001 if (YC->getValueAPF().isZero())
10002 return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
10003
10004 return SDValue();
10005 }
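// Note on the FADD fold above: X + +0.0 cannot be folded to X without the
// 'nsz' flag, because (-0.0) + (+0.0) is +0.0 under IEEE-754, so the identity
// only holds for the -0.0 operand. A sketch of the folds, assuming scalar f32
// operands:
//   fadd %x, -0.0          --> %x
//   fsub %x, +0.0          --> %x
//   fmul %x, 1.0           --> %x
//   fmul nnan nsz %x, 0.0  --> 0.0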
10006
10007 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
10008 SDValue Ptr, SDValue SV, unsigned Align) {
10009 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
10010 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
10011 }
10012
10013 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
10014 ArrayRef<SDUse> Ops) {
10015 switch (Ops.size()) {
10016 case 0: return getNode(Opcode, DL, VT);
10017 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
10018 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
10019 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
10020 default: break;
10021 }
10022
10023 // Copy from an SDUse array into an SDValue array for use with
10024 // the regular getNode logic.
10025 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
10026 return getNode(Opcode, DL, VT, NewOps);
10027 }
10028
10029 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
10030 ArrayRef<SDValue> Ops) {
10031 SDNodeFlags Flags;
10032 if (Inserter)
10033 Flags = Inserter->getFlags();
10034 return getNode(Opcode, DL, VT, Ops, Flags);
10035 }
10036
10037 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
10038 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
10039 unsigned NumOps = Ops.size();
10040 switch (NumOps) {
10041 case 0: return getNode(Opcode, DL, VT);
10042 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
10043 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
10044 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
10045 default: break;
10046 }
10047
10048 #ifndef NDEBUG
10049 for (const auto &Op : Ops)
10050 assert(Op.getOpcode() != ISD::DELETED_NODE &&
10051 "Operand is DELETED_NODE!");
10052 #endif
10053
10054 switch (Opcode) {
10055 default: break;
10056 case ISD::BUILD_VECTOR:
10057 // Attempt to simplify BUILD_VECTOR.
10058 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
10059 return V;
10060 break;
10061 case ISD::CONCAT_VECTORS:
10062 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
10063 return V;
10064 break;
10065 case ISD::SELECT_CC:
10066 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
10067 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
10068 "LHS and RHS of condition must have same type!");
10069 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
10070 "True and False arms of SelectCC must have same type!");
10071 assert(Ops[2].getValueType() == VT &&
10072 "select_cc node must be of same type as true and false value!");
10073 assert((!Ops[0].getValueType().isVector() ||
10074 Ops[0].getValueType().getVectorElementCount() ==
10075 VT.getVectorElementCount()) &&
10076 "Expected select_cc with vector result to have the same sized "
10077 "comparison type!");
10078 break;
10079 case ISD::BR_CC:
10080 assert(NumOps == 5 && "BR_CC takes 5 operands!");
10081 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
10082 "LHS/RHS of comparison should match types!");
10083 break;
10084 case ISD::VP_ADD:
10085 case ISD::VP_SUB:
10086 // If this is a VP_ADD/VP_SUB mask operation, turn it into VP_XOR.
10087 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
10088 Opcode = ISD::VP_XOR;
10089 break;
10090 case ISD::VP_MUL:
10091 // If this is a VP_MUL mask operation, turn it into VP_AND.
10092 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
10093 Opcode = ISD::VP_AND;
10094 break;
10095 case ISD::VP_REDUCE_MUL:
10096 // If this is a VP_REDUCE_MUL mask operation, turn it into VP_REDUCE_AND.
10097 if (VT == MVT::i1)
10098 Opcode = ISD::VP_REDUCE_AND;
10099 break;
10100 case ISD::VP_REDUCE_ADD:
10101 // If this is a VP_REDUCE_ADD mask operation, turn it into VP_REDUCE_XOR.
10102 if (VT == MVT::i1)
10103 Opcode = ISD::VP_REDUCE_XOR;
10104 break;
10105 case ISD::VP_REDUCE_SMAX:
10106 case ISD::VP_REDUCE_UMIN:
10107 // If this is a VP_REDUCE_SMAX/VP_REDUCE_UMIN mask operation, turn it into
10108 // VP_REDUCE_AND.
10109 if (VT == MVT::i1)
10110 Opcode = ISD::VP_REDUCE_AND;
10111 break;
10112 case ISD::VP_REDUCE_SMIN:
10113 case ISD::VP_REDUCE_UMAX:
10114 // If this is a VP_REDUCE_SMIN/VP_REDUCE_UMAX mask operation, turn it into
10115 // VP_REDUCE_OR.
10116 if (VT == MVT::i1)
10117 Opcode = ISD::VP_REDUCE_OR;
10118 break;
10119 }
10120
10121 // Memoize nodes.
10122 SDNode *N;
10123 SDVTList VTs = getVTList(VT);
10124
10125 if (VT != MVT::Glue) {
10126 FoldingSetNodeID ID;
10127 AddNodeIDNode(ID, Opcode, VTs, Ops);
10128 void *IP = nullptr;
10129
10130 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
10131 return SDValue(E, 0);
10132
10133 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
10134 createOperands(N, Ops);
10135
10136 CSEMap.InsertNode(N, IP);
10137 } else {
10138 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
10139 createOperands(N, Ops);
10140 }
10141
10142 N->setFlags(Flags);
10143 InsertNode(N);
10144 SDValue V(N, 0);
10145 NewSDValueDbgMsg(V, "Creating new node: ", this);
10146 return V;
10147 }
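// An illustrative sketch of the VP mask canonicalization above, assuming a
// <4 x i1> mask type: vp.add(%a, %b, %mask, %evl) is rebuilt as
// vp.xor(%a, %b, %mask, %evl), since i1 addition is exactly XOR; likewise
// vp.mul becomes vp.and, and vp.reduce.add becomes vp.reduce.xor.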
10148
10149 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
10150 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
10151 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
10152 }
10153
10154 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10155 ArrayRef<SDValue> Ops) {
10156 SDNodeFlags Flags;
10157 if (Inserter)
10158 Flags = Inserter->getFlags();
10159 return getNode(Opcode, DL, VTList, Ops, Flags);
10160 }
10161
10162 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10163 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
10164 if (VTList.NumVTs == 1)
10165 return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
10166
10167 #ifndef NDEBUG
10168 for (const auto &Op : Ops)
10169 assert(Op.getOpcode() != ISD::DELETED_NODE &&
10170 "Operand is DELETED_NODE!");
10171 #endif
10172
10173 switch (Opcode) {
10174 case ISD::SADDO:
10175 case ISD::UADDO:
10176 case ISD::SSUBO:
10177 case ISD::USUBO: {
10178 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
10179 "Invalid add/sub overflow op!");
10180 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
10181 Ops[0].getValueType() == Ops[1].getValueType() &&
10182 Ops[0].getValueType() == VTList.VTs[0] &&
10183 "Binary operator types must match!");
10184 SDValue N1 = Ops[0], N2 = Ops[1];
10185 canonicalizeCommutativeBinop(Opcode, N1, N2);
10186
10187 // (X +- 0) -> X with zero-overflow.
10188 ConstantSDNode *N2CV = isConstOrConstSplat(N2, /*AllowUndefs*/ false,
10189 /*AllowTruncation*/ true);
10190 if (N2CV && N2CV->isZero()) {
10191 SDValue ZeroOverFlow = getConstant(0, DL, VTList.VTs[1]);
10192 return getNode(ISD::MERGE_VALUES, DL, VTList, {N1, ZeroOverFlow}, Flags);
10193 }
10194
10195 if (VTList.VTs[0].isVector() &&
10196 VTList.VTs[0].getVectorElementType() == MVT::i1 &&
10197 VTList.VTs[1].getVectorElementType() == MVT::i1) {
10198 SDValue F1 = getFreeze(N1);
10199 SDValue F2 = getFreeze(N2);
10200 // {vXi1,vXi1} (u/s)addo(vXi1 x, vXi1 y) -> {xor(x,y),and(x,y)}
10201 if (Opcode == ISD::UADDO || Opcode == ISD::SADDO)
10202 return getNode(ISD::MERGE_VALUES, DL, VTList,
10203 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
10204 getNode(ISD::AND, DL, VTList.VTs[1], F1, F2)},
10205 Flags);
10206 // {vXi1,vXi1} (u/s)subo(vXi1 x, vXi1 y) -> {xor(x,y),and(~x,y)}
10207 if (Opcode == ISD::USUBO || Opcode == ISD::SSUBO) {
10208 SDValue NotF1 = getNOT(DL, F1, VTList.VTs[0]);
10209 return getNode(ISD::MERGE_VALUES, DL, VTList,
10210 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
10211 getNode(ISD::AND, DL, VTList.VTs[1], NotF1, F2)},
10212 Flags);
10213 }
10214 }
10215 break;
10216 }
10217 case ISD::SADDO_CARRY:
10218 case ISD::UADDO_CARRY:
10219 case ISD::SSUBO_CARRY:
10220 case ISD::USUBO_CARRY:
10221 assert(VTList.NumVTs == 2 && Ops.size() == 3 &&
10222 "Invalid add/sub overflow op!");
10223 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
10224 Ops[0].getValueType() == Ops[1].getValueType() &&
10225 Ops[0].getValueType() == VTList.VTs[0] &&
10226 Ops[2].getValueType() == VTList.VTs[1] &&
10227 "Binary operator types must match!");
10228 break;
10229 case ISD::SMUL_LOHI:
10230 case ISD::UMUL_LOHI: {
10231 assert(VTList.NumVTs == 2 && Ops.size() == 2 && "Invalid mul lo/hi op!");
10232 assert(VTList.VTs[0].isInteger() && VTList.VTs[0] == VTList.VTs[1] &&
10233 VTList.VTs[0] == Ops[0].getValueType() &&
10234 VTList.VTs[0] == Ops[1].getValueType() &&
10235 "Binary operator types must match!");
10236 // Constant fold.
10237 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(Ops[0]);
10238 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ops[1]);
10239 if (LHS && RHS) {
10240 unsigned Width = VTList.VTs[0].getScalarSizeInBits();
10241 unsigned OutWidth = Width * 2;
10242 APInt Val = LHS->getAPIntValue();
10243 APInt Mul = RHS->getAPIntValue();
10244 if (Opcode == ISD::SMUL_LOHI) {
10245 Val = Val.sext(OutWidth);
10246 Mul = Mul.sext(OutWidth);
10247 } else {
10248 Val = Val.zext(OutWidth);
10249 Mul = Mul.zext(OutWidth);
10250 }
10251 Val *= Mul;
10252
10253 SDValue Hi =
10254 getConstant(Val.extractBits(Width, Width), DL, VTList.VTs[0]);
10255 SDValue Lo = getConstant(Val.trunc(Width), DL, VTList.VTs[0]);
10256 return getNode(ISD::MERGE_VALUES, DL, VTList, {Lo, Hi}, Flags);
10257 }
10258 break;
10259 }
10260 case ISD::FFREXP: {
10261 assert(VTList.NumVTs == 2 && Ops.size() == 1 && "Invalid ffrexp op!");
10262 assert(VTList.VTs[0].isFloatingPoint() && VTList.VTs[1].isInteger() &&
10263 VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch");
10264
10265 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Ops[0])) {
10266 int FrexpExp;
10267 APFloat FrexpMant =
10268 frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven);
10269 SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]);
10270 SDValue Result1 =
10271 getConstant(FrexpMant.isFinite() ? FrexpExp : 0, DL, VTList.VTs[1]);
10272 return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags);
10273 }
10274
10275 break;
10276 }
10277 case ISD::STRICT_FP_EXTEND:
10278 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
10279 "Invalid STRICT_FP_EXTEND!");
10280 assert(VTList.VTs[0].isFloatingPoint() &&
10281 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
10282 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
10283 "STRICT_FP_EXTEND result type should be vector iff the operand "
10284 "type is vector!");
10285 assert((!VTList.VTs[0].isVector() ||
10286 VTList.VTs[0].getVectorElementCount() ==
10287 Ops[1].getValueType().getVectorElementCount()) &&
10288 "Vector element count mismatch!");
10289 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
10290 "Invalid fpext node, dst <= src!");
10291 break;
10292 case ISD::STRICT_FP_ROUND:
10293 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
10294 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
10295 "STRICT_FP_ROUND result type should be vector iff the operand "
10296 "type is vector!");
10297 assert((!VTList.VTs[0].isVector() ||
10298 VTList.VTs[0].getVectorElementCount() ==
10299 Ops[1].getValueType().getVectorElementCount()) &&
10300 "Vector element count mismatch!");
10301 assert(VTList.VTs[0].isFloatingPoint() &&
10302 Ops[1].getValueType().isFloatingPoint() &&
10303 VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
10304 isa<ConstantSDNode>(Ops[2]) &&
10305 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
10306 "Invalid STRICT_FP_ROUND!");
10307 break;
10308 #if 0
10309 // FIXME: figure out how to safely handle things like
10310 // int foo(int x) { return 1 << (x & 255); }
10311 // int bar() { return foo(256); }
10312 case ISD::SRA_PARTS:
10313 case ISD::SRL_PARTS:
10314 case ISD::SHL_PARTS:
10315 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
10316 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
10317 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
10318 else if (N3.getOpcode() == ISD::AND)
10319 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
10320 // If the 'and' is only masking out bits that cannot affect the shift,
10321 // eliminate the 'and'.
10322 unsigned NumBits = VT.getScalarSizeInBits()*2;
10323 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
10324 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
10325 }
10326 break;
10327 #endif
10328 }
10329
10330 // Memoize the node unless it returns a glue result.
10331 SDNode *N;
10332 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
10333 FoldingSetNodeID ID;
10334 AddNodeIDNode(ID, Opcode, VTList, Ops);
10335 void *IP = nullptr;
10336 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
10337 return SDValue(E, 0);
10338
10339 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
10340 createOperands(N, Ops);
10341 CSEMap.InsertNode(N, IP);
10342 } else {
10343 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
10344 createOperands(N, Ops);
10345 }
10346
10347 N->setFlags(Flags);
10348 InsertNode(N);
10349 SDValue V(N, 0);
10350 NewSDValueDbgMsg(V, "Creating new node: ", this);
10351 return V;
10352 }
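// An illustrative sketch of the SMUL_LOHI constant folding above, for i8
// operands -3 and 100: the product -300, sign-extended into 16 bits, is
// 0xFED4, so Lo = 0xD4 and Hi = 0xFE are returned as a MERGE_VALUES pair.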
10353
10354 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
10355 SDVTList VTList) {
10356 return getNode(Opcode, DL, VTList, std::nullopt);
10357 }
10358
10359 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10360 SDValue N1) {
10361 SDValue Ops[] = { N1 };
10362 return getNode(Opcode, DL, VTList, Ops);
10363 }
10364
10365 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10366 SDValue N1, SDValue N2) {
10367 SDValue Ops[] = { N1, N2 };
10368 return getNode(Opcode, DL, VTList, Ops);
10369 }
10370
10371 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10372 SDValue N1, SDValue N2, SDValue N3) {
10373 SDValue Ops[] = { N1, N2, N3 };
10374 return getNode(Opcode, DL, VTList, Ops);
10375 }
10376
10377 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10378 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
10379 SDValue Ops[] = { N1, N2, N3, N4 };
10380 return getNode(Opcode, DL, VTList, Ops);
10381 }
10382
10383 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
10384 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
10385 SDValue N5) {
10386 SDValue Ops[] = { N1, N2, N3, N4, N5 };
10387 return getNode(Opcode, DL, VTList, Ops);
10388 }
10389
10390 SDVTList SelectionDAG::getVTList(EVT VT) {
10391 return makeVTList(SDNode::getValueTypeList(VT), 1);
10392 }
10393
10394 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
10395 FoldingSetNodeID ID;
10396 ID.AddInteger(2U);
10397 ID.AddInteger(VT1.getRawBits());
10398 ID.AddInteger(VT2.getRawBits());
10399
10400 void *IP = nullptr;
10401 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
10402 if (!Result) {
10403 EVT *Array = Allocator.Allocate<EVT>(2);
10404 Array[0] = VT1;
10405 Array[1] = VT2;
10406 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
10407 VTListMap.InsertNode(Result, IP);
10408 }
10409 return Result->getSDVTList();
10410 }
10411
10412 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
10413 FoldingSetNodeID ID;
10414 ID.AddInteger(3U);
10415 ID.AddInteger(VT1.getRawBits());
10416 ID.AddInteger(VT2.getRawBits());
10417 ID.AddInteger(VT3.getRawBits());
10418
10419 void *IP = nullptr;
10420 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
10421 if (!Result) {
10422 EVT *Array = Allocator.Allocate<EVT>(3);
10423 Array[0] = VT1;
10424 Array[1] = VT2;
10425 Array[2] = VT3;
10426 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
10427 VTListMap.InsertNode(Result, IP);
10428 }
10429 return Result->getSDVTList();
10430 }
10431
10432 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
10433 FoldingSetNodeID ID;
10434 ID.AddInteger(4U);
10435 ID.AddInteger(VT1.getRawBits());
10436 ID.AddInteger(VT2.getRawBits());
10437 ID.AddInteger(VT3.getRawBits());
10438 ID.AddInteger(VT4.getRawBits());
10439
10440 void *IP = nullptr;
10441 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
10442 if (!Result) {
10443 EVT *Array = Allocator.Allocate<EVT>(4);
10444 Array[0] = VT1;
10445 Array[1] = VT2;
10446 Array[2] = VT3;
10447 Array[3] = VT4;
10448 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
10449 VTListMap.InsertNode(Result, IP);
10450 }
10451 return Result->getSDVTList();
10452 }
10453
10454 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
10455 unsigned NumVTs = VTs.size();
10456 FoldingSetNodeID ID;
10457 ID.AddInteger(NumVTs);
10458 for (unsigned index = 0; index < NumVTs; index++) {
10459 ID.AddInteger(VTs[index].getRawBits());
10460 }
10461
10462 void *IP = nullptr;
10463 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
10464 if (!Result) {
10465 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
10466 llvm::copy(VTs, Array);
10467 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
10468 VTListMap.InsertNode(Result, IP);
10469 }
10470 return Result->getSDVTList();
10471 }
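// Because value-type lists are uniqued in VTListMap, equal queries share one
// interned array. A sketch (hypothetical caller, not from this file):
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "interned VT lists share storage");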
10472
10473
10474 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
10475 /// specified operands. If the resultant node already exists in the DAG,
10476 /// this does not modify the specified node, instead it returns the node that
10477 /// already exists. If the resultant node does not exist in the DAG, the
10478 /// input node is returned. As a degenerate case, if you specify the same
10479 /// input operands as the node already has, the input node is returned.
10480 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
10481 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
10482
10483 // Check to see if there is no change.
10484 if (Op == N->getOperand(0)) return N;
10485
10486 // See if the modified node already exists.
10487 void *InsertPos = nullptr;
10488 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
10489 return Existing;
10490
10491 // Nope it doesn't. Remove the node from its current place in the maps.
10492 if (InsertPos)
10493 if (!RemoveNodeFromCSEMaps(N))
10494 InsertPos = nullptr;
10495
10496 // Now we update the operands.
10497 N->OperandList[0].set(Op);
10498
10499 updateDivergence(N);
10500 // If this gets put into a CSE map, add it.
10501 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
10502 return N;
10503 }
10504
10505 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
10506 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
10507
10508 // Check to see if there is no change.
10509 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
10510 return N; // No operands changed, just return the input node.
10511
10512 // See if the modified node already exists.
10513 void *InsertPos = nullptr;
10514 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
10515 return Existing;
10516
10517 // Nope it doesn't. Remove the node from its current place in the maps.
10518 if (InsertPos)
10519 if (!RemoveNodeFromCSEMaps(N))
10520 InsertPos = nullptr;
10521
10522 // Now we update the operands.
10523 if (N->OperandList[0] != Op1)
10524 N->OperandList[0].set(Op1);
10525 if (N->OperandList[1] != Op2)
10526 N->OperandList[1].set(Op2);
10527
10528 updateDivergence(N);
10529 // If this gets put into a CSE map, add it.
10530 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
10531 return N;
10532 }
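// An illustrative sketch of the CSE behavior above: if t7 = (add t1, t2)
// already exists, then UpdateNodeOperands(t9 /* add t1, t3 */, t1, t2)
// returns the existing t7 and leaves t9 untouched, so callers must use the
// returned node rather than assuming N itself was mutated.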
10533
10534 SDNode *SelectionDAG::
10535 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
10536 SDValue Ops[] = { Op1, Op2, Op3 };
10537 return UpdateNodeOperands(N, Ops);
10538 }
10539
10540 SDNode *SelectionDAG::
10541 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
10542 SDValue Op3, SDValue Op4) {
10543 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
10544 return UpdateNodeOperands(N, Ops);
10545 }
10546
10547 SDNode *SelectionDAG::
10548 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
10549 SDValue Op3, SDValue Op4, SDValue Op5) {
10550 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
10551 return UpdateNodeOperands(N, Ops);
10552 }
10553
10554 SDNode *SelectionDAG::
10555 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
10556 unsigned NumOps = Ops.size();
10557 assert(N->getNumOperands() == NumOps &&
10558 "Update with wrong number of operands");
10559
10560 // If no operands changed just return the input node.
10561 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
10562 return N;
10563
10564 // See if the modified node already exists.
10565 void *InsertPos = nullptr;
10566 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
10567 return Existing;
10568
10569 // Nope it doesn't. Remove the node from its current place in the maps.
10570 if (InsertPos)
10571 if (!RemoveNodeFromCSEMaps(N))
10572 InsertPos = nullptr;
10573
10574 // Now we update the operands.
10575 for (unsigned i = 0; i != NumOps; ++i)
10576 if (N->OperandList[i] != Ops[i])
10577 N->OperandList[i].set(Ops[i]);
10578
10579 updateDivergence(N);
10580 // If this gets put into a CSE map, add it.
10581 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
10582 return N;
10583 }
10584
10585 /// DropOperands - Release the operands and set this node to have
10586 /// zero operands.
10587 void SDNode::DropOperands() {
10588 // Unlike the code in MorphNodeTo that does this, we don't need to
10589 // watch for dead nodes here.
10590 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
10591 SDUse &Use = *I++;
10592 Use.set(SDValue());
10593 }
10594 }
10595
10596 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
10597 ArrayRef<MachineMemOperand *> NewMemRefs) {
10598 if (NewMemRefs.empty()) {
10599 N->clearMemRefs();
10600 return;
10601 }
10602
10603 // Check if we can avoid allocating by storing a single reference directly.
10604 if (NewMemRefs.size() == 1) {
10605 N->MemRefs = NewMemRefs[0];
10606 N->NumMemRefs = 1;
10607 return;
10608 }
10609
10610 MachineMemOperand **MemRefsBuffer =
10611 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
10612 llvm::copy(NewMemRefs, MemRefsBuffer);
10613 N->MemRefs = MemRefsBuffer;
10614 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
10615 }
10616
10617 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
10618 /// machine opcode.
10619 ///
10620 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10621 EVT VT) {
10622 SDVTList VTs = getVTList(VT);
10623 return SelectNodeTo(N, MachineOpc, VTs, std::nullopt);
10624 }
10625
10626 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10627 EVT VT, SDValue Op1) {
10628 SDVTList VTs = getVTList(VT);
10629 SDValue Ops[] = { Op1 };
10630 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10631 }
10632
10633 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10634 EVT VT, SDValue Op1,
10635 SDValue Op2) {
10636 SDVTList VTs = getVTList(VT);
10637 SDValue Ops[] = { Op1, Op2 };
10638 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10639 }
10640
10641 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10642 EVT VT, SDValue Op1,
10643 SDValue Op2, SDValue Op3) {
10644 SDVTList VTs = getVTList(VT);
10645 SDValue Ops[] = { Op1, Op2, Op3 };
10646 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10647 }
10648
10649 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10650 EVT VT, ArrayRef<SDValue> Ops) {
10651 SDVTList VTs = getVTList(VT);
10652 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10653 }
10654
10655 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10656 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
10657 SDVTList VTs = getVTList(VT1, VT2);
10658 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10659 }
10660
10661 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10662 EVT VT1, EVT VT2) {
10663 SDVTList VTs = getVTList(VT1, VT2);
10664 return SelectNodeTo(N, MachineOpc, VTs, std::nullopt);
10665 }
10666
10667 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10668 EVT VT1, EVT VT2, EVT VT3,
10669 ArrayRef<SDValue> Ops) {
10670 SDVTList VTs = getVTList(VT1, VT2, VT3);
10671 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10672 }
10673
10674 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10675 EVT VT1, EVT VT2,
10676 SDValue Op1, SDValue Op2) {
10677 SDVTList VTs = getVTList(VT1, VT2);
10678 SDValue Ops[] = { Op1, Op2 };
10679 return SelectNodeTo(N, MachineOpc, VTs, Ops);
10680 }
10681
10682 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
10683 SDVTList VTs, ArrayRef<SDValue> Ops) {
10684 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
10685 // Reset the NodeID to -1.
10686 New->setNodeId(-1);
10687 if (New != N) {
10688 ReplaceAllUsesWith(N, New);
10689 RemoveDeadNode(N);
10690 }
10691 return New;
10692 }
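// Note on the ~MachineOpc above: machine opcodes are stored bitwise
// complemented, placing them in a negative range disjoint from the ISD::*
// opcodes; SDNode::isMachineOpcode() tests for this and getMachineOpcode()
// undoes the complement.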
10693
10694 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
10695 /// the line number information on the merged node since it is not possible to
10696 /// preserve the information that the operation is associated with multiple
10697 /// lines. This will make the debugger work better at -O0, where there is a
10698 /// higher probability of having other instructions associated with that line.
10699 ///
10700 /// For IROrder, we keep the smaller of the two
10701 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
10702 DebugLoc NLoc = N->getDebugLoc();
10703 if (NLoc && OptLevel == CodeGenOptLevel::None && OLoc.getDebugLoc() != NLoc) {
10704 N->setDebugLoc(DebugLoc());
10705 }
10706 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
10707 N->setIROrder(Order);
10708 return N;
10709 }
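// An illustrative sketch: at -O0, if a node built for source line 12 is
// CSE'd with an identical node built for line 30, the merged node's DebugLoc
// is dropped rather than arbitrarily attributed to one of the lines, while
// the smaller of the two IROrders is kept.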
10710
10711 /// MorphNodeTo - This *mutates* the specified node to have the specified
10712 /// return type, opcode, and operands.
10713 ///
10714 /// Note that MorphNodeTo returns the resultant node. If there is already a
10715 /// node of the specified opcode and operands, it returns that node instead of
10716 /// the current one. Note that the SDLoc need not be the same.
10717 ///
10718 /// Using MorphNodeTo is faster than creating a new node and swapping it in
10719 /// with ReplaceAllUsesWith both because it often avoids allocating a new
10720 /// node, and because it doesn't require CSE recalculation for any of
10721 /// the node's users.
10722 ///
10723 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
10724 /// As a consequence it isn't appropriate to use from within the DAG combiner or
10725 /// the legalizer which maintain worklists that would need to be updated when
10726 /// deleting things.
10727 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
10728 SDVTList VTs, ArrayRef<SDValue> Ops) {
10729 // If an identical node already exists, use it.
10730 void *IP = nullptr;
10731 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
10732 FoldingSetNodeID ID;
10733 AddNodeIDNode(ID, Opc, VTs, Ops);
10734 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
10735 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
10736 }
10737
10738 if (!RemoveNodeFromCSEMaps(N))
10739 IP = nullptr;
10740
10741 // Start the morphing.
10742 N->NodeType = Opc;
10743 N->ValueList = VTs.VTs;
10744 N->NumValues = VTs.NumVTs;
10745
10746 // Clear the operands list, updating used nodes to remove this from their
10747 // use list. Keep track of any operands that become dead as a result.
10748 SmallPtrSet<SDNode*, 16> DeadNodeSet;
10749 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
10750 SDUse &Use = *I++;
10751 SDNode *Used = Use.getNode();
10752 Use.set(SDValue());
10753 if (Used->use_empty())
10754 DeadNodeSet.insert(Used);
10755 }
10756
10757 // For a MachineSDNode, clear the memory reference information.
10758 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
10759 MN->clearMemRefs();
10760
10761 // Swap for an appropriately sized array from the recycler.
10762 removeOperands(N);
10763 createOperands(N, Ops);
10764
10765 // Delete any nodes that are still dead after adding the uses for the
10766 // new operands.
10767 if (!DeadNodeSet.empty()) {
10768 SmallVector<SDNode *, 16> DeadNodes;
10769 for (SDNode *N : DeadNodeSet)
10770 if (N->use_empty())
10771 DeadNodes.push_back(N);
10772 RemoveDeadNodes(DeadNodes);
10773 }
10774
10775 if (IP)
10776 CSEMap.InsertNode(N, IP); // Memoize the new node.
10777 return N;
10778 }
10779
10780 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
10781 unsigned OrigOpc = Node->getOpcode();
10782 unsigned NewOpc;
10783 switch (OrigOpc) {
10784 default:
10785 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
10786 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
10787 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
10788 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
10789 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
10790 #include "llvm/IR/ConstrainedOps.def"
10791 }
10792
10793 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
10794
10795 // We're taking this node out of the chain, so we need to re-link things.
10796 SDValue InputChain = Node->getOperand(0);
10797 SDValue OutputChain = SDValue(Node, 1);
10798 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
10799
10800 SmallVector<SDValue, 3> Ops;
10801 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
10802 Ops.push_back(Node->getOperand(i));
10803
10804 SDVTList VTs = getVTList(Node->getValueType(0));
10805 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
10806
10807 // MorphNodeTo can operate in two ways: if an existing node with the
10808 // specified operands exists, it can just return it. Otherwise, it
10809 // updates the node in place to have the requested operands.
10810 if (Res == Node) {
10811 // If we updated the node in place, reset the node ID. To the isel,
10812 // this should be just like a newly allocated machine node.
10813 Res->setNodeId(-1);
10814 } else {
10815 ReplaceAllUsesWith(Node, Res);
10816 RemoveDeadNode(Node);
10817 }
10818
10819 return Res;
10820 }
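// An illustrative sketch: mutating t5: f64,ch = strict_fsqrt ch0, %x yields
// t5: f64 = fsqrt %x; uses of the old chain result (t5, 1) have already been
// rewired to the input chain ch0 above.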
10821
10822 /// getMachineNode - These are used for target selectors to create a new node
10823 /// with specified return type(s), MachineInstr opcode, and operands.
10824 ///
10825 /// Note that getMachineNode returns the resultant node. If there is already a
10826 /// node of the specified opcode and operands, it returns that node instead of
10827 /// the current one.
10828 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10829 EVT VT) {
10830 SDVTList VTs = getVTList(VT);
10831 return getMachineNode(Opcode, dl, VTs, std::nullopt);
10832 }
10833
10834 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10835 EVT VT, SDValue Op1) {
10836 SDVTList VTs = getVTList(VT);
10837 SDValue Ops[] = { Op1 };
10838 return getMachineNode(Opcode, dl, VTs, Ops);
10839 }
10840
10841 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10842 EVT VT, SDValue Op1, SDValue Op2) {
10843 SDVTList VTs = getVTList(VT);
10844 SDValue Ops[] = { Op1, Op2 };
10845 return getMachineNode(Opcode, dl, VTs, Ops);
10846 }
10847
10848 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10849 EVT VT, SDValue Op1, SDValue Op2,
10850 SDValue Op3) {
10851 SDVTList VTs = getVTList(VT);
10852 SDValue Ops[] = { Op1, Op2, Op3 };
10853 return getMachineNode(Opcode, dl, VTs, Ops);
10854 }
10855
10856 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10857 EVT VT, ArrayRef<SDValue> Ops) {
10858 SDVTList VTs = getVTList(VT);
10859 return getMachineNode(Opcode, dl, VTs, Ops);
10860 }
10861
10862 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10863 EVT VT1, EVT VT2, SDValue Op1,
10864 SDValue Op2) {
10865 SDVTList VTs = getVTList(VT1, VT2);
10866 SDValue Ops[] = { Op1, Op2 };
10867 return getMachineNode(Opcode, dl, VTs, Ops);
10868 }
10869
10870 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10871 EVT VT1, EVT VT2, SDValue Op1,
10872 SDValue Op2, SDValue Op3) {
10873 SDVTList VTs = getVTList(VT1, VT2);
10874 SDValue Ops[] = { Op1, Op2, Op3 };
10875 return getMachineNode(Opcode, dl, VTs, Ops);
10876 }
10877
10878 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10879 EVT VT1, EVT VT2,
10880 ArrayRef<SDValue> Ops) {
10881 SDVTList VTs = getVTList(VT1, VT2);
10882 return getMachineNode(Opcode, dl, VTs, Ops);
10883 }
10884
10885 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10886 EVT VT1, EVT VT2, EVT VT3,
10887 SDValue Op1, SDValue Op2) {
10888 SDVTList VTs = getVTList(VT1, VT2, VT3);
10889 SDValue Ops[] = { Op1, Op2 };
10890 return getMachineNode(Opcode, dl, VTs, Ops);
10891 }
10892
10893 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10894 EVT VT1, EVT VT2, EVT VT3,
10895 SDValue Op1, SDValue Op2,
10896 SDValue Op3) {
10897 SDVTList VTs = getVTList(VT1, VT2, VT3);
10898 SDValue Ops[] = { Op1, Op2, Op3 };
10899 return getMachineNode(Opcode, dl, VTs, Ops);
10900 }
10901
10902 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10903 EVT VT1, EVT VT2, EVT VT3,
10904 ArrayRef<SDValue> Ops) {
10905 SDVTList VTs = getVTList(VT1, VT2, VT3);
10906 return getMachineNode(Opcode, dl, VTs, Ops);
10907 }
10908
10909 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
10910 ArrayRef<EVT> ResultTys,
10911 ArrayRef<SDValue> Ops) {
10912 SDVTList VTs = getVTList(ResultTys);
10913 return getMachineNode(Opcode, dl, VTs, Ops);
10914 }
10915
10916 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
10917 SDVTList VTs,
10918 ArrayRef<SDValue> Ops) {
10919 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
10920 MachineSDNode *N;
10921 void *IP = nullptr;
10922
10923 if (DoCSE) {
10924 FoldingSetNodeID ID;
10925 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
10926 IP = nullptr;
10927 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
10928 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
10929 }
10930 }
10931
10932 // Allocate a new MachineSDNode.
10933 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
10934 createOperands(N, Ops);
10935
10936 if (DoCSE)
10937 CSEMap.InsertNode(N, IP);
10938
10939 InsertNode(N);
10940 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
10941 return N;
10942 }
10943
10944 /// getTargetExtractSubreg - A convenience function for creating
10945 /// TargetOpcode::EXTRACT_SUBREG nodes.
10946 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
10947 SDValue Operand) {
10948 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
10949 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
10950 VT, Operand, SRIdxVal);
10951 return SDValue(Subreg, 0);
10952 }
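// Example usage (a sketch; the register width and subregister index are
// illustrative, e.g. X86's sub_32bit):
//   SDValue Lo =
//       DAG.getTargetExtractSubreg(X86::sub_32bit, DL, MVT::i32, In64);
// extracts the low 32 bits of a 64-bit value as an EXTRACT_SUBREG node.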
10953
10954 /// getTargetInsertSubreg - A convenience function for creating
10955 /// TargetOpcode::INSERT_SUBREG nodes.
10956 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
10957 SDValue Operand, SDValue Subreg) {
10958 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
10959 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
10960 VT, Operand, Subreg, SRIdxVal);
10961 return SDValue(Result, 0);
10962 }
10963
10964 /// getNodeIfExists - Get the specified node if it's already available, or
10965 /// else return NULL.
10966 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
10967 ArrayRef<SDValue> Ops) {
10968 SDNodeFlags Flags;
10969 if (Inserter)
10970 Flags = Inserter->getFlags();
10971 return getNodeIfExists(Opcode, VTList, Ops, Flags);
10972 }
10973
10974 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
10975 ArrayRef<SDValue> Ops,
10976 const SDNodeFlags Flags) {
10977 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
10978 FoldingSetNodeID ID;
10979 AddNodeIDNode(ID, Opcode, VTList, Ops);
10980 void *IP = nullptr;
10981 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
10982 E->intersectFlagsWith(Flags);
10983 return E;
10984 }
10985 }
10986 return nullptr;
10987 }
10988
10989 /// doesNodeExist - Check if a node exists without modifying its flags.
10990 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
10991 ArrayRef<SDValue> Ops) {
10992 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
10993 FoldingSetNodeID ID;
10994 AddNodeIDNode(ID, Opcode, VTList, Ops);
10995 void *IP = nullptr;
10996 if (FindNodeOrInsertPos(ID, SDLoc(), IP))
10997 return true;
10998 }
10999 return false;
11000 }
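// Note: glue-producing nodes are never entered into the CSE map, so for node
// kinds whose last result is MVT::Glue, both getNodeIfExists and
// doesNodeExist conservatively report that no such node exists.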
11001
11002 /// getDbgValue - Creates an SDDbgValue node.
11003 ///
11004 /// SDNode
11005 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
11006 SDNode *N, unsigned R, bool IsIndirect,
11007 const DebugLoc &DL, unsigned O) {
11008 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11009 "Expected inlined-at fields to agree");
11010 return new (DbgInfo->getAlloc())
11011 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
11012 {}, IsIndirect, DL, O,
11013 /*IsVariadic=*/false);
11014 }
11015
11016 /// Constant
11017 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
11018 DIExpression *Expr,
11019 const Value *C,
11020 const DebugLoc &DL, unsigned O) {
11021 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11022 "Expected inlined-at fields to agree");
11023 return new (DbgInfo->getAlloc())
11024 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
11025 /*IsIndirect=*/false, DL, O,
11026 /*IsVariadic=*/false);
11027 }
11028
11029 /// FrameIndex
11030 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
11031 DIExpression *Expr, unsigned FI,
11032 bool IsIndirect,
11033 const DebugLoc &DL,
11034 unsigned O) {
11035 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11036 "Expected inlined-at fields to agree");
11037 return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
11038 }
11039
11040 /// FrameIndex with dependencies
11041 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
11042 DIExpression *Expr, unsigned FI,
11043 ArrayRef<SDNode *> Dependencies,
11044 bool IsIndirect,
11045 const DebugLoc &DL,
11046 unsigned O) {
11047 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11048 "Expected inlined-at fields to agree");
11049 return new (DbgInfo->getAlloc())
11050 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
11051 Dependencies, IsIndirect, DL, O,
11052 /*IsVariadic=*/false);
11053 }
11054
11055 /// VReg
11056 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
11057 unsigned VReg, bool IsIndirect,
11058 const DebugLoc &DL, unsigned O) {
11059 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11060 "Expected inlined-at fields to agree");
11061 return new (DbgInfo->getAlloc())
11062 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
11063 {}, IsIndirect, DL, O,
11064 /*IsVariadic=*/false);
11065 }
11066
11067 SDDbgValue *SelectionDAG::getDbgValueList(DIVariable *Var, DIExpression *Expr,
11068 ArrayRef<SDDbgOperand> Locs,
11069 ArrayRef<SDNode *> Dependencies,
11070 bool IsIndirect, const DebugLoc &DL,
11071 unsigned O, bool IsVariadic) {
11072 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
11073 "Expected inlined-at fields to agree");
11074 return new (DbgInfo->getAlloc())
11075 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
11076 DL, O, IsVariadic);
11077 }
11078
11079 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
11080 unsigned OffsetInBits, unsigned SizeInBits,
11081 bool InvalidateDbg) {
11082 SDNode *FromNode = From.getNode();
11083 SDNode *ToNode = To.getNode();
11084 assert(FromNode && ToNode && "Can't modify dbg values");
11085
11086 // PR35338
11087 // TODO: assert(From != To && "Redundant dbg value transfer");
11088 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
11089 if (From == To || FromNode == ToNode)
11090 return;
11091
11092 if (!FromNode->getHasDebugValue())
11093 return;
11094
11095 SDDbgOperand FromLocOp =
11096 SDDbgOperand::fromNode(From.getNode(), From.getResNo());
11097 SDDbgOperand ToLocOp = SDDbgOperand::fromNode(To.getNode(), To.getResNo());
11098
11099 SmallVector<SDDbgValue *, 2> ClonedDVs;
11100 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
11101 if (Dbg->isInvalidated())
11102 continue;
11103
11104 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
11105
11106 // Create a new location ops vector that is equal to the old vector, but
11107 // with each instance of FromLocOp replaced with ToLocOp.
11108 bool Changed = false;
11109 auto NewLocOps = Dbg->copyLocationOps();
11110 std::replace_if(
11111 NewLocOps.begin(), NewLocOps.end(),
11112 [&Changed, FromLocOp](const SDDbgOperand &Op) {
11113 bool Match = Op == FromLocOp;
11114 Changed |= Match;
11115 return Match;
11116 },
11117 ToLocOp);
11118 // Ignore this SDDbgValue if we didn't find a matching location.
11119 if (!Changed)
11120 continue;
11121
11122 DIVariable *Var = Dbg->getVariable();
11123 auto *Expr = Dbg->getExpression();
11124 // If a fragment is requested, update the expression.
11125 if (SizeInBits) {
11126 // When splitting a larger (e.g., sign-extended) value whose
11127 // lower bits are described with an SDDbgValue, do not attempt
11128 // to transfer the SDDbgValue to the upper bits.
11129 if (auto FI = Expr->getFragmentInfo())
11130 if (OffsetInBits + SizeInBits > FI->SizeInBits)
11131 continue;
11132 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
11133 SizeInBits);
11134 if (!Fragment)
11135 continue;
11136 Expr = *Fragment;
11137 }
11138
11139 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
11140 // Clone the SDDbgValue and move it to To.
11141 SDDbgValue *Clone = getDbgValueList(
11142 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
11143 Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
11144 Dbg->isVariadic());
11145 ClonedDVs.push_back(Clone);
11146
11147 if (InvalidateDbg) {
11148 // Invalidate value and indicate the SDDbgValue should not be emitted.
11149 Dbg->setIsInvalidated();
11150 Dbg->setIsEmitted();
11151 }
11152 }
11153
11154 for (SDDbgValue *Dbg : ClonedDVs) {
11155 assert(is_contained(Dbg->getSDNodes(), ToNode) &&
11156 "Transferred DbgValues should depend on the new SDNode");
11157 AddDbgValue(Dbg, false);
11158 }
11159 }
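// An illustrative sketch: when a 64-bit value is split into 32-bit halves,
// transferDbgValues(From, Lo, /*OffsetInBits=*/0, /*SizeInBits=*/32) clones
// each debug value onto Lo with a DW_OP_LLVM_fragment expression covering
// bits [0, 32), and invalidates the originals so they are not emitted twice.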
11160
11161 void SelectionDAG::salvageDebugInfo(SDNode &N) {
11162 if (!N.getHasDebugValue())
11163 return;
11164
11165 SmallVector<SDDbgValue *, 2> ClonedDVs;
11166 for (auto *DV : GetDbgValues(&N)) {
11167 if (DV->isInvalidated())
11168 continue;
11169 switch (N.getOpcode()) {
11170 default:
11171 break;
11172 case ISD::ADD: {
11173 SDValue N0 = N.getOperand(0);
11174 SDValue N1 = N.getOperand(1);
11175 if (!isa<ConstantSDNode>(N0)) {
11176 bool RHSConstant = isa<ConstantSDNode>(N1);
11177 uint64_t Offset;
11178 if (RHSConstant)
11179 Offset = N.getConstantOperandVal(1);
11180 // We are not allowed to turn indirect debug values variadic, so
11181 // don't salvage those.
11182 if (!RHSConstant && DV->isIndirect())
11183 continue;
11184
11185 // Rewrite an ADD constant node into a DIExpression. Since we are
11186 // performing arithmetic to compute the variable's *value* in the
11187 // DIExpression, we need to mark the expression with a
11188 // DW_OP_stack_value.
11189 auto *DIExpr = DV->getExpression();
11190 auto NewLocOps = DV->copyLocationOps();
11191 bool Changed = false;
11192 size_t OrigLocOpsSize = NewLocOps.size();
11193 for (size_t i = 0; i < OrigLocOpsSize; ++i) {
11194 // We're not given a ResNo to compare against because the whole
11195 // node is going away. We know that any ISD::ADD only has one
11196 // result, so we can assume any node match is using the result.
11197 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
11198 NewLocOps[i].getSDNode() != &N)
11199 continue;
11200 NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
11201 if (RHSConstant) {
11202 SmallVector<uint64_t, 3> ExprOps;
11203 DIExpression::appendOffset(ExprOps, Offset);
11204 DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
11205 } else {
11206 // Convert to a variadic expression (if not already).
11207 // convertToVariadicExpression() returns a const pointer, so we use
11208 // a temporary const variable here.
11209 const auto *TmpDIExpr =
11210 DIExpression::convertToVariadicExpression(DIExpr);
11211 SmallVector<uint64_t, 3> ExprOps;
11212 ExprOps.push_back(dwarf::DW_OP_LLVM_arg);
11213 ExprOps.push_back(NewLocOps.size());
11214 ExprOps.push_back(dwarf::DW_OP_plus);
11215 SDDbgOperand RHS =
11216 SDDbgOperand::fromNode(N1.getNode(), N1.getResNo());
11217 NewLocOps.push_back(RHS);
11218 DIExpr = DIExpression::appendOpsToArg(TmpDIExpr, ExprOps, i, true);
11219 }
11220 Changed = true;
11221 }
11222 (void)Changed;
11223 assert(Changed && "Salvage target doesn't use N");
11224
11225 bool IsVariadic =
11226 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
11227
11228 auto AdditionalDependencies = DV->getAdditionalDependencies();
11229 SDDbgValue *Clone = getDbgValueList(
11230 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
11231 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
11232 ClonedDVs.push_back(Clone);
11233 DV->setIsInvalidated();
11234 DV->setIsEmitted();
11235 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
11236 N0.getNode()->dumprFull(this);
11237 dbgs() << " into " << *DIExpr << '\n');
11238 }
11239 break;
11240 }
11241 case ISD::TRUNCATE: {
11242 SDValue N0 = N.getOperand(0);
11243 TypeSize FromSize = N0.getValueSizeInBits();
11244 TypeSize ToSize = N.getValueSizeInBits(0);
11245
11246 DIExpression *DbgExpression = DV->getExpression();
11247 auto ExtOps = DIExpression::getExtOps(FromSize, ToSize, false);
11248 auto NewLocOps = DV->copyLocationOps();
11249 bool Changed = false;
11250 for (size_t i = 0; i < NewLocOps.size(); ++i) {
11251 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
11252 NewLocOps[i].getSDNode() != &N)
11253 continue;
11254
11255 NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
11256 DbgExpression = DIExpression::appendOpsToArg(DbgExpression, ExtOps, i);
11257 Changed = true;
11258 }
11259 assert(Changed && "Salvage target doesn't use N");
11260 (void)Changed;
11261
11262 SDDbgValue *Clone =
11263 getDbgValueList(DV->getVariable(), DbgExpression, NewLocOps,
11264 DV->getAdditionalDependencies(), DV->isIndirect(),
11265 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
11266
11267 ClonedDVs.push_back(Clone);
11268 DV->setIsInvalidated();
11269 DV->setIsEmitted();
11270 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
11271 dbgs() << " into " << *DbgExpression << '\n');
11272 break;
11273 }
11274 }
11275 }
11276
11277 for (SDDbgValue *Dbg : ClonedDVs) {
11278 assert(!Dbg->getSDNodes().empty() &&
11279 "Salvaged DbgValue should depend on a new SDNode");
11280 AddDbgValue(Dbg, false);
11281 }
11282 }
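// An illustrative sketch of the ISD::ADD case: a debug value bound to
// t3 = add %x, 8 is salvaged when t3 dies by rebinding the location to %x
// and rewriting the expression to
//   DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value).
// With a non-constant RHS, the RHS instead becomes an extra DW_OP_LLVM_arg
// operand combined via DW_OP_plus, making the expression variadic.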
11283
11284 /// Creates an SDDbgLabel node.
11285 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
11286 const DebugLoc &DL, unsigned O) {
11287 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
11288 "Expected inlined-at fields to agree");
11289 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
11290 }
11291
11292 namespace {
11293
11294 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
11295 /// pointed to by a use iterator is deleted, increment the use iterator
11296 /// so that it doesn't dangle.
11297 ///
11298 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
11299 SDNode::use_iterator &UI;
11300 SDNode::use_iterator &UE;
11301
11302 void NodeDeleted(SDNode *N, SDNode *E) override {
11303 // Increment the iterator as needed.
11304 while (UI != UE && N == *UI)
11305 ++UI;
11306 }
11307
11308 public:
11309 RAUWUpdateListener(SelectionDAG &d,
11310 SDNode::use_iterator &ui,
11311 SDNode::use_iterator &ue)
11312 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
11313 };
11314
11315 } // end anonymous namespace
11316
11317 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11318 /// This can cause recursive merging of nodes in the DAG.
11319 ///
11320 /// This version assumes From has a single result value.
11321 ///
11322 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
11323 SDNode *From = FromN.getNode();
11324 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
11325 "Cannot replace with this method!");
11326 assert(From != To.getNode() && "Cannot replace uses of with self");
11327
11328 // Preserve Debug Values
11329 transferDbgValues(FromN, To);
11330 // Preserve extra info.
11331 copyExtraInfo(From, To.getNode());
11332
11333 // Iterate over all the existing uses of From. New uses will be added
11334 // to the beginning of the use list, which we avoid visiting.
11335 // This specifically avoids visiting uses of From that arise while the
11336 // replacement is happening, because any such uses would be the result
11337 // of CSE: If an existing node looks like From after one of its operands
11338 // is replaced by To, we don't want to replace of all its users with To
11339 // too. See PR3018 for more info.
11340 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
11341 RAUWUpdateListener Listener(*this, UI, UE);
11342 while (UI != UE) {
11343 SDNode *User = *UI;
11344
11345 // This node is about to morph, remove its old self from the CSE maps.
11346 RemoveNodeFromCSEMaps(User);
11347
11348 // A user can appear in a use list multiple times, and when this
11349 // happens the uses are usually next to each other in the list.
11350 // To help reduce the number of CSE recomputations, process all
11351 // the uses of this user that we can find this way.
11352 do {
11353 SDUse &Use = UI.getUse();
11354 ++UI;
11355 Use.set(To);
11356 if (To->isDivergent() != From->isDivergent())
11357 updateDivergence(User);
11358 } while (UI != UE && *UI == User);
11359 // Now that we have modified User, add it back to the CSE maps. If it
11360 // already exists there, recursively merge the results together.
11361 AddModifiedNodeToCSEMaps(User);
11362 }
11363
11364 // If we just RAUW'd the root, take note.
11365 if (FromN == getRoot())
11366 setRoot(To);
11367 }
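// An illustrative sketch of the recursive merging noted above: replacing t1
// with t2 can make a user (add t1, %c) identical to an existing (add t2, %c);
// AddModifiedNodeToCSEMaps then merges the two, which may in turn make their
// users identical, cascading the merge up the DAG.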
11368
11369 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11370 /// This can cause recursive merging of nodes in the DAG.
11371 ///
11372 /// This version assumes that for each value of From, there is a
11373 /// corresponding value in To in the same position with the same type.
11374 ///
11375 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
11376 #ifndef NDEBUG
11377 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
11378 assert((!From->hasAnyUseOfValue(i) ||
11379 From->getValueType(i) == To->getValueType(i)) &&
11380 "Cannot use this version of ReplaceAllUsesWith!");
11381 #endif
11382
11383 // Handle the trivial case.
11384 if (From == To)
11385 return;
11386
11387 // Preserve Debug Info. Only do this if there's a use.
11388 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
11389 if (From->hasAnyUseOfValue(i)) {
11390 assert((i < To->getNumValues()) && "Invalid To location");
11391 transferDbgValues(SDValue(From, i), SDValue(To, i));
11392 }
11393 // Preserve extra info.
11394 copyExtraInfo(From, To);
11395
11396 // Iterate over just the existing users of From. See the comments in
11397 // the ReplaceAllUsesWith above.
11398 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
11399 RAUWUpdateListener Listener(*this, UI, UE);
11400 while (UI != UE) {
11401 SDNode *User = *UI;
11402
11403 // This node is about to morph, remove its old self from the CSE maps.
11404 RemoveNodeFromCSEMaps(User);
11405
11406 // A user can appear in a use list multiple times, and when this
11407 // happens the uses are usually next to each other in the list.
11408 // To help reduce the number of CSE recomputations, process all
11409 // the uses of this user that we can find this way.
11410 do {
11411 SDUse &Use = UI.getUse();
11412 ++UI;
11413 Use.setNode(To);
11414 if (To->isDivergent() != From->isDivergent())
11415 updateDivergence(User);
11416 } while (UI != UE && *UI == User);
11417
11418 // Now that we have modified User, add it back to the CSE maps. If it
11419 // already exists there, recursively merge the results together.
11420 AddModifiedNodeToCSEMaps(User);
11421 }
11422
11423 // If we just RAUW'd the root, take note.
11424 if (From == getRoot().getNode())
11425 setRoot(SDValue(To, getRoot().getResNo()));
11426 }
11427
11428 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11429 /// This can cause recursive merging of nodes in the DAG.
11430 ///
11431 /// This version can replace From with any result values. To must match the
11432 /// number and types of values returned by From.
11433 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
11434 if (From->getNumValues() == 1) // Handle the simple case efficiently.
11435 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
11436
11437 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) {
11438 // Preserve Debug Info.
11439 transferDbgValues(SDValue(From, i), To[i]);
11440 // Preserve extra info.
11441 copyExtraInfo(From, To[i].getNode());
11442 }
11443
11444 // Iterate over just the existing users of From. See the comments in
11445 // the ReplaceAllUsesWith above.
11446 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
11447 RAUWUpdateListener Listener(*this, UI, UE);
11448 while (UI != UE) {
11449 SDNode *User = *UI;
11450
11451 // This node is about to morph, remove its old self from the CSE maps.
11452 RemoveNodeFromCSEMaps(User);
11453
11454 // A user can appear in a use list multiple times, and when this happens the
11455 // uses are usually next to each other in the list. To help reduce the
11456 // number of CSE and divergence recomputations, process all the uses of this
11457 // user that we can find this way.
11458 bool To_IsDivergent = false;
11459 do {
11460 SDUse &Use = UI.getUse();
11461 const SDValue &ToOp = To[Use.getResNo()];
11462 ++UI;
11463 Use.set(ToOp);
11464 To_IsDivergent |= ToOp->isDivergent();
11465 } while (UI != UE && *UI == User);
11466
11467 if (To_IsDivergent != From->isDivergent())
11468 updateDivergence(User);
11469
11470 // Now that we have modified User, add it back to the CSE maps. If it
11471 // already exists there, recursively merge the results together.
11472 AddModifiedNodeToCSEMaps(User);
11473 }
11474
11475 // If we just RAUW'd the root, take note.
11476 if (From == getRoot().getNode())
11477 setRoot(SDValue(To[getRoot().getResNo()]));
11478 }
11479
11480 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
11481 /// uses of other values produced by From.getNode() alone. The Deleted
11482 /// vector is handled the same way as for ReplaceAllUsesWith.
11483 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
11484   // Handle the trivial case: replacing a value with itself is a no-op.
11485 if (From == To) return;
11486
11487   // If From has only one result value, the single-value RAUW handles it.
11488 if (From.getNode()->getNumValues() == 1) {
11489 ReplaceAllUsesWith(From, To);
11490 return;
11491 }
11492
11493 // Preserve Debug Info.
11494 transferDbgValues(From, To);
11495 copyExtraInfo(From.getNode(), To.getNode());
11496
11497 // Iterate over just the existing users of From. See the comments in
11498 // the ReplaceAllUsesWith above.
11499 SDNode::use_iterator UI = From.getNode()->use_begin(),
11500 UE = From.getNode()->use_end();
11501 RAUWUpdateListener Listener(*this, UI, UE);
11502 while (UI != UE) {
11503 SDNode *User = *UI;
11504 bool UserRemovedFromCSEMaps = false;
11505
11506 // A user can appear in a use list multiple times, and when this
11507 // happens the uses are usually next to each other in the list.
11508 // To help reduce the number of CSE recomputations, process all
11509 // the uses of this user that we can find this way.
11510 do {
11511 SDUse &Use = UI.getUse();
11512
11513 // Skip uses of different values from the same node.
11514 if (Use.getResNo() != From.getResNo()) {
11515 ++UI;
11516 continue;
11517 }
11518
11519 // If this node hasn't been modified yet, it's still in the CSE maps,
11520 // so remove its old self from the CSE maps.
11521 if (!UserRemovedFromCSEMaps) {
11522 RemoveNodeFromCSEMaps(User);
11523 UserRemovedFromCSEMaps = true;
11524 }
11525
11526 ++UI;
11527 Use.set(To);
11528 if (To->isDivergent() != From->isDivergent())
11529 updateDivergence(User);
11530 } while (UI != UE && *UI == User);
11531     // If this user never used the specific value, it was never removed from
11532     // the CSE maps, so there is nothing to re-add.
11533 if (!UserRemovedFromCSEMaps)
11534 continue;
11535
11536 // Now that we have modified User, add it back to the CSE maps. If it
11537 // already exists there, recursively merge the results together.
11538 AddModifiedNodeToCSEMaps(User);
11539 }
11540
11541 // If we just RAUW'd the root, take note.
11542 if (From == getRoot())
11543 setRoot(To);
11544 }
11545
11546 namespace {
11547
11548 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
11549 /// to record information about a use.
11550 struct UseMemo {
11551 SDNode *User;
11552 unsigned Index;
11553 SDUse *Use;
11554 };
11555
11556 /// operator< - Sort Memos by User.
11557 bool operator<(const UseMemo &L, const UseMemo &R) {
11558 return (intptr_t)L.User < (intptr_t)R.User;
11559 }
11560
11561 /// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node
11562 /// pointed to by a UseMemo is deleted, set the User to nullptr to indicate that
11563 /// the node already has been taken care of recursively.
11564 class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener {
11565 SmallVector<UseMemo, 4> &Uses;
11566
11567   void NodeDeleted(SDNode *N, SDNode *E) override {
11568 for (UseMemo &Memo : Uses)
11569 if (Memo.User == N)
11570 Memo.User = nullptr;
11571 }
11572
11573 public:
11574   RAUOVWUpdateListener(SelectionDAG &d, SmallVector<UseMemo, 4> &uses)
11575 : SelectionDAG::DAGUpdateListener(d), Uses(uses) {}
11576 };
11577
11578 } // end anonymous namespace
11579
11580 bool SelectionDAG::calculateDivergence(SDNode *N) {
11581 if (TLI->isSDNodeAlwaysUniform(N)) {
11582 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) &&
11583 "Conflicting divergence information!");
11584 return false;
11585 }
11586 if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA))
11587 return true;
11588 for (const auto &Op : N->ops()) {
11589 if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
11590 return true;
11591 }
11592 return false;
11593 }
11594
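// Sketch of the propagation below (nodes hypothetical): when N's recomputed
// divergence bit changes, all of N's users are queued for recomputation in
// turn; users whose bit is unchanged terminate the walk, so only the affected
// region of the DAG is revisited.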
11595 void SelectionDAG::updateDivergence(SDNode *N) {
11596 SmallVector<SDNode *, 16> Worklist(1, N);
11597 do {
11598 N = Worklist.pop_back_val();
11599 bool IsDivergent = calculateDivergence(N);
11600 if (N->SDNodeBits.IsDivergent != IsDivergent) {
11601 N->SDNodeBits.IsDivergent = IsDivergent;
11602 llvm::append_range(Worklist, N->uses());
11603 }
11604 } while (!Worklist.empty());
11605 }
11606
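// CreateTopologicalOrder performs a Kahn-style topological sort: nodes with no
// remaining unprocessed operands are appended to Order, and each appended node
// decrements the pending-operand count of its users. For example (nodes
// hypothetical), if node C has operands A and B, then A and B are emitted
// before C.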
11607 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
11608 DenseMap<SDNode *, unsigned> Degree;
11609 Order.reserve(AllNodes.size());
11610 for (auto &N : allnodes()) {
11611 unsigned NOps = N.getNumOperands();
11612 Degree[&N] = NOps;
11613 if (0 == NOps)
11614 Order.push_back(&N);
11615 }
11616 for (size_t I = 0; I != Order.size(); ++I) {
11617 SDNode *N = Order[I];
11618 for (auto *U : N->uses()) {
11619 unsigned &UnsortedOps = Degree[U];
11620 if (0 == --UnsortedOps)
11621 Order.push_back(U);
11622 }
11623 }
11624 }
11625
11626 #if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
11627 void SelectionDAG::VerifyDAGDivergence() {
11628 std::vector<SDNode *> TopoOrder;
11629 CreateTopologicalOrder(TopoOrder);
11630 for (auto *N : TopoOrder) {
11631 assert(calculateDivergence(N) == N->isDivergent() &&
11632 "Divergence bit inconsistency detected");
11633 }
11634 }
11635 #endif
11636
11637 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
11638 /// uses of other values produced by From.getNode() alone. The same value
11639 /// may appear in both the From and To list. The Deleted vector is
11640 /// handled the same way as for ReplaceAllUsesWith.
11641 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
11642 const SDValue *To,
11643 unsigned Num){
11644 // Handle the simple, trivial case efficiently.
11645 if (Num == 1)
11646 return ReplaceAllUsesOfValueWith(*From, *To);
11647
11648 transferDbgValues(*From, *To);
11649 copyExtraInfo(From->getNode(), To->getNode());
11650
11651   // Record all of the existing uses up front. This avoids problems with
11652   // new uses that are introduced while the replacement is in
11653   // progress.
11654 SmallVector<UseMemo, 4> Uses;
11655 for (unsigned i = 0; i != Num; ++i) {
11656 unsigned FromResNo = From[i].getResNo();
11657 SDNode *FromNode = From[i].getNode();
11658 for (SDNode::use_iterator UI = FromNode->use_begin(),
11659 E = FromNode->use_end(); UI != E; ++UI) {
11660 SDUse &Use = UI.getUse();
11661 if (Use.getResNo() == FromResNo) {
11662 UseMemo Memo = { *UI, i, &Use };
11663 Uses.push_back(Memo);
11664 }
11665 }
11666 }
11667
11668 // Sort the uses, so that all the uses from a given User are together.
11669 llvm::sort(Uses);
11670 RAUOVWUpdateListener Listener(*this, Uses);
11671
11672 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
11673 UseIndex != UseIndexEnd; ) {
11674 // We know that this user uses some value of From. If it is the right
11675 // value, update it.
11676 SDNode *User = Uses[UseIndex].User;
11677 // If the node has been deleted by recursive CSE updates when updating
11678 // another node, then just skip this entry.
11679 if (User == nullptr) {
11680 ++UseIndex;
11681 continue;
11682 }
11683
11684 // This node is about to morph, remove its old self from the CSE maps.
11685 RemoveNodeFromCSEMaps(User);
11686
11687 // The Uses array is sorted, so all the uses for a given User
11688 // are next to each other in the list.
11689 // To help reduce the number of CSE recomputations, process all
11690 // the uses of this user that we can find this way.
11691 do {
11692 unsigned i = Uses[UseIndex].Index;
11693 SDUse &Use = *Uses[UseIndex].Use;
11694 ++UseIndex;
11695
11696 Use.set(To[i]);
11697 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
11698
11699 // Now that we have modified User, add it back to the CSE maps. If it
11700 // already exists there, recursively merge the results together.
11701 AddModifiedNodeToCSEMaps(User);
11702 }
11703 }
11704
11705 /// AssignTopologicalOrder - Assign a unique node id to each node in the DAG
11706 /// based on their topological order, reordering the node list in place. It
11707 /// returns the number of nodes, which is one more than the maximum id.
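/// For example (nodes hypothetical): given the EntryToken E, leaves A and B,
/// and a node C with operands A and B, the pass assigns E=0, A=1, B=2, and
/// only then C=3, once both of C's operands have been placed.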
11708 unsigned SelectionDAG::AssignTopologicalOrder() {
11709 unsigned DAGSize = 0;
11710
11711 // SortedPos tracks the progress of the algorithm. Nodes before it are
11712 // sorted, nodes after it are unsorted. When the algorithm completes
11713 // it is at the end of the list.
11714 allnodes_iterator SortedPos = allnodes_begin();
11715
11716 // Visit all the nodes. Move nodes with no operands to the front of
11717 // the list immediately. Annotate nodes that do have operands with their
11718 // operand count. Before we do this, the Node Id fields of the nodes
11719 // may contain arbitrary values. After, the Node Id fields for nodes
11720 // before SortedPos will contain the topological sort index, and the
11721   // Node Id fields for nodes at SortedPos and after will contain the
11722 // count of outstanding operands.
11723 for (SDNode &N : llvm::make_early_inc_range(allnodes())) {
11724 checkForCycles(&N, this);
11725 unsigned Degree = N.getNumOperands();
11726 if (Degree == 0) {
11727       // A node with no operands: move it into sorted position immediately.
11728 N.setNodeId(DAGSize++);
11729 allnodes_iterator Q(&N);
11730 if (Q != SortedPos)
11731 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
11732 assert(SortedPos != AllNodes.end() && "Overran node list");
11733 ++SortedPos;
11734 } else {
11735 // Temporarily use the Node Id as scratch space for the degree count.
11736 N.setNodeId(Degree);
11737 }
11738 }
11739
11740 // Visit all the nodes. As we iterate, move nodes into sorted order,
11741 // such that by the time the end is reached all nodes will be sorted.
11742 for (SDNode &Node : allnodes()) {
11743 SDNode *N = &Node;
11744 checkForCycles(N, this);
11745     // N is in sorted position, so each of its users has one fewer operand
11746     // left to sort.
11747 for (SDNode *P : N->uses()) {
11748 unsigned Degree = P->getNodeId();
11749 assert(Degree != 0 && "Invalid node degree");
11750 --Degree;
11751 if (Degree == 0) {
11752         // All of P's operands are sorted, so P may be sorted now.
11753 P->setNodeId(DAGSize++);
11754 if (P->getIterator() != SortedPos)
11755 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
11756 assert(SortedPos != AllNodes.end() && "Overran node list");
11757 ++SortedPos;
11758 } else {
11759 // Update P's outstanding operand count.
11760 P->setNodeId(Degree);
11761 }
11762 }
11763 if (Node.getIterator() == SortedPos) {
11764 #ifndef NDEBUG
11765 allnodes_iterator I(N);
11766 SDNode *S = &*++I;
11767 dbgs() << "Overran sorted position:\n";
11768 S->dumprFull(this); dbgs() << "\n";
11769 dbgs() << "Checking if this is due to cycles\n";
11770 checkForCycles(this, true);
11771 #endif
11772 llvm_unreachable(nullptr);
11773 }
11774 }
11775
11776 assert(SortedPos == AllNodes.end() &&
11777 "Topological sort incomplete!");
11778 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
11779 "First node in topological sort is not the entry token!");
11780 assert(AllNodes.front().getNodeId() == 0 &&
11781 "First node in topological sort has non-zero id!");
11782 assert(AllNodes.front().getNumOperands() == 0 &&
11783 "First node in topological sort has operands!");
11784   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
11785          "Last node in topological sort has unexpected id!");
11786   assert(AllNodes.back().use_empty() &&
11787          "Last node in topological sort has users!");
11788 assert(DAGSize == allnodes_size() && "Node count mismatch!");
11789 return DAGSize;
11790 }
11791
11792 /// AddDbgValue - Add a dbg_value SDNode. Any node in DB->getSDNodes() that
11793 /// is non-null produces a value referenced by the dbg_value.
11794 void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
11795 for (SDNode *SD : DB->getSDNodes()) {
11796 if (!SD)
11797 continue;
11798 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
11799 SD->setHasDebugValue(true);
11800 }
11801 DbgInfo->add(DB, isParameter);
11802 }
11803
11804 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }
11805
11806 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
11807 SDValue NewMemOpChain) {
11808 assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
11809 assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
11810 // The new memory operation must have the same position as the old load in
11811 // terms of memory dependency. Create a TokenFactor for the old load and new
11812 // memory operation and update uses of the old load's output chain to use that
11813 // TokenFactor.
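  // For illustration (chains hypothetical): a user previously chained to the
  // old load's output chain ends up chained to
  // TokenFactor(OldChain, NewMemOpChain), ordering it after both operations.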
11814 if (OldChain == NewMemOpChain || OldChain.use_empty())
11815 return NewMemOpChain;
11816
11817 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
11818 OldChain, NewMemOpChain);
11819 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
11820 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
11821 return TokenFactor;
11822 }
11823
11824 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
11825 SDValue NewMemOp) {
11826 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
11827 SDValue OldChain = SDValue(OldLoad, 1);
11828 SDValue NewMemOpChain = NewMemOp.getValue(1);
11829 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
11830 }
11831
11832 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
11833 Function **OutFunction) {
11834 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
11835
11836 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
11837 auto *Module = MF->getFunction().getParent();
11838 auto *Function = Module->getFunction(Symbol);
11839
11840 if (OutFunction != nullptr)
11841 *OutFunction = Function;
11842
11843 if (Function != nullptr) {
11844 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
11845 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
11846 }
11847
11848 std::string ErrorStr;
11849 raw_string_ostream ErrorFormatter(ErrorStr);
11850 ErrorFormatter << "Undefined external symbol ";
11851 ErrorFormatter << '"' << Symbol << '"';
11852 report_fatal_error(Twine(ErrorStr));
11853 }
11854
11855 //===----------------------------------------------------------------------===//
11856 // SDNode Class
11857 //===----------------------------------------------------------------------===//
11858
11859 bool llvm::isNullConstant(SDValue V) {
11860 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
11861 return Const != nullptr && Const->isZero();
11862 }
11863
11864 bool llvm::isNullConstantOrUndef(SDValue V) {
11865 return V.isUndef() || isNullConstant(V);
11866 }
11867
11868 bool llvm::isNullFPConstant(SDValue V) {
11869 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
11870 return Const != nullptr && Const->isZero() && !Const->isNegative();
11871 }
11872
11873 bool llvm::isAllOnesConstant(SDValue V) {
11874 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
11875 return Const != nullptr && Const->isAllOnes();
11876 }
11877
11878 bool llvm::isOneConstant(SDValue V) {
11879 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
11880 return Const != nullptr && Const->isOne();
11881 }
11882
11883 bool llvm::isMinSignedConstant(SDValue V) {
11884 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
11885 return Const != nullptr && Const->isMinSignedValue();
11886 }
11887
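// Illustrative identities recognized below (operands hypothetical):
//   (add x, 0), (or x, 0), (xor x, 0), (mul x, 1), (and x, -1),
//   (sub x, 0) and (sdiv x, 1) only when the constant is operand 1, and
//   (fadd x, -0.0), with +0.0 also accepted under no-signed-zeros.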
11888 bool llvm::isNeutralConstant(unsigned Opcode, SDNodeFlags Flags, SDValue V,
11889 unsigned OperandNo) {
11890 // NOTE: The cases should match with IR's ConstantExpr::getBinOpIdentity().
11891 // TODO: Target-specific opcodes could be added.
11892 if (auto *ConstV = isConstOrConstSplat(V, /*AllowUndefs*/ false,
11893 /*AllowTruncation*/ true)) {
11894 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
11895 switch (Opcode) {
11896 case ISD::ADD:
11897 case ISD::OR:
11898 case ISD::XOR:
11899 case ISD::UMAX:
11900 return Const.isZero();
11901 case ISD::MUL:
11902 return Const.isOne();
11903 case ISD::AND:
11904 case ISD::UMIN:
11905 return Const.isAllOnes();
11906 case ISD::SMAX:
11907 return Const.isMinSignedValue();
11908 case ISD::SMIN:
11909 return Const.isMaxSignedValue();
11910 case ISD::SUB:
11911 case ISD::SHL:
11912 case ISD::SRA:
11913 case ISD::SRL:
11914 return OperandNo == 1 && Const.isZero();
11915 case ISD::UDIV:
11916 case ISD::SDIV:
11917 return OperandNo == 1 && Const.isOne();
11918 }
11919 } else if (auto *ConstFP = isConstOrConstSplatFP(V)) {
11920 switch (Opcode) {
11921 case ISD::FADD:
11922 return ConstFP->isZero() &&
11923 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
11924 case ISD::FSUB:
11925 return OperandNo == 1 && ConstFP->isZero() &&
11926 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
11927 case ISD::FMUL:
11928 return ConstFP->isExactlyValue(1.0);
11929 case ISD::FDIV:
11930 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
11931 case ISD::FMINNUM:
11932 case ISD::FMAXNUM: {
11933 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
11934 EVT VT = V.getValueType();
11935 const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT);
11936 APFloat NeutralAF = !Flags.hasNoNaNs()
11937 ? APFloat::getQNaN(Semantics)
11938 : !Flags.hasNoInfs()
11939 ? APFloat::getInf(Semantics)
11940 : APFloat::getLargest(Semantics);
11941 if (Opcode == ISD::FMAXNUM)
11942 NeutralAF.changeSign();
11943
11944 return ConstFP->isExactlyValue(NeutralAF);
11945 }
11946 }
11947 }
11948 return false;
11949 }
11950
11951 SDValue llvm::peekThroughBitcasts(SDValue V) {
11952 while (V.getOpcode() == ISD::BITCAST)
11953 V = V.getOperand(0);
11954 return V;
11955 }
11956
11957 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
11958 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
11959 V = V.getOperand(0);
11960 return V;
11961 }
11962
11963 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
11964 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
11965 V = V.getOperand(0);
11966 return V;
11967 }
11968
11969 SDValue llvm::peekThroughTruncates(SDValue V) {
11970 while (V.getOpcode() == ISD::TRUNCATE)
11971 V = V.getOperand(0);
11972 return V;
11973 }
11974
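// isBitwiseNot below matches the canonical 'not' pattern (xor x, -1),
// including splats of -1 for vectors, e.g. (xor v4i32:x, (splat_vector -1)).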
11975 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
11976 if (V.getOpcode() != ISD::XOR)
11977 return false;
11978 V = peekThroughBitcasts(V.getOperand(1));
11979 unsigned NumBits = V.getScalarValueSizeInBits();
11980 ConstantSDNode *C =
11981 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
11982 return C && (C->getAPIntValue().countr_one() >= NumBits);
11983 }
11984
11985 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
11986 bool AllowTruncation) {
11987 EVT VT = N.getValueType();
11988 APInt DemandedElts = VT.isFixedLengthVector()
11989 ? APInt::getAllOnes(VT.getVectorMinNumElements())
11990 : APInt(1, 1);
11991 return isConstOrConstSplat(N, DemandedElts, AllowUndefs, AllowTruncation);
11992 }
11993
11994 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
11995 bool AllowUndefs,
11996 bool AllowTruncation) {
11997 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
11998 return CN;
11999
12000 // SplatVectors can truncate their operands. Ignore that case here unless
12001 // AllowTruncation is set.
12002 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
12003 EVT VecEltVT = N->getValueType(0).getVectorElementType();
12004 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12005 EVT CVT = CN->getValueType(0);
12006 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
12007 if (AllowTruncation || CVT == VecEltVT)
12008 return CN;
12009 }
12010 }
12011
12012 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
12013 BitVector UndefElements;
12014 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
12015
12016 // BuildVectors can truncate their operands. Ignore that case here unless
12017 // AllowTruncation is set.
12018 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
12019 if (CN && (UndefElements.none() || AllowUndefs)) {
12020 EVT CVT = CN->getValueType(0);
12021 EVT NSVT = N.getValueType().getScalarType();
12022 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
12023 if (AllowTruncation || (CVT == NSVT))
12024 return CN;
12025 }
12026 }
12027
12028 return nullptr;
12029 }
12030
12031 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
12032 EVT VT = N.getValueType();
12033 APInt DemandedElts = VT.isFixedLengthVector()
12034 ? APInt::getAllOnes(VT.getVectorMinNumElements())
12035 : APInt(1, 1);
12036 return isConstOrConstSplatFP(N, DemandedElts, AllowUndefs);
12037 }
12038
12039 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
12040 const APInt &DemandedElts,
12041 bool AllowUndefs) {
12042 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
12043 return CN;
12044
12045 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
12046 BitVector UndefElements;
12047 ConstantFPSDNode *CN =
12048 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
12049 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
12050 if (CN && (UndefElements.none() || AllowUndefs))
12051 return CN;
12052 }
12053
12054 if (N.getOpcode() == ISD::SPLAT_VECTOR)
12055 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
12056 return CN;
12057
12058 return nullptr;
12059 }
12060
12061 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
12062 // TODO: may want to use peekThroughBitcast() here.
12063 ConstantSDNode *C =
12064 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
12065 return C && C->isZero();
12066 }
12067
12068 bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
12069 ConstantSDNode *C =
12070 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation*/ true);
12071 return C && C->isOne();
12072 }
12073
12074 bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
12075 N = peekThroughBitcasts(N);
12076 unsigned BitWidth = N.getScalarValueSizeInBits();
12077 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
12078 return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
12079 }
12080
12081 HandleSDNode::~HandleSDNode() {
12082 DropOperands();
12083 }
12084
12085 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
12086 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
12087 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
12088 MemSDNodeBits.IsVolatile = MMO->isVolatile();
12089 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
12090 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
12091 MemSDNodeBits.IsInvariant = MMO->isInvariant();
12092
12093 // We check here that the size of the memory operand fits within the size of
12094 // the MMO. This is because the MMO might indicate only a possible address
12095 // range instead of specifying the affected memory addresses precisely.
12096 assert(
12097 (!MMO->getType().isValid() ||
12098 TypeSize::isKnownLE(memvt.getStoreSize(), MMO->getSize().getValue())) &&
12099 "Size mismatch!");
12100 }
12101
12102 /// Profile - Gather unique data for the node.
12103 ///
12104 void SDNode::Profile(FoldingSetNodeID &ID) const {
12105 AddNodeIDNode(ID, this);
12106 }
12107
12108 namespace {
12109
12110 struct EVTArray {
12111 std::vector<EVT> VTs;
12112
12113   EVTArray() {
12114 VTs.reserve(MVT::VALUETYPE_SIZE);
12115 for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i)
12116 VTs.push_back(MVT((MVT::SimpleValueType)i));
12117 }
12118 };
12119
12120 } // end anonymous namespace
12121
12122 /// getValueTypeList - Return a pointer to the specified value type.
12123 ///
12124 const EVT *SDNode::getValueTypeList(EVT VT) {
12125 static std::set<EVT, EVT::compareRawBits> EVTs;
12126 static EVTArray SimpleVTArray;
12127 static sys::SmartMutex<true> VTMutex;
12128
12129 if (VT.isExtended()) {
12130 sys::SmartScopedLock<true> Lock(VTMutex);
12131 return &(*EVTs.insert(VT).first);
12132 }
12133 assert(VT.getSimpleVT() < MVT::VALUETYPE_SIZE && "Value type out of range!");
12134 return &SimpleVTArray.VTs[VT.getSimpleVT().SimpleTy];
12135 }
12136
12137 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
12138 /// indicated value. This method ignores uses of other values defined by this
12139 /// operation.
12140 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
12141 assert(Value < getNumValues() && "Bad value!");
12142
12143 // TODO: Only iterate over uses of a given value of the node
12144 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
12145 if (UI.getUse().getResNo() == Value) {
12146 if (NUses == 0)
12147 return false;
12148 --NUses;
12149 }
12150 }
12151
12152 // Found exactly the right number of uses?
12153 return NUses == 0;
12154 }
12155
12156 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
12157 /// value. This method ignores uses of other values defined by this operation.
12158 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
12159 assert(Value < getNumValues() && "Bad value!");
12160
12161 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
12162 if (UI.getUse().getResNo() == Value)
12163 return true;
12164
12165 return false;
12166 }
12167
12168 /// isOnlyUserOf - Return true if this node is the only use of N.
12169 bool SDNode::isOnlyUserOf(const SDNode *N) const {
12170 bool Seen = false;
12171 for (const SDNode *User : N->uses()) {
12172 if (User == this)
12173 Seen = true;
12174 else
12175 return false;
12176 }
12177
12178 return Seen;
12179 }
12180
12181 /// Return true if the only users of N are contained in Nodes.
12182 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
12183 bool Seen = false;
12184 for (const SDNode *User : N->uses()) {
12185 if (llvm::is_contained(Nodes, User))
12186 Seen = true;
12187 else
12188 return false;
12189 }
12190
12191 return Seen;
12192 }
12193
12194 /// isOperand - Return true if this node is an operand of N.
12195 bool SDValue::isOperandOf(const SDNode *N) const {
12196 return is_contained(N->op_values(), *this);
12197 }
12198
12199 bool SDNode::isOperandOf(const SDNode *N) const {
12200 return any_of(N->op_values(),
12201 [this](SDValue Op) { return this == Op.getNode(); });
12202 }
12203
12204 /// reachesChainWithoutSideEffects - Return true if this operand (which must
12205 /// be a chain) reaches the specified operand without crossing any
12206 /// side-effecting instructions on any chain path. In practice, this looks
12207 /// through token factors and non-volatile loads. In order to remain efficient,
12208 /// this only looks a couple of nodes in; it does not do an exhaustive search.
12209 ///
12210 /// Note that we only need to examine chains when we're searching for
12211 /// side-effects; SelectionDAG requires that all side-effects are represented
12212 /// by chains, even if another operand would force a specific ordering. This
12213 /// constraint is necessary to allow transformations like splitting loads.
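/// For example (chain hypothetical): starting at a TokenFactor whose inputs
/// are Dest itself and an unordered load chained to Dest, every path reaches
/// Dest without a side effect, so the query succeeds within the depth limit.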
12214 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
12215 unsigned Depth) const {
12216 if (*this == Dest) return true;
12217
12218   // Don't search too deeply; we just want to be able to see through
12219   // TokenFactors, etc.
12220 if (Depth == 0) return false;
12221
12222 // If this is a token factor, all inputs to the TF happen in parallel.
12223 if (getOpcode() == ISD::TokenFactor) {
12224 // First, try a shallow search.
12225 if (is_contained((*this)->ops(), Dest)) {
12226 // We found the chain we want as an operand of this TokenFactor.
12227 // Essentially, we reach the chain without side-effects if we could
12228 // serialize the TokenFactor into a simple chain of operations with
12229 // Dest as the last operation. This is automatically true if the
12230 // chain has one use: there are no other ordering constraints.
12231 // If the chain has more than one use, we give up: some other
12232 // use of Dest might force a side-effect between Dest and the current
12233 // node.
12234 if (Dest.hasOneUse())
12235 return true;
12236 }
12237 // Next, try a deep search: check whether every operand of the TokenFactor
12238 // reaches Dest.
12239 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
12240 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
12241 });
12242 }
12243
12244 // Loads don't have side effects, look through them.
12245 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
12246 if (Ld->isUnordered())
12247 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
12248 }
12249 return false;
12250 }
12251
12252 bool SDNode::hasPredecessor(const SDNode *N) const {
12253 SmallPtrSet<const SDNode *, 32> Visited;
12254 SmallVector<const SDNode *, 16> Worklist;
12255 Worklist.push_back(this);
12256 return hasPredecessorHelper(N, Visited, Worklist);
12257 }
12258
12259 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
12260 this->Flags.intersectWith(Flags);
12261 }
12262
12263 SDValue
12264 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
12265 ArrayRef<ISD::NodeType> CandidateBinOps,
12266 bool AllowPartials) {
12267 // The pattern must end in an extract from index 0.
12268 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
12269 !isNullConstant(Extract->getOperand(1)))
12270 return SDValue();
12271
12272 // Match against one of the candidate binary ops.
12273 SDValue Op = Extract->getOperand(0);
12274 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
12275 return Op.getOpcode() == unsigned(BinOp);
12276 }))
12277 return SDValue();
12278
12279 // Floating-point reductions may require relaxed constraints on the final step
12280 // of the reduction because they may reorder intermediate operations.
12281 unsigned CandidateBinOp = Op.getOpcode();
12282 if (Op.getValueType().isFloatingPoint()) {
12283 SDNodeFlags Flags = Op->getFlags();
12284 switch (CandidateBinOp) {
12285 case ISD::FADD:
12286 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
12287 return SDValue();
12288 break;
12289 default:
12290 llvm_unreachable("Unhandled FP opcode for binop reduction");
12291 }
12292 }
12293
12294   // Helper used when matching fails partway: check whether we completed
12295   // enough stages that a partial reduction from a subvector is possible.
12296 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
12297 if (!AllowPartials || !Op)
12298 return SDValue();
12299 EVT OpVT = Op.getValueType();
12300 EVT OpSVT = OpVT.getScalarType();
12301 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
12302 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
12303 return SDValue();
12304 BinOp = (ISD::NodeType)CandidateBinOp;
12305 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
12306 getVectorIdxConstant(0, SDLoc(Op)));
12307 };
12308
12309 // At each stage, we're looking for something that looks like:
12310 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
12311 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
12312 // i32 undef, i32 undef, i32 undef, i32 undef>
12313 // %a = binop <8 x i32> %op, %s
12314 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
12315 // we expect something like:
12316 // <4,5,6,7,u,u,u,u>
12317 // <2,3,u,u,u,u,u,u>
12318 // <1,u,u,u,u,u,u,u>
12319 // While a partial reduction match would be:
12320 // <2,3,u,u,u,u,u,u>
12321 // <1,u,u,u,u,u,u,u>
12322 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
12323 SDValue PrevOp;
12324 for (unsigned i = 0; i < Stages; ++i) {
12325 unsigned MaskEnd = (1 << i);
12326
12327 if (Op.getOpcode() != CandidateBinOp)
12328 return PartialReduction(PrevOp, MaskEnd);
12329
12330 SDValue Op0 = Op.getOperand(0);
12331 SDValue Op1 = Op.getOperand(1);
12332
12333 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
12334 if (Shuffle) {
12335 Op = Op1;
12336 } else {
12337 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
12338 Op = Op0;
12339 }
12340
12341 // The first operand of the shuffle should be the same as the other operand
12342 // of the binop.
12343 if (!Shuffle || Shuffle->getOperand(0) != Op)
12344 return PartialReduction(PrevOp, MaskEnd);
12345
12346 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
12347 for (int Index = 0; Index < (int)MaskEnd; ++Index)
12348 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
12349 return PartialReduction(PrevOp, MaskEnd);
12350
12351 PrevOp = Op;
12352 }
12353
12354 // Handle subvector reductions, which tend to appear after the shuffle
12355 // reduction stages.
12356 while (Op.getOpcode() == CandidateBinOp) {
12357 unsigned NumElts = Op.getValueType().getVectorNumElements();
12358 SDValue Op0 = Op.getOperand(0);
12359 SDValue Op1 = Op.getOperand(1);
12360 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12361 Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12362 Op0.getOperand(0) != Op1.getOperand(0))
12363 break;
12364 SDValue Src = Op0.getOperand(0);
12365 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
12366 if (NumSrcElts != (2 * NumElts))
12367 break;
12368 if (!(Op0.getConstantOperandAPInt(1) == 0 &&
12369 Op1.getConstantOperandAPInt(1) == NumElts) &&
12370 !(Op1.getConstantOperandAPInt(1) == 0 &&
12371 Op0.getConstantOperandAPInt(1) == NumElts))
12372 break;
12373 Op = Src;
12374 }
12375
12376 BinOp = (ISD::NodeType)CandidateBinOp;
12377 return Op;
12378 }
12379
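// UnrollVectorOp scalarizes a vector operation. For example (illustrative):
// (add v4i32:a, v4i32:b) unrolls into a BUILD_VECTOR of four scalar adds,
//   (add (extractelt a, i), (extractelt b, i))  for i = 0..3,
// with undef padding appended whenever ResNE exceeds the unrolled count.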
12380 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
12381 EVT VT = N->getValueType(0);
12382 EVT EltVT = VT.getVectorElementType();
12383 unsigned NE = VT.getVectorNumElements();
12384
12385 SDLoc dl(N);
12386
12387 // If ResNE is 0, fully unroll the vector op.
12388 if (ResNE == 0)
12389 ResNE = NE;
12390 else if (NE > ResNE)
12391 NE = ResNE;
12392
12393 if (N->getNumValues() == 2) {
12394 SmallVector<SDValue, 8> Scalars0, Scalars1;
12395 SmallVector<SDValue, 4> Operands(N->getNumOperands());
12396 EVT VT1 = N->getValueType(1);
12397 EVT EltVT1 = VT1.getVectorElementType();
12398
12399 unsigned i;
12400 for (i = 0; i != NE; ++i) {
12401 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
12402 SDValue Operand = N->getOperand(j);
12403 EVT OperandVT = Operand.getValueType();
12404
12405 // A vector operand; extract a single element.
12406 EVT OperandEltVT = OperandVT.getVectorElementType();
12407 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
12408 Operand, getVectorIdxConstant(i, dl));
12409 }
12410
12411 SDValue EltOp = getNode(N->getOpcode(), dl, {EltVT, EltVT1}, Operands);
12412 Scalars0.push_back(EltOp);
12413 Scalars1.push_back(EltOp.getValue(1));
12414 }
12415
12416 SDValue Vec0 = getBuildVector(VT, dl, Scalars0);
12417 SDValue Vec1 = getBuildVector(VT1, dl, Scalars1);
12418 return getMergeValues({Vec0, Vec1}, dl);
12419 }
12420
12421 assert(N->getNumValues() == 1 &&
12422 "Can't unroll a vector with multiple results!");
12423
12424 SmallVector<SDValue, 8> Scalars;
12425 SmallVector<SDValue, 4> Operands(N->getNumOperands());
12426
12427 unsigned i;
12428   for (i = 0; i != NE; ++i) {
12429 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
12430 SDValue Operand = N->getOperand(j);
12431 EVT OperandVT = Operand.getValueType();
12432 if (OperandVT.isVector()) {
12433 // A vector operand; extract a single element.
12434 EVT OperandEltVT = OperandVT.getVectorElementType();
12435 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
12436 Operand, getVectorIdxConstant(i, dl));
12437 } else {
12438 // A scalar operand; just use it as is.
12439 Operands[j] = Operand;
12440 }
12441 }
12442
12443 switch (N->getOpcode()) {
12444 default: {
12445 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
12446 N->getFlags()));
12447 break;
12448 }
12449 case ISD::VSELECT:
12450 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
12451 break;
12452 case ISD::SHL:
12453 case ISD::SRA:
12454 case ISD::SRL:
12455 case ISD::ROTL:
12456 case ISD::ROTR:
12457 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
12458 getShiftAmountOperand(Operands[0].getValueType(),
12459 Operands[1])));
12460 break;
12461 case ISD::SIGN_EXTEND_INREG: {
12462 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
12463 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
12464 Operands[0],
12465 getValueType(ExtVT)));
12466 }
12467 }
12468 }
12469
12470 for (; i < ResNE; ++i)
12471 Scalars.push_back(getUNDEF(EltVT));
12472
12473 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
12474 return getBuildVector(VecVT, dl, Scalars);
12475 }
12476
12477 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
12478 SDNode *N, unsigned ResNE) {
12479 unsigned Opcode = N->getOpcode();
12480 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
12481 Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
12482 Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
12483 "Expected an overflow opcode");
12484
12485 EVT ResVT = N->getValueType(0);
12486 EVT OvVT = N->getValueType(1);
12487 EVT ResEltVT = ResVT.getVectorElementType();
12488 EVT OvEltVT = OvVT.getVectorElementType();
12489 SDLoc dl(N);
12490
12491 // If ResNE is 0, fully unroll the vector op.
12492 unsigned NE = ResVT.getVectorNumElements();
12493 if (ResNE == 0)
12494 ResNE = NE;
12495 else if (NE > ResNE)
12496 NE = ResNE;
12497
12498 SmallVector<SDValue, 8> LHSScalars;
12499 SmallVector<SDValue, 8> RHSScalars;
12500 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
12501 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
12502
12503 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
12504 SDVTList VTs = getVTList(ResEltVT, SVT);
12505 SmallVector<SDValue, 8> ResScalars;
12506 SmallVector<SDValue, 8> OvScalars;
12507 for (unsigned i = 0; i < NE; ++i) {
12508 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
12509 SDValue Ov =
12510 getSelect(dl, OvEltVT, Res.getValue(1),
12511 getBoolConstant(true, dl, OvEltVT, ResVT),
12512 getConstant(0, dl, OvEltVT));
12513
12514 ResScalars.push_back(Res);
12515 OvScalars.push_back(Ov);
12516 }
12517
12518 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
12519 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
12520
12521 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
12522 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
12523 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
12524 getBuildVector(NewOvVT, dl, OvScalars));
12525 }
12526
12527 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
12528 LoadSDNode *Base,
12529 unsigned Bytes,
12530 int Dist) const {
12531 if (LD->isVolatile() || Base->isVolatile())
12532 return false;
12533 // TODO: probably too restrictive for atomics, revisit
12534 if (!LD->isSimple())
12535 return false;
12536 if (LD->isIndexed() || Base->isIndexed())
12537 return false;
12538 if (LD->getChain() != Base->getChain())
12539 return false;
12540 EVT VT = LD->getMemoryVT();
12541 if (VT.getSizeInBits() / 8 != Bytes)
12542 return false;
12543
12544 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
12545 auto LocDecomp = BaseIndexOffset::match(LD, *this);
12546
12547 int64_t Offset = 0;
12548 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
12549 return (Dist * (int64_t)Bytes == Offset);
12550 return false;
12551 }
12552
12553 /// InferPtrAlign - Infer the alignment of a load / store address. Return
12554 /// std::nullopt if it cannot be inferred.
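/// For example (values hypothetical): a GlobalAddress known to be 16-byte
/// aligned plus a constant offset of 4 yields commonAlignment(16, 4), i.e.
/// Align(4).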
12555 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
12556 // If this is a GlobalAddress + cst, return the alignment.
12557 const GlobalValue *GV = nullptr;
12558 int64_t GVOffset = 0;
12559 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
12560 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
12561 KnownBits Known(PtrWidth);
12562 llvm::computeKnownBits(GV, Known, getDataLayout());
12563 unsigned AlignBits = Known.countMinTrailingZeros();
12564 if (AlignBits)
12565 return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
12566 }
12567
12568 // If this is a direct reference to a stack slot, use information about the
12569 // stack slot's alignment.
12570 int FrameIdx = INT_MIN;
12571 int64_t FrameOffset = 0;
12572 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
12573 FrameIdx = FI->getIndex();
12574 } else if (isBaseWithConstantOffset(Ptr) &&
12575 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
12576 // Handle FI+Cst
12577 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
12578 FrameOffset = Ptr.getConstantOperandVal(1);
12579 }
12580
12581 if (FrameIdx != INT_MIN) {
12582 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
12583 return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
12584 }
12585
12586 return std::nullopt;
12587 }
12588
12589 /// Split the scalar node with EXTRACT_ELEMENT using the provided
12590 /// VTs and return the low/high part.
12591 std::pair<SDValue, SDValue> SelectionDAG::SplitScalar(const SDValue &N,
12592 const SDLoc &DL,
12593 const EVT &LoVT,
12594 const EVT &HiVT) {
12595 assert(!LoVT.isVector() && !HiVT.isVector() && !N.getValueType().isVector() &&
12596 "Split node must be a scalar type");
12597 SDValue Lo =
12598 getNode(ISD::EXTRACT_ELEMENT, DL, LoVT, N, getIntPtrConstant(0, DL));
12599 SDValue Hi =
12600 getNode(ISD::EXTRACT_ELEMENT, DL, HiVT, N, getIntPtrConstant(1, DL));
12601 return std::make_pair(Lo, Hi);
12602 }
12603
12604 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
12605 /// which is split (or expanded) into two not necessarily identical pieces.
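/// For example: v8i32 splits into (v4i32, v4i32), and a scalar such as i64
/// typically expands into (i32, i32), depending on the target's transform.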
12606 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
12607 // Currently all types are split in half.
12608 EVT LoVT, HiVT;
12609 if (!VT.isVector())
12610 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
12611 else
12612 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
12613
12614 return std::make_pair(LoVT, HiVT);
12615 }
12616
12617 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
12618 /// type, dependent on an enveloping VT that has been split into two identical
12619 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
12620 std::pair<EVT, EVT>
12621 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
12622 bool *HiIsEmpty) const {
12623 EVT EltTp = VT.getVectorElementType();
12624 // Examples:
12625 // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
12626 // custom VL=9 with enveloping VL=8/8 yields 8/1
12627 // custom VL=10 with enveloping VL=8/8 yields 8/2
12628 // etc.
12629 ElementCount VTNumElts = VT.getVectorElementCount();
12630 ElementCount EnvNumElts = EnvVT.getVectorElementCount();
12631 assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
12632 "Mixing fixed width and scalable vectors when enveloping a type");
12633 EVT LoVT, HiVT;
12634 if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
12635 LoVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
12636 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
12637 *HiIsEmpty = false;
12638 } else {
12639     // Flag that hi type has zero storage size, but return split envelope type
12640 // (this would be easier if vector types with zero elements were allowed).
12641 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
12642 HiVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
12643 *HiIsEmpty = true;
12644 }
12645 return std::make_pair(LoVT, HiVT);
12646 }
12647
12648 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
12649 /// low/high part.
12650 std::pair<SDValue, SDValue>
12651 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
12652 const EVT &HiVT) {
12653 assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
12654 LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
12655 "Splitting vector with an invalid mixture of fixed and scalable "
12656 "vector types");
12657 assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
12658 N.getValueType().getVectorMinNumElements() &&
12659 "More vector elements requested than available!");
12660 SDValue Lo, Hi;
12661 Lo =
12662 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
12663 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
12664 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
12665 // IDX with the runtime scaling factor of the result vector type. For
12666 // fixed-width result vectors, that runtime scaling factor is 1.
12667 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
12668 getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
12669 return std::make_pair(Lo, Hi);
12670 }
12671
12672 std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT,
12673 const SDLoc &DL) {
12674 // Split the vector length parameter.
12675   // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl, %halfnumelts).
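  // Worked example (values hypothetical): with VecVT = v8i32, HalfMinNumElts
  // is 4, so %evl = 6 splits into Lo = umin(6, 4) = 4 and
  // Hi = usubsat(6, 4) = 2.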
12676 EVT VT = N.getValueType();
12677 assert(VecVT.getVectorElementCount().isKnownEven() &&
12678 "Expecting the mask to be an evenly-sized vector");
12679 unsigned HalfMinNumElts = VecVT.getVectorMinNumElements() / 2;
12680 SDValue HalfNumElts =
12681 VecVT.isFixedLengthVector()
12682 ? getConstant(HalfMinNumElts, DL, VT)
12683 : getVScale(DL, VT, APInt(VT.getScalarSizeInBits(), HalfMinNumElts));
12684 SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts);
12685 SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts);
12686 return std::make_pair(Lo, Hi);
12687 }
12688
12689 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
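/// For example: a v3i32 value is widened to v4i32 by inserting it at index 0
/// of an undef v4i32; the extra trailing element is undefined.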
12690 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
12691 EVT VT = N.getValueType();
12692 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
12693 NextPowerOf2(VT.getVectorNumElements()));
12694 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
12695 getVectorIdxConstant(0, DL));
12696 }
12697
12698 void SelectionDAG::ExtractVectorElements(SDValue Op,
12699 SmallVectorImpl<SDValue> &Args,
12700 unsigned Start, unsigned Count,
12701 EVT EltVT) {
12702 EVT VT = Op.getValueType();
12703 if (Count == 0)
12704 Count = VT.getVectorNumElements();
12705 if (EltVT == EVT())
12706 EltVT = VT.getVectorElementType();
12707 SDLoc SL(Op);
12708 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
12709 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
12710 getVectorIdxConstant(i, SL)));
12711 }
12712 }
12713
12714 // getAddressSpace - Return the address space this GlobalAddress belongs to.
12715 unsigned GlobalAddressSDNode::getAddressSpace() const {
12716 return getGlobal()->getType()->getAddressSpace();
12717 }
12718
12719 Type *ConstantPoolSDNode::getType() const {
12720 if (isMachineConstantPoolEntry())
12721 return Val.MachineCPVal->getType();
12722 return Val.ConstVal->getType();
12723 }
12724
12725 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
12726 unsigned &SplatBitSize,
12727 bool &HasAnyUndefs,
12728 unsigned MinSplatBits,
12729 bool IsBigEndian) const {
12730 EVT VT = getValueType(0);
12731 assert(VT.isVector() && "Expected a vector type");
12732 unsigned VecWidth = VT.getSizeInBits();
12733 if (MinSplatBits > VecWidth)
12734 return false;
12735
12736 // FIXME: The widths are based on this node's type, but build vectors can
12737 // truncate their operands.
12738 SplatValue = APInt(VecWidth, 0);
12739 SplatUndef = APInt(VecWidth, 0);
12740
12741 // Get the bits. Bits with undefined values (when the corresponding element
12742 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
12743 // in SplatValue. If any of the values are not constant, give up and return
12744 // false.
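  // Worked example (values hypothetical): <4 x i16> <1, 1, 1, 1> produces
  // SplatValue = 0x0001000100010001 with VecWidth = 64; the halving loop
  // below then reduces this to SplatBitSize = 16 with SplatValue = 1.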
12745   unsigned NumOps = getNumOperands();
12746 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
12747 unsigned EltWidth = VT.getScalarSizeInBits();
12748
12749 for (unsigned j = 0; j < NumOps; ++j) {
12750 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
12751 SDValue OpVal = getOperand(i);
12752 unsigned BitPos = j * EltWidth;
12753
12754 if (OpVal.isUndef())
12755 SplatUndef.setBits(BitPos, BitPos + EltWidth);
12756 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
12757 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
12758 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
12759 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
12760 else
12761 return false;
12762 }
12763
12764 // The build_vector is all constants or undefs. Find the smallest element
12765 // size that splats the vector.
12766 HasAnyUndefs = (SplatUndef != 0);
12767
12768 // FIXME: This does not work for vectors with elements less than 8 bits.
12769 while (VecWidth > 8) {
12770 // If we can't split in half, stop here.
12771 if (VecWidth & 1)
12772 break;
12773
12774 unsigned HalfSize = VecWidth / 2;
12775 APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
12776 APInt LowValue = SplatValue.extractBits(HalfSize, 0);
12777 APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
12778 APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);
12779
12780 // If the two halves do not match (ignoring undef bits), stop here.
12781 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
12782 MinSplatBits > HalfSize)
12783 break;
12784
12785 SplatValue = HighValue | LowValue;
12786 SplatUndef = HighUndef & LowUndef;
12787
12788 VecWidth = HalfSize;
12789 }
12790
  // FIXME: The loop above only tries to split in halves, so for an input
  // vector such as <3 x i16> it cannot detect a SplatBitSize of 16. It is
  // unclear whether this limitation costs us any optimizations in practice;
  // presumably vectors were normally power-of-2 sized when this helper was
  // written.

  SplatBitSize = VecWidth;
  return true;
}
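
// For example, the v4i8 node (build_vector 1, 1, 1, 1) is first collected as
// the 32-bit value 0x01010101; both halves match at each step of the halving
// loop above, so it reports SplatValue = 1 with SplatBitSize = 8.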

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countr_zero();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}
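
// For example, (build_vector X, undef, X, X) with all elements demanded
// returns X and marks element 1 in UndefElements; if two demanded non-undef
// operands differ, an empty SDValue is returned instead.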

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnes(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
                                            SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  Sequence.clear();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
    return false;

  // Set the undefs even if we don't find a sequence (like getSplatValue).
  if (UndefElements)
    for (unsigned I = 0; I != NumOps; ++I)
      if (DemandedElts[I] && getOperand(I).isUndef())
        (*UndefElements)[I] = true;

  // Iteratively widen the sequence length looking for repetitions.
  for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
    Sequence.append(SeqLen, SDValue());
    for (unsigned I = 0; I != NumOps; ++I) {
      if (!DemandedElts[I])
        continue;
      SDValue &SeqOp = Sequence[I % SeqLen];
      SDValue Op = getOperand(I);
      if (Op.isUndef()) {
        if (!SeqOp)
          SeqOp = Op;
        continue;
      }
      if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
        Sequence.clear();
        break;
      }
      SeqOp = Op;
    }
    if (!Sequence.empty())
      return true;
  }

  assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
  return false;
}
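
// For example, (build_vector X, Y, X, Y) yields Sequence = {X, Y}, and a
// splat such as (build_vector X, X, X, X) yields the minimal Sequence = {X},
// while (build_vector X, Y, Y, X) has no power-of-2 repetition and fails.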

bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnes(getNumOperands());
  return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}
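
// For example, a splat of 8.0 converts exactly to the integer 8 and returns
// log2(8) == 3, whereas a splat of 0.5 (not exactly convertible) or 3.0 (not
// a power of two) returns -1.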

bool BuildVectorSDNode::getConstantRawBits(
    bool IsLittleEndian, unsigned DstEltSizeInBits,
    SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const {
  // Early-out if this contains anything but Undef/Constant/ConstantFP.
  if (!isConstant())
    return false;

  unsigned NumSrcOps = getNumOperands();
  unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits();
  assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
         "Invalid bitcast scale");

  // Extract raw src bits.
  SmallVector<APInt> SrcBitElements(NumSrcOps,
                                    APInt::getZero(SrcEltSizeInBits));
  BitVector SrcUndefElements(NumSrcOps, false);

  for (unsigned I = 0; I != NumSrcOps; ++I) {
    SDValue Op = getOperand(I);
    if (Op.isUndef()) {
      SrcUndefElements.set(I);
      continue;
    }
    auto *CInt = dyn_cast<ConstantSDNode>(Op);
    auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
    assert((CInt || CFP) && "Unknown constant");
    SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
                             : CFP->getValueAPF().bitcastToAPInt();
  }

  // Recast to dst width.
  recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
                SrcBitElements, UndefElements, SrcUndefElements);
  return true;
}

void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
                                      unsigned DstEltSizeInBits,
                                      SmallVectorImpl<APInt> &DstBitElements,
                                      ArrayRef<APInt> SrcBitElements,
                                      BitVector &DstUndefElements,
                                      const BitVector &SrcUndefElements) {
  unsigned NumSrcOps = SrcBitElements.size();
  unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
  assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
         "Invalid bitcast scale");
  assert(NumSrcOps == SrcUndefElements.size() && "Vector size mismatch");

  unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
  DstUndefElements.clear();
  DstUndefElements.resize(NumDstOps, false);
  DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits));

  // Concatenate the constant bits of several src elements into each dst
  // element.
  if (SrcEltSizeInBits <= DstEltSizeInBits) {
    unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
    for (unsigned I = 0; I != NumDstOps; ++I) {
      DstUndefElements.set(I);
      APInt &DstBits = DstBitElements[I];
      for (unsigned J = 0; J != Scale; ++J) {
        unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
        if (SrcUndefElements[Idx])
          continue;
        DstUndefElements.reset(I);
        const APInt &SrcBits = SrcBitElements[Idx];
        assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
               "Illegal constant bitwidths");
        DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
      }
    }
    return;
  }

  // Split each src element's constant bits into several dst elements.
  unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
  for (unsigned I = 0; I != NumSrcOps; ++I) {
    if (SrcUndefElements[I]) {
      DstUndefElements.set(I * Scale, (I + 1) * Scale);
      continue;
    }
    const APInt &SrcBits = SrcBitElements[I];
    for (unsigned J = 0; J != Scale; ++J) {
      unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
      APInt &DstBits = DstBitElements[Idx];
      DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
    }
  }
}
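
// For example, recasting the two i32 elements {0x11111111, 0x22222222} to
// i64 produces 0x2222222211111111 when little-endian and 0x1111111122222222
// when big-endian; splitting a wide element works the same way in reverse.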

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

std::optional<std::pair<APInt, APInt>>
BuildVectorSDNode::isConstantSequence() const {
  unsigned NumOps = getNumOperands();
  if (NumOps < 2)
    return std::nullopt;

  if (!isa<ConstantSDNode>(getOperand(0)) ||
      !isa<ConstantSDNode>(getOperand(1)))
    return std::nullopt;

  unsigned EltSize = getValueType(0).getScalarSizeInBits();
  APInt Start = getConstantOperandAPInt(0).trunc(EltSize);
  APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start;

  if (Stride.isZero())
    return std::nullopt;

  for (unsigned i = 2; i < NumOps; ++i) {
    if (!isa<ConstantSDNode>(getOperand(i)))
      return std::nullopt;

    APInt Val = getConstantOperandAPInt(i).trunc(EltSize);
    if (Val != (Start + (Stride * i)))
      return std::nullopt;
  }

  return std::make_pair(Start, Stride);
}
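
// For example, (build_vector 0, 2, 4, 6) is the arithmetic sequence with
// Start = 0 and Stride = 2; a zero stride or any non-constant operand yields
// std::nullopt.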

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
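
// For example, the mask <-1, 2, -1, 2> is a splat of element 2 (undef lanes
// are ignored), whereas <0, 1, 0, 1> references two distinct elements and is
// not a splat.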

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return N.getNode();
  return nullptr;
}
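
// For example, (i32 Constant<7>) is returned directly, as is a build_vector
// or splat_vector of constant integers; a global address counts only when
// offset folding is legal for the target.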

// Returns the SDNode if it is a constant float BuildVector
// or constant float.
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantFPSDNode>(N.getOperand(0)))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip chain operands; they do not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent |= Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  if (!TLI->isSDNodeAlwaysUniform(Node)) {
    IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
    Node->SDNodeBits.IsDivergent = IsDivergent;
  }
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
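
// Whenever Vals exceeds the maximum operand count, the trailing Limit chains
// are folded into a nested TokenFactor that replaces them, so arbitrarily
// many chains collapse into a small tree of TokenFactor nodes.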

SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
                                        EVT VT, SDNodeFlags Flags) {
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::ADD:
  case ISD::OR:
  case ISD::XOR:
  case ISD::UMAX:
    return getConstant(0, DL, VT);
  case ISD::MUL:
    return getConstant(1, DL, VT);
  case ISD::AND:
  case ISD::UMIN:
    return getAllOnesConstant(DL, VT);
  case ISD::SMAX:
    return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
  case ISD::SMIN:
    return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
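  // Note that -0.0 (not +0.0) is the neutral element for fadd: x + -0.0 == x
  // for every x, whereas -0.0 + +0.0 == +0.0 would lose the sign of a
  // negative zero.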
  case ISD::FADD:
    return getConstantFP(-0.0, DL, VT);
  case ISD::FMUL:
    return getConstantFP(1.0, DL, VT);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
    const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
    APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
                        !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
                        APFloat::getLargest(Semantics);
    if (Opcode == ISD::FMAXNUM)
      NeutralAF.changeSign();

    return getConstantFP(NeutralAF, DL, VT);
  }
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM: {
    // Neutral element for fminimum is Inf or FLT_MAX, depending on FMF.
    const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
    APFloat NeutralAF = !Flags.hasNoInfs() ? APFloat::getInf(Semantics)
                                           : APFloat::getLargest(Semantics);
    if (Opcode == ISD::FMAXIMUM)
      NeutralAF.changeSign();

    return getConstantFP(NeutralAF, DL, VT);
  }
  }
}

/// Helper used to make a call to a library function that has one argument of
/// pointer type.
///
/// Such functions include 'fegetmode', 'fesetenv' and some others, which are
/// used to get or set floating-point state. They have one argument of pointer
/// type, which points to the memory region containing bits of the
/// floating-point state. The value returned by such a function is ignored in
/// the created call.
///
/// \param LibFunc Reference to library function (value of RTLIB::Libcall).
/// \param Ptr Pointer used to save/load state.
/// \param InChain Ingoing token chain.
/// \returns Outgoing chain token.
SDValue SelectionDAG::makeStateFunctionCall(unsigned LibFunc, SDValue Ptr,
                                            SDValue InChain,
                                            const SDLoc &DLoc) {
  assert(InChain.getValueType() == MVT::Other && "Expected token chain");
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Ptr;
  Entry.Ty = Ptr.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  RTLIB::Libcall LC = static_cast<RTLIB::Libcall>(LibFunc);
  SDValue Callee = getExternalSymbol(TLI->getLibcallName(LC),
                                     TLI->getPointerTy(getDataLayout()));
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(DLoc).setChain(InChain).setLibCallee(
      TLI->getLibcallCallingConv(LC), Type::getVoidTy(*getContext()), Callee,
      std::move(Args));
  return TLI->LowerCallTo(CLI).second;
}
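
// A typical use is lowering floating-point environment accesses; as a sketch
// (assuming an RTLIB::FEGETENV entry is available for the target):
//   Chain = DAG.makeStateFunctionCall(RTLIB::FEGETENV, EnvPtr, Chain, dl);
// which emits a call to "fegetenv(EnvPtr)" and returns the updated chain.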

void SelectionDAG::copyExtraInfo(SDNode *From, SDNode *To) {
  assert(From && To && "Invalid SDNode; empty source SDValue?");
  auto I = SDEI.find(From);
  if (I == SDEI.end())
    return;

  // Use of operator[] on the DenseMap may cause an insertion, which
  // invalidates the iterator, hence the need to make a copy to prevent a
  // use-after-free.
  NodeExtraInfo NEI = I->second;
  if (LLVM_LIKELY(!NEI.PCSections) && LLVM_LIKELY(!NEI.MMRA)) {
    // No deep copy required for the types of extra info set.
    //
    // FIXME: Investigate if other types of extra info also need deep copy.
    // This depends on the types of nodes they can be attached to: if some
    // extra info is only ever attached to nodes where a replacement To node
    // is always the node where later use and propagation of the extra info
    // has the intended semantics, no deep copy is required.
    SDEI[To] = std::move(NEI);
    return;
  }

  // We need to copy NodeExtraInfo to all _new_ nodes that are being introduced
  // through the replacement of From with To. Otherwise, replacements of a node
  // (From) with more complex nodes (To and its operands) may result in lost
  // extra info where the root node (To) is insignificant in further
  // propagating and using extra info when further lowering to MIR.
  //
  // In the first step pre-populate the visited set with the nodes reachable
  // from the old From node. This avoids copying NodeExtraInfo to parts of the
  // DAG that are not new and should be left untouched.
  SmallVector<const SDNode *> Leafs{From}; // Leafs reachable with VisitFrom.
  DenseSet<const SDNode *> FromReach; // The set of nodes reachable from From.
  auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) {
    if (MaxDepth == 0) {
      // Remember this node in case we need to increase MaxDepth and continue
      // populating FromReach from this node.
      Leafs.emplace_back(N);
      return;
    }
    if (!FromReach.insert(N).second)
      return;
    for (const SDValue &Op : N->op_values())
      Self(Self, Op.getNode(), MaxDepth - 1);
  };

  // Copy extra info to To and all its transitive operands (that are new).
  SmallPtrSet<const SDNode *, 8> Visited;
  auto DeepCopyTo = [&](auto &&Self, const SDNode *N) {
    if (FromReach.contains(N))
      return true;
    if (!Visited.insert(N).second)
      return true;
    if (getEntryNode().getNode() == N)
      return false;
    for (const SDValue &Op : N->op_values()) {
      if (!Self(Self, Op.getNode()))
        return false;
    }
    // Copy only if entry node was not reached.
    SDEI[N] = NEI;
    return true;
  };

  // We first try with a lower MaxDepth, assuming that the path to common
  // operands between From and To is relatively short. This significantly
  // improves performance in the common case. The initial MaxDepth is big
  // enough to avoid retry in the common case; the last MaxDepth is large
  // enough to avoid having to use the fallback below (and protects from
  // potential stack exhaustion from recursion).
  for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
       PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) {
    // StartFrom is the previous (or initial) set of leafs reachable at the
    // previous maximum depth.
    SmallVector<const SDNode *> StartFrom;
    std::swap(StartFrom, Leafs);
    for (const SDNode *N : StartFrom)
      VisitFrom(VisitFrom, N, MaxDepth - PrevDepth);
    if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To)))
      return;
    // This should happen very rarely (reached the entry node).
    LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth << " too low\n");
    assert(!Leafs.empty());
  }

  // This should not happen - but if it did, that means the subgraph reachable
  // from From has depth greater or equal to maximum MaxDepth, and VisitFrom()
  // could not visit all reachable common operands. Consequently, we were able
  // to reach the entry node.
  errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
  assert(false && "From subgraph too complex - increase max. MaxDepth?");
  // Best-effort fallback if assertions disabled.
  SDEI[To] = std::move(NEI);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}