xref: /freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares codegen opcodes and related utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CODEGEN_ISDOPCODES_H
14 #define LLVM_CODEGEN_ISDOPCODES_H
15 
16 #include "llvm/CodeGen/ValueTypes.h"
17 
18 namespace llvm {
19 
20 /// ISD namespace - This namespace contains an enum which represents all of the
21 /// SelectionDAG node types and value types.
22 ///
23 namespace ISD {
24 
25 //===--------------------------------------------------------------------===//
26 /// ISD::NodeType enum - This enum defines the target-independent operators
27 /// for a SelectionDAG.
28 ///
29 /// Targets may also define target-dependent operator codes for SDNodes. For
30 /// example, on x86, these are the enum values in the X86ISD namespace.
31 /// Targets should aim to use target-independent operators to model their
32 /// instruction sets as much as possible, and only use target-dependent
33 /// operators when they have special requirements.
34 ///
35 /// Finally, during and after selection proper, SNodes may use special
36 /// operator codes that correspond directly with MachineInstr opcodes. These
37 /// are used to represent selected instructions. See the isMachineOpcode()
38 /// and getMachineOpcode() member functions of SDNode.
39 ///
40 enum NodeType {
41 
42   /// DELETED_NODE - This is an illegal value that is used to catch
43   /// errors.  This opcode is not a legal opcode for any node.
44   DELETED_NODE,
45 
46   /// EntryToken - This is the marker used to indicate the start of a region.
47   EntryToken,
48 
49   /// TokenFactor - This node takes multiple tokens as input and produces a
50   /// single token result. This is used to represent the fact that the operand
51   /// operators are independent of each other.
52   TokenFactor,
53 
54   /// AssertSext, AssertZext - These nodes record if a register contains a
55   /// value that has already been zero or sign extended from a narrower type.
56   /// These nodes take two operands.  The first is the node that has already
57   /// been extended, and the second is a value type node indicating the width
58   /// of the extension.
59   /// NOTE: In case of the source value (or any vector element value) is
60   /// poisoned the assertion will not be true for that value.
61   AssertSext,
62   AssertZext,
63 
64   /// AssertAlign - These nodes record if a register contains a value that
65   /// has a known alignment and the trailing bits are known to be zero.
66   /// NOTE: In case of the source value (or any vector element value) is
67   /// poisoned the assertion will not be true for that value.
68   AssertAlign,
69 
70   /// Various leaf nodes.
71   BasicBlock,
72   VALUETYPE,
73   CONDCODE,
74   Register,
75   RegisterMask,
76   Constant,
77   ConstantFP,
78   GlobalAddress,
79   GlobalTLSAddress,
80   FrameIndex,
81   JumpTable,
82   ConstantPool,
83   ExternalSymbol,
84   BlockAddress,
85 
86   /// A ptrauth constant.
87   /// ptr, key, addr-disc, disc
88   /// Note that the addr-disc can be a non-constant value, to allow representing
89   /// a constant global address signed using address-diversification, in code.
90   PtrAuthGlobalAddress,
91 
92   /// The address of the GOT
93   GLOBAL_OFFSET_TABLE,
94 
95   /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
96   /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
97   /// of the frame or return address to return.  An index of zero corresponds
98   /// to the current function's frame or return address, an index of one to
99   /// the parent's frame or return address, and so on.
100   FRAMEADDR,
101   RETURNADDR,
102 
103   /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
104   /// This node takes no operand, returns a target-specific pointer to the
105   /// place in the stack frame where the return address of the current
106   /// function is stored.
107   ADDROFRETURNADDR,
108 
109   /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
110   /// and returns the stack pointer value at the entry of the current
111   /// function calling this intrinsic.
112   SPONENTRY,
113 
114   /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
115   /// Materializes the offset from the local object pointer of another
116   /// function to a particular local object passed to llvm.localescape. The
117   /// operand is the MCSymbol label used to represent this offset, since
118   /// typically the offset is not known until after code generation of the
119   /// parent.
120   LOCAL_RECOVER,
121 
122   /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on
123   /// the DAG, which implements the named register global variables extension.
124   READ_REGISTER,
125   WRITE_REGISTER,
126 
127   /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
128   /// first (possible) on-stack argument. This is needed for correct stack
129   /// adjustment during unwind.
130   FRAME_TO_ARGS_OFFSET,
131 
132   /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
133   /// Frame Address (CFA), generally the value of the stack pointer at the
134   /// call site in the previous frame.
135   EH_DWARF_CFA,
136 
137   /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
138   /// 'eh_return' gcc dwarf builtin, which is used to return from
139   /// exception. The general meaning is: adjust stack by OFFSET and pass
140   /// execution to HANDLER. Many platform-related details also :)
141   EH_RETURN,
142 
143   /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
144   /// This corresponds to the eh.sjlj.setjmp intrinsic.
145   /// It takes an input chain and a pointer to the jump buffer as inputs
146   /// and returns an outchain.
147   EH_SJLJ_SETJMP,
148 
149   /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
150   /// This corresponds to the eh.sjlj.longjmp intrinsic.
151   /// It takes an input chain and a pointer to the jump buffer as inputs
152   /// and returns an outchain.
153   EH_SJLJ_LONGJMP,
154 
155   /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
156   /// The target initializes the dispatch table here.
157   EH_SJLJ_SETUP_DISPATCH,
158 
159   /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
160   /// simplification, or lowering of the constant. They are used for constants
161   /// which are known to fit in the immediate fields of their users, or for
162   /// carrying magic numbers which are not values which need to be
163   /// materialized in registers.
164   TargetConstant,
165   TargetConstantFP,
166 
167   /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
168   /// anything else with this node, and this is valid in the target-specific
169   /// dag, turning into a GlobalAddress operand.
170   TargetGlobalAddress,
171   TargetGlobalTLSAddress,
172   TargetFrameIndex,
173   TargetJumpTable,
174   TargetConstantPool,
175   TargetExternalSymbol,
176   TargetBlockAddress,
177 
178   MCSymbol,
179 
180   /// TargetIndex - Like a constant pool entry, but with completely
181   /// target-dependent semantics. Holds target flags, a 32-bit index, and a
182   /// 64-bit index. Targets can use this however they like.
183   TargetIndex,
184 
185   /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
186   /// This node represents a target intrinsic function with no side effects.
187   /// The first operand is the ID number of the intrinsic from the
188   /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
189   /// node returns the result of the intrinsic.
190   INTRINSIC_WO_CHAIN,
191 
192   /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
193   /// This node represents a target intrinsic function with side effects that
194   /// returns a result.  The first operand is a chain pointer.  The second is
195   /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
196   /// operands to the intrinsic follow.  The node has two results, the result
197   /// of the intrinsic and an output chain.
198   INTRINSIC_W_CHAIN,
199 
200   /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
201   /// This node represents a target intrinsic function with side effects that
202   /// does not return a result.  The first operand is a chain pointer.  The
203   /// second is the ID number of the intrinsic from the llvm::Intrinsic
204   /// namespace.  The operands to the intrinsic follow.
205   INTRINSIC_VOID,
206 
207   /// CopyToReg - This node has three operands: a chain, a register number to
208   /// set to this value, and a value.
209   CopyToReg,
210 
211   /// CopyFromReg - This node indicates that the input value is a virtual or
212   /// physical register that is defined outside of the scope of this
213   /// SelectionDAG.  The register is available from the RegisterSDNode object.
214   /// Note that CopyFromReg is considered as also freezing the value.
215   CopyFromReg,
216 
217   /// UNDEF - An undefined node.
218   UNDEF,
219 
220   // FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
221   // is evaluated to UNDEF), or returns VAL otherwise. Note that each
222   // read of UNDEF can yield different value, but FREEZE(UNDEF) cannot.
223   FREEZE,
224 
225   /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
226   /// a Constant, which is required to be operand #1) half of the integer or
227   /// float value specified as operand #0.  This is only for use before
228   /// legalization, for values that will be broken into multiple registers.
229   EXTRACT_ELEMENT,
230 
231   /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
232   /// Given two values of the same integer value type, this produces a value
233   /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
234   /// legalization. The lower part of the composite value should be in
235   /// element 0 and the upper part should be in element 1.
236   BUILD_PAIR,
237 
238   /// MERGE_VALUES - This node takes multiple discrete operands and returns
239   /// them all as its individual results.  This nodes has exactly the same
240   /// number of inputs and outputs. This node is useful for some pieces of the
241   /// code generator that want to think about a single node with multiple
242   /// results, not multiple nodes.
243   MERGE_VALUES,
244 
245   /// Simple integer binary arithmetic operators.
246   ADD,
247   SUB,
248   MUL,
249   SDIV,
250   UDIV,
251   SREM,
252   UREM,
253 
254   /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
255   /// a signed/unsigned value of type i[2*N], and return the full value as
256   /// two results, each of type iN.
257   SMUL_LOHI,
258   UMUL_LOHI,
259 
260   /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
261   /// remainder result.
262   SDIVREM,
263   UDIVREM,
264 
265   /// CARRY_FALSE - This node is used when folding other nodes,
266   /// like ADDC/SUBC, which indicate the carry result is always false.
267   CARRY_FALSE,
268 
269   /// Carry-setting nodes for multiple precision addition and subtraction.
270   /// These nodes take two operands of the same value type, and produce two
271   /// results.  The first result is the normal add or sub result, the second
272   /// result is the carry flag result.
273   /// FIXME: These nodes are deprecated in favor of UADDO_CARRY and USUBO_CARRY.
274   /// They are kept around for now to provide a smooth transition path
275   /// toward the use of UADDO_CARRY/USUBO_CARRY and will eventually be removed.
276   ADDC,
277   SUBC,
278 
279   /// Carry-using nodes for multiple precision addition and subtraction. These
280   /// nodes take three operands: The first two are the normal lhs and rhs to
281   /// the add or sub, and the third is the input carry flag.  These nodes
282   /// produce two results; the normal result of the add or sub, and the output
283   /// carry flag.  These nodes both read and write a carry flag to allow them
284   /// to them to be chained together for add and sub of arbitrarily large
285   /// values.
286   ADDE,
287   SUBE,
288 
289   /// Carry-using nodes for multiple precision addition and subtraction.
290   /// These nodes take three operands: The first two are the normal lhs and
291   /// rhs to the add or sub, and the third is a boolean value that is 1 if and
292   /// only if there is an incoming carry/borrow. These nodes produce two
293   /// results: the normal result of the add or sub, and a boolean value that is
294   /// 1 if and only if there is an outgoing carry/borrow.
295   ///
296   /// Care must be taken if these opcodes are lowered to hardware instructions
297   /// that use the inverse logic -- 0 if and only if there is an
298   /// incoming/outgoing carry/borrow.  In such cases, you must preserve the
299   /// semantics of these opcodes by inverting the incoming carry/borrow, feeding
300   /// it to the add/sub hardware instruction, and then inverting the outgoing
301   /// carry/borrow.
302   ///
303   /// The use of these opcodes is preferable to adde/sube if the target supports
304   /// it, as the carry is a regular value rather than a glue, which allows
305   /// further optimisation.
306   ///
307   /// These opcodes are different from [US]{ADD,SUB}O in that
308   /// U{ADD,SUB}O_CARRY consume and produce a carry/borrow, whereas
309   /// [US]{ADD,SUB}O produce an overflow.
310   UADDO_CARRY,
311   USUBO_CARRY,
312 
313   /// Carry-using overflow-aware nodes for multiple precision addition and
314   /// subtraction. These nodes take three operands: The first two are normal lhs
315   /// and rhs to the add or sub, and the third is a boolean indicating if there
316   /// is an incoming carry. They produce two results: the normal result of the
317   /// add or sub, and a boolean that indicates if an overflow occurred (*not*
318   /// flag, because it may be a store to memory, etc.). If the type of the
319   /// boolean is not i1 then the high bits conform to getBooleanContents.
320   SADDO_CARRY,
321   SSUBO_CARRY,
322 
323   /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
324   /// These nodes take two operands: the normal LHS and RHS to the add. They
325   /// produce two results: the normal result of the add, and a boolean that
326   /// indicates if an overflow occurred (*not* a flag, because it may be store
327   /// to memory, etc.).  If the type of the boolean is not i1 then the high
328   /// bits conform to getBooleanContents.
329   /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
330   SADDO,
331   UADDO,
332 
333   /// Same for subtraction.
334   SSUBO,
335   USUBO,
336 
337   /// Same for multiplication.
338   SMULO,
339   UMULO,
340 
341   /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
342   /// integers with the same bit width (W). If the true value of LHS + RHS
343   /// exceeds the largest value that can be represented by W bits, the
344   /// resulting value is this maximum value. Otherwise, if this value is less
345   /// than the smallest value that can be represented by W bits, the
346   /// resulting value is this minimum value.
347   SADDSAT,
348   UADDSAT,
349 
350   /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
351   /// integers with the same bit width (W). If the true value of LHS - RHS
352   /// exceeds the largest value that can be represented by W bits, the
353   /// resulting value is this maximum value. Otherwise, if this value is less
354   /// than the smallest value that can be represented by W bits, the
355   /// resulting value is this minimum value.
356   SSUBSAT,
357   USUBSAT,
358 
359   /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
360   /// operand is the value to be shifted, and the second argument is the amount
361   /// to shift by. Both must be integers of the same bit width (W). If the true
362   /// value of LHS << RHS exceeds the largest value that can be represented by
363   /// W bits, the resulting value is this maximum value, Otherwise, if this
364   /// value is less than the smallest value that can be represented by W bits,
365   /// the resulting value is this minimum value.
366   SSHLSAT,
367   USHLSAT,
368 
369   /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
370   /// on 2 integers with the same width and scale. SCALE represents the scale
371   /// of both operands as fixed point numbers. This SCALE parameter must be a
372   /// constant integer. A scale of zero is effectively performing
373   /// multiplication on 2 integers.
374   SMULFIX,
375   UMULFIX,
376 
377   /// Same as the corresponding unsaturated fixed point instructions, but the
378   /// result is clamped between the min and max values representable by the
379   /// bits of the first 2 operands.
380   SMULFIXSAT,
381   UMULFIXSAT,
382 
383   /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
384   /// 2 integers with the same width and scale. SCALE represents the scale
385   /// of both operands as fixed point numbers. This SCALE parameter must be a
386   /// constant integer.
387   SDIVFIX,
388   UDIVFIX,
389 
390   /// Same as the corresponding unsaturated fixed point instructions, but the
391   /// result is clamped between the min and max values representable by the
392   /// bits of the first 2 operands.
393   SDIVFIXSAT,
394   UDIVFIXSAT,
395 
396   /// Simple binary floating point operators.
397   FADD,
398   FSUB,
399   FMUL,
400   FDIV,
401   FREM,
402 
403   /// Constrained versions of the binary floating point operators.
404   /// These will be lowered to the simple operators before final selection.
405   /// They are used to limit optimizations while the DAG is being
406   /// optimized.
407   STRICT_FADD,
408   STRICT_FSUB,
409   STRICT_FMUL,
410   STRICT_FDIV,
411   STRICT_FREM,
412   STRICT_FMA,
413 
414   /// Constrained versions of libm-equivalent floating point intrinsics.
415   /// These will be lowered to the equivalent non-constrained pseudo-op
416   /// (or expanded to the equivalent library call) before final selection.
417   /// They are used to limit optimizations while the DAG is being optimized.
418   STRICT_FSQRT,
419   STRICT_FPOW,
420   STRICT_FPOWI,
421   STRICT_FLDEXP,
422   STRICT_FSIN,
423   STRICT_FCOS,
424   STRICT_FTAN,
425   STRICT_FASIN,
426   STRICT_FACOS,
427   STRICT_FATAN,
428   STRICT_FSINH,
429   STRICT_FCOSH,
430   STRICT_FTANH,
431   STRICT_FEXP,
432   STRICT_FEXP2,
433   STRICT_FLOG,
434   STRICT_FLOG10,
435   STRICT_FLOG2,
436   STRICT_FRINT,
437   STRICT_FNEARBYINT,
438   STRICT_FMAXNUM,
439   STRICT_FMINNUM,
440   STRICT_FCEIL,
441   STRICT_FFLOOR,
442   STRICT_FROUND,
443   STRICT_FROUNDEVEN,
444   STRICT_FTRUNC,
445   STRICT_LROUND,
446   STRICT_LLROUND,
447   STRICT_LRINT,
448   STRICT_LLRINT,
449   STRICT_FMAXIMUM,
450   STRICT_FMINIMUM,
451 
452   /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
453   /// unsigned integer. These have the same semantics as fptosi and fptoui
454   /// in IR.
455   /// They are used to limit optimizations while the DAG is being optimized.
456   STRICT_FP_TO_SINT,
457   STRICT_FP_TO_UINT,
458 
459   /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
460   /// a floating point value. These have the same semantics as sitofp and
461   /// uitofp in IR.
462   /// They are used to limit optimizations while the DAG is being optimized.
463   STRICT_SINT_TO_FP,
464   STRICT_UINT_TO_FP,
465 
466   /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
467   /// point type down to the precision of the destination VT.  TRUNC is a
468   /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
469   /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
470   /// change the value of Y.
471   ///
472   /// The TRUNC = 1 case is used in cases where we know that the value will
473   /// not be modified by the node, because Y is not using any of the extra
474   /// precision of source type.  This allows certain transformations like
475   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
476   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
477   /// removed.
478   /// It is used to limit optimizations while the DAG is being optimized.
479   STRICT_FP_ROUND,
480 
481   /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
482   /// type.
483   /// It is used to limit optimizations while the DAG is being optimized.
484   STRICT_FP_EXTEND,
485 
486   /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
487   /// for floating-point operands only.  STRICT_FSETCC performs a quiet
488   /// comparison operation, while STRICT_FSETCCS performs a signaling
489   /// comparison operation.
490   STRICT_FSETCC,
491   STRICT_FSETCCS,
492 
493   // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
494   FPTRUNC_ROUND,
495 
496   /// FMA - Perform a * b + c with no intermediate rounding step.
497   FMA,
498 
499   /// FMAD - Perform a * b + c, while getting the same result as the
500   /// separately rounded operations.
501   FMAD,
502 
503   /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
504   /// DAG node does not require that X and Y have the same type, just that
505   /// they are both floating point.  X and the result must have the same type.
506   /// FCOPYSIGN(f32, f64) is allowed.
507   FCOPYSIGN,
508 
509   /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
510   /// value as an integer 0/1 value.
511   FGETSIGN,
512 
513   /// Returns platform specific canonical encoding of a floating point number.
514   FCANONICALIZE,
515 
516   /// Performs a check of floating point class property, defined by IEEE-754.
517   /// The first operand is the floating point value to check. The second operand
518   /// specifies the checked property and is a TargetConstant which specifies
519   /// test in the same way as intrinsic 'is_fpclass'.
520   /// Returns boolean value.
521   IS_FPCLASS,
522 
523   /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
524   /// with the specified, possibly variable, elements. The types of the
525   /// operands must match the vector element type, except that integer types
526   /// are allowed to be larger than the element type, in which case the
527   /// operands are implicitly truncated. The types of the operands must all
528   /// be the same.
529   BUILD_VECTOR,
530 
531   /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
532   /// at IDX replaced with VAL. If the type of VAL is larger than the vector
533   /// element type then VAL is truncated before replacement.
534   ///
535   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
536   /// vector width. IDX is not first scaled by the runtime scaling factor of
537   /// VECTOR.
538   INSERT_VECTOR_ELT,
539 
540   /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
541   /// identified by the (potentially variable) element number IDX. If the return
542   /// type is an integer type larger than the element type of the vector, the
543   /// result is extended to the width of the return type. In that case, the high
544   /// bits are undefined.
545   ///
546   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
547   /// vector width. IDX is not first scaled by the runtime scaling factor of
548   /// VECTOR.
549   EXTRACT_VECTOR_ELT,
550 
551   /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
552   /// vector type with the same length and element type, this produces a
553   /// concatenated vector result value, with length equal to the sum of the
554   /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
555   /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
556   /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
557   CONCAT_VECTORS,
558 
559   /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
560   /// inserted into VECTOR1. IDX represents the starting element number at which
561   /// VECTOR2 will be inserted. IDX must be a constant multiple of T's known
562   /// minimum vector length. Let the type of VECTOR2 be T, then if T is a
563   /// scalable vector, IDX is first scaled by the runtime scaling factor of T.
564   /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
565   /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
566   /// indices. If this condition cannot be determined statically but is false at
567   /// runtime, then the result vector is undefined. The IDX parameter must be a
568   /// vector index constant type, which for most targets will be an integer
569   /// pointer type.
570   ///
571   /// This operation supports inserting a fixed-width vector into a scalable
572   /// vector, but not the other way around.
573   INSERT_SUBVECTOR,
574 
575   /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
576   /// Let the result type be T, then IDX represents the starting element number
577   /// from which a subvector of type T is extracted. IDX must be a constant
578   /// multiple of T's known minimum vector length. If T is a scalable vector,
579   /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
580   /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
581   /// condition cannot be determined statically but is false at runtime, then
582   /// the result vector is undefined. The IDX parameter must be a vector index
583   /// constant type, which for most targets will be an integer pointer type.
584   ///
585   /// This operation supports extracting a fixed-width vector from a scalable
586   /// vector, but not the other way around.
587   EXTRACT_SUBVECTOR,
588 
589   /// VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
590   /// output vectors having the same type. The first output contains the even
591   /// indices from CONCAT_VECTORS(VEC1, VEC2), with the second output
592   /// containing the odd indices. The relative order of elements within an
593   /// output match that of the concatenated input.
594   VECTOR_DEINTERLEAVE,
595 
596   /// VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
597   /// output vectors having the same type. The first output contains the
598   /// result of interleaving the low half of CONCAT_VECTORS(VEC1, VEC2), with
599   /// the second output containing the result of interleaving the high half.
600   VECTOR_INTERLEAVE,
601 
602   /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
603   /// whose elements are shuffled using the following algorithm:
604   ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
605   VECTOR_REVERSE,
606 
607   /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
608   /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
609   /// values that indicate which value (or undef) each result element will
610   /// get.  These constant ints are accessible through the
611   /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
612   /// 'vperm' instruction, except that the indices must be constants and are
613   /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
614   VECTOR_SHUFFLE,
615 
616   /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
617   /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
618   /// Let the result type be T, if IMM is positive it represents the starting
619   /// element number (an index) from which a subvector of type T is extracted
620   /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
621   /// specifying the number of trailing elements to extract from VEC1, where the
622   /// elements of T are selected using the following algorithm:
623   ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
624   /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
625   /// is a constant integer.
626   VECTOR_SPLICE,
627 
628   /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
629   /// scalar value into element 0 of the resultant vector type.  The top
630   /// elements 1 to N-1 of the N-element vector are undefined.  The type
631   /// of the operand must match the vector element type, except when they
632   /// are integer types.  In this case the operand is allowed to be wider
633   /// than the vector element type, and is implicitly truncated to it.
634   SCALAR_TO_VECTOR,
635 
636   /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
637   /// duplicated in all lanes. The type of the operand must match the vector
638   /// element type, except when they are integer types.  In this case the
639   /// operand is allowed to be wider than the vector element type, and is
640   /// implicitly truncated to it.
641   SPLAT_VECTOR,
642 
643   /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
644   /// scalar values joined together and then duplicated in all lanes. This
645   /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
646   /// allows representing a 64-bit splat on a target with 32-bit integers. The
647   /// total width of the scalars must cover the element width. SCALAR1 contains
648   /// the least significant bits of the value regardless of endianness and all
649   /// scalars should have the same type.
650   SPLAT_VECTOR_PARTS,
651 
652   /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
653   /// of a linear sequence of unsigned values starting from 0 with a step of
654   /// IMM, where IMM must be a TargetConstant with type equal to the vector
655   /// element type. The arithmetic is performed modulo the bitwidth of the
656   /// element.
657   ///
658   /// The operation does not support returning fixed-width vectors or
659   /// non-constant operands.
660   STEP_VECTOR,
661 
662   /// VECTOR_COMPRESS(Vec, Mask, Passthru)
663   /// consecutively place vector elements based on mask
664   /// e.g., vec = {A, B, C, D} and mask = {1, 0, 1, 0}
665   ///         --> {A, C, ?, ?} where ? is undefined
666   /// If passthru is defined, ?s are replaced with elements from passthru.
667   /// If passthru is undef, ?s remain undefined.
668   VECTOR_COMPRESS,
669 
670   /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
671   /// producing an unsigned/signed value of type i[2*N], then return the top
672   /// part.
673   MULHU,
674   MULHS,
675 
676   /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
677   /// type i[N+1], halving the result by shifting it one bit right.
678   /// shr(add(ext(X), ext(Y)), 1)
679   AVGFLOORS,
680   AVGFLOORU,
681   /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
682   /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
683   /// right. shr(add(ext(X), ext(Y), 1), 1)
684   AVGCEILS,
685   AVGCEILU,
686 
687   // ABDS/ABDU - Absolute difference - Return the absolute difference between
688   // two numbers interpreted as signed/unsigned.
689   // i.e trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
690   //  or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
691   ABDS,
692   ABDU,
693 
694   /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
695   /// integers.
696   SMIN,
697   SMAX,
698   UMIN,
699   UMAX,
700 
701   /// [US]CMP - 3-way comparison of signed or unsigned integers. Returns -1, 0,
702   /// or 1 depending on whether Op0 <, ==, or > Op1. The operands can have type
703   /// different to the result.
704   SCMP,
705   UCMP,
706 
707   /// Bitwise operators - logical and, logical or, logical xor.
708   AND,
709   OR,
710   XOR,
711 
712   /// ABS - Determine the unsigned absolute value of a signed integer value of
713   /// the same bitwidth.
714   /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
715   /// is performed.
716   ABS,
717 
718   /// Shift and rotation operations.  After legalization, the type of the
719   /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
720   /// the shift amount can be any type, but care must be taken to ensure it is
721   /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
722   /// legalization, types like i1024 can occur and i8 doesn't have enough bits
723   /// to represent the shift amount.
724   /// When the 1st operand is a vector, the shift amount must be in the same
725   /// type. (TLI.getShiftAmountTy() will return the same type when the input
726   /// type is a vector.)
727   /// For rotates and funnel shifts, the shift amount is treated as an unsigned
728   /// amount modulo the element size of the first operand.
729   ///
730   /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
731   /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
732   /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
733   SHL,
734   SRA,
735   SRL,
736   ROTL,
737   ROTR,
738   FSHL,
739   FSHR,
740 
741   /// Byte Swap and Counting operators.
742   BSWAP,
743   CTTZ,
744   CTLZ,
745   CTPOP,
746   BITREVERSE,
747   PARITY,
748 
749   /// Bit counting operators with an undefined result for zero inputs.
750   CTTZ_ZERO_UNDEF,
751   CTLZ_ZERO_UNDEF,
752 
753   /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
754   /// i1 then the high bits must conform to getBooleanContents.
755   SELECT,
756 
757   /// Select with a vector condition (op #0) and two vector operands (ops #1
758   /// and #2), returning a vector result.  All vectors have the same length.
759   /// Much like the scalar select and setcc, each bit in the condition selects
760   /// whether the corresponding result element is taken from op #1 or op #2.
761   /// At first, the VSELECT condition is of vXi1 type. Later, targets may
762   /// change the condition type in order to match the VSELECT node using a
763   /// pattern. The condition follows the BooleanContent format of the target.
764   VSELECT,
765 
766   /// Select with condition operator - This selects between a true value and
767   /// a false value (ops #2 and #3) based on the boolean result of comparing
768   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
769   /// condition code in op #4, a CondCodeSDNode.
770   SELECT_CC,
771 
772   /// SetCC operator - This evaluates to a true value iff the condition is
773   /// true.  If the result value type is not i1 then the high bits conform
774   /// to getBooleanContents.  The operands to this are the left and right
775   /// operands to compare (ops #0, and #1) and the condition code to compare
776   /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
777   /// then the result type must also be a vector type.
778   SETCC,
779 
780   /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
781   /// op #2 is a boolean indicating if there is an incoming carry. This
782   /// operator checks the result of "LHS - RHS - Carry", and can be used to
783   /// compare two wide integers:
784   /// (setcccarry lhshi rhshi (usubo_carry lhslo rhslo) cc).
785   /// Only valid for integers.
786   SETCCCARRY,
787 
788   /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
789   /// integer shift operations.  The operation ordering is:
790   ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
791   SHL_PARTS,
792   SRA_PARTS,
793   SRL_PARTS,
794 
795   /// Conversion operators.  These are all single input single output
796   /// operations.  For all of these, the result type must be strictly
797   /// wider or narrower (depending on the operation) than the source
798   /// type.
799 
800   /// SIGN_EXTEND - Used for integer types, replicating the sign bit
801   /// into new bits.
802   SIGN_EXTEND,
803 
804   /// ZERO_EXTEND - Used for integer types, zeroing the new bits. Can carry
805   /// the NonNeg SDNodeFlag to indicate that the input is known to be
806   /// non-negative. If the flag is present and the input is negative, the result
807   /// is poison.
808   ZERO_EXTEND,
809 
810   /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
811   ANY_EXTEND,
812 
813   /// TRUNCATE - Completely drop the high bits.
814   TRUNCATE,
815 
816   /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
817   /// depends on the first letter) to floating point.
818   SINT_TO_FP,
819   UINT_TO_FP,
820 
821   /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
822   /// sign extend a small value in a large integer register (e.g. sign
823   /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
824   /// with the 7th bit).  The size of the smaller type is indicated by the 1th
825   /// operand, a ValueType node.
826   SIGN_EXTEND_INREG,
827 
828   /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
829   /// in-register any-extension of the low lanes of an integer vector. The
830   /// result type must have fewer elements than the operand type, and those
831   /// elements must be larger integer types such that the total size of the
832   /// operand type is less than or equal to the size of the result type. Each
833   /// of the low operand elements is any-extended into the corresponding,
834   /// wider result elements with the high bits becoming undef.
835   /// NOTE: The type legalizer prefers to make the operand and result size
836   /// the same to allow expansion to shuffle vector during op legalization.
837   ANY_EXTEND_VECTOR_INREG,
838 
839   /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
840   /// in-register sign-extension of the low lanes of an integer vector. The
841   /// result type must have fewer elements than the operand type, and those
842   /// elements must be larger integer types such that the total size of the
843   /// operand type is less than or equal to the size of the result type. Each
844   /// of the low operand elements is sign-extended into the corresponding,
845   /// wider result elements.
846   /// NOTE: The type legalizer prefers to make the operand and result size
847   /// the same to allow expansion to shuffle vector during op legalization.
848   SIGN_EXTEND_VECTOR_INREG,
849 
850   /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
851   /// in-register zero-extension of the low lanes of an integer vector. The
852   /// result type must have fewer elements than the operand type, and those
853   /// elements must be larger integer types such that the total size of the
854   /// operand type is less than or equal to the size of the result type. Each
855   /// of the low operand elements is zero-extended into the corresponding,
856   /// wider result elements.
857   /// NOTE: The type legalizer prefers to make the operand and result size
858   /// the same to allow expansion to shuffle vector during op legalization.
859   ZERO_EXTEND_VECTOR_INREG,
860 
861   /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
862   /// integer. These have the same semantics as fptosi and fptoui in IR. If
863   /// the FP value cannot fit in the integer type, the results are undefined.
864   FP_TO_SINT,
865   FP_TO_UINT,
866 
867   /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
868   /// signed or unsigned scalar integer type given in operand 1 with the
869   /// following semantics:
870   ///
871   ///  * If the value is NaN, zero is returned.
872   ///  * If the value is larger/smaller than the largest/smallest integer,
873   ///    the largest/smallest integer is returned (saturation).
874   ///  * Otherwise the result of rounding the value towards zero is returned.
875   ///
876   /// The scalar width of the type given in operand 1 must be equal to, or
877   /// smaller than, the scalar result type width. It may end up being smaller
878   /// than the result width as a result of integer type legalization.
879   ///
880   /// After converting to the scalar integer type in operand 1, the value is
881   /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
882   /// zero extends.
883   FP_TO_SINT_SAT,
884   FP_TO_UINT_SAT,
885 
886   /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
887   /// down to the precision of the destination VT.  TRUNC is a flag, which is
888   /// always an integer that is zero or one.  If TRUNC is 0, this is a
889   /// normal rounding, if it is 1, this FP_ROUND is known to not change the
890   /// value of Y.
891   ///
892   /// The TRUNC = 1 case is used in cases where we know that the value will
893   /// not be modified by the node, because Y is not using any of the extra
894   /// precision of source type.  This allows certain transformations like
895   /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
896   /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
897   FP_ROUND,
898 
899   /// Returns current rounding mode:
900   /// -1 Undefined
901   ///  0 Round to 0
902   ///  1 Round to nearest, ties to even
903   ///  2 Round to +inf
904   ///  3 Round to -inf
905   ///  4 Round to nearest, ties to zero
906   ///  Other values are target dependent.
907   /// Result is rounding mode and chain. Input is a chain.
908   GET_ROUNDING,
909 
910   /// Set rounding mode.
911   /// The first operand is a chain pointer. The second specifies the required
912   /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
913   SET_ROUNDING,
914 
915   /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
916   FP_EXTEND,
917 
918   /// BITCAST - This operator converts between integer, vector and FP
919   /// values, as if the value was stored to memory with one type and loaded
920   /// from the same address with the other type (or equivalently for vector
921   /// format conversions, etc).  The source and result are required to have
922   /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
923   /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
924   /// getNode().
925   ///
926   /// This operator is subtly different from the bitcast instruction from
927   /// LLVM-IR since this node may change the bits in the register. For
928   /// example, this occurs on big-endian NEON and big-endian MSA where the
929   /// layout of the bits in the register depends on the vector type and this
930   /// operator acts as a shuffle operation for some vector type combinations.
931   BITCAST,
932 
933   /// ADDRSPACECAST - This operator converts between pointers of different
934   /// address spaces.
935   ADDRSPACECAST,
936 
937   /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
938   /// and truncation for half-precision (16 bit) floating numbers. These nodes
939   /// form a semi-softened interface for dealing with f16 (as an i16), which
940   /// is often a storage-only type but has native conversions.
941   FP16_TO_FP,
942   FP_TO_FP16,
943   STRICT_FP16_TO_FP,
944   STRICT_FP_TO_FP16,
945 
946   /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions
947   /// and truncation for bfloat16. These nodes form a semi-softened interface
948   /// for dealing with bf16 (as an i16), which is often a storage-only type but
949   /// has native conversions.
950   BF16_TO_FP,
951   FP_TO_BF16,
952   STRICT_BF16_TO_FP,
953   STRICT_FP_TO_BF16,
954 
955   /// Perform various unary floating-point operations inspired by libm. For
956   /// FPOWI, the result is undefined if the integer operand doesn't fit into
957   /// sizeof(int).
958   FNEG,
959   FABS,
960   FSQRT,
961   FCBRT,
962   FSIN,
963   FCOS,
964   FTAN,
965   FASIN,
966   FACOS,
967   FATAN,
968   FSINH,
969   FCOSH,
970   FTANH,
971   FPOW,
972   FPOWI,
973   /// FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
974   FLDEXP,
975 
976   /// FFREXP - frexp, extract fractional and exponent component of a
977   /// floating-point value. Returns the two components as separate return
978   /// values.
979   FFREXP,
980 
981   FLOG,
982   FLOG2,
983   FLOG10,
984   FEXP,
985   FEXP2,
986   FEXP10,
987   FCEIL,
988   FTRUNC,
989   FRINT,
990   FNEARBYINT,
991   FROUND,
992   FROUNDEVEN,
993   FFLOOR,
994   LROUND,
995   LLROUND,
996   LRINT,
997   LLRINT,
998 
999   /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
1000   /// values.
1001   //
1002   /// In the case where a single input is a NaN (either signaling or quiet),
1003   /// the non-NaN input is returned.
1004   ///
1005   /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
1006   FMINNUM,
1007   FMAXNUM,
1008 
1009   /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or
1010   /// maximumNumber on two values, following IEEE-754 definitions. This differs
1011   /// from FMINNUM/FMAXNUM in the handling of signaling NaNs, and signed zero.
1012   ///
1013   /// If one input is a signaling NaN, returns a quiet NaN. This matches
1014   /// IEEE-754 2008's minnum/maxnum behavior for signaling NaNs (which differs
1015   /// from 2019).
1016   ///
1017   /// These treat -0 as ordered less than +0, matching the behavior of IEEE-754
1018   /// 2019's minimumNumber/maximumNumber.
1019   FMINNUM_IEEE,
1020   FMAXNUM_IEEE,
1021 
1022   /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
1023   /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
1024   /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
1025   FMINIMUM,
1026   FMAXIMUM,
1027 
1028   /// FSINCOS - Compute both fsin and fcos as a single operation.
1029   FSINCOS,
1030 
1031   /// Gets the current floating-point environment. The first operand is a token
1032   /// chain. The results are FP environment, represented by an integer value,
1033   /// and a token chain.
1034   GET_FPENV,
1035 
1036   /// Sets the current floating-point environment. The first operand is a token
1037   /// chain, the second is FP environment, represented by an integer value. The
1038   /// result is a token chain.
1039   SET_FPENV,
1040 
1041   /// Set floating-point environment to default state. The first operand and the
1042   /// result are token chains.
1043   RESET_FPENV,
1044 
1045   /// Gets the current floating-point environment. The first operand is a token
1046   /// chain, the second is a pointer to memory, where FP environment is stored
1047   /// to. The result is a token chain.
1048   GET_FPENV_MEM,
1049 
1050   /// Sets the current floating point environment. The first operand is a token
1051   /// chain, the second is a pointer to memory, where FP environment is loaded
1052   /// from. The result is a token chain.
1053   SET_FPENV_MEM,
1054 
1055   /// Reads the current dynamic floating-point control modes. The operand is
1056   /// a token chain.
1057   GET_FPMODE,
1058 
1059   /// Sets the current dynamic floating-point control modes. The first operand
1060   /// is a token chain, the second is control modes set represented as integer
1061   /// value.
1062   SET_FPMODE,
1063 
1064   /// Sets default dynamic floating-point control modes. The operand is a
1065   /// token chain.
1066   RESET_FPMODE,
1067 
1068   /// LOAD and STORE have token chains as their first operand, then the same
1069   /// operands as an LLVM load/store instruction, then an offset node that
1070   /// is added / subtracted from the base pointer to form the address (for
1071   /// indexed memory ops).
1072   LOAD,
1073   STORE,
1074 
1075   /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
1076   /// to a specified boundary.  This node always has two return values: a new
1077   /// stack pointer value and a chain. The first operand is the token chain,
1078   /// the second is the number of bytes to allocate, and the third is the
1079   /// alignment boundary.  The size is guaranteed to be a multiple of the
1080   /// stack alignment, and the alignment is guaranteed to be bigger than the
1081   /// stack alignment (if required) or 0 to get standard stack alignment.
1082   DYNAMIC_STACKALLOC,
1083 
1084   /// Control flow instructions.  These all have token chains.
1085 
1086   /// BR - Unconditional branch.  The first operand is the chain
1087   /// operand, the second is the MBB to branch to.
1088   BR,
1089 
1090   /// BRIND - Indirect branch.  The first operand is the chain, the second
1091   /// is the value to branch to, which must be of the same type as the
1092   /// target's pointer type.
1093   BRIND,
1094 
1095   /// BR_JT - Jumptable branch. The first operand is the chain, the second
1096   /// is the jumptable index, the last one is the jumptable entry index.
1097   BR_JT,
1098 
1099   /// JUMP_TABLE_DEBUG_INFO - Jumptable debug info. The first operand is the
1100   /// chain, the second is the jumptable index.
1101   JUMP_TABLE_DEBUG_INFO,
1102 
1103   /// BRCOND - Conditional branch.  The first operand is the chain, the
1104   /// second is the condition, the third is the block to branch to if the
1105   /// condition is true.  If the type of the condition is not i1, then the
1106   /// high bits must conform to getBooleanContents. If the condition is undef,
1107   /// it nondeterministically jumps to the block.
1108   /// TODO: Its semantics w.r.t undef requires further discussion; we need to
1109   /// make it sure that it is consistent with optimizations in MIR & the
1110   /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
1111   BRCOND,
1112 
1113   /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
1114   /// that the condition is represented as condition code, and two nodes to
1115   /// compare, rather than as a combined SetCC node.  The operands in order
1116   /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
1117   /// condition is undef, it nondeterministically jumps to the block.
1118   BR_CC,
1119 
1120   /// INLINEASM - Represents an inline asm block.  This node always has two
1121   /// return values: a chain and a flag result.  The inputs are as follows:
1122   ///   Operand #0  : Input chain.
1123   ///   Operand #1  : a ExternalSymbolSDNode with a pointer to the asm string.
1124   ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
1125   ///   Operand #3  : HasSideEffect, IsAlignStack bits.
1126   ///   After this, it is followed by a list of operands with this format:
1127   ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
1128   ///                     of operands that follow, etc.  See InlineAsm.h.
1129   ///     ... however many operands ...
1130   ///   Operand #last: Optional, an incoming flag.
1131   ///
1132   /// The variable width operands are required to represent target addressing
1133   /// modes as a single "operand", even though they may have multiple
1134   /// SDOperands.
1135   INLINEASM,
1136 
1137   /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
1138   INLINEASM_BR,
1139 
1140   /// EH_LABEL - Represents a label in mid basic block used to track
1141   /// locations needed for debug and exception handling tables.  These nodes
1142   /// take a chain as input and return a chain.
1143   EH_LABEL,
1144 
1145   /// ANNOTATION_LABEL - Represents a mid basic block label used by
1146   /// annotations. This should remain within the basic block and be ordered
1147   /// with respect to other call instructions, but loads and stores may float
1148   /// past it.
1149   ANNOTATION_LABEL,
1150 
1151   /// CATCHRET - Represents a return from a catch block funclet. Used for
1152   /// MSVC compatible exception handling. Takes a chain operand and a
1153   /// destination basic block operand.
1154   CATCHRET,
1155 
1156   /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
1157   /// MSVC compatible exception handling. Takes only a chain operand.
1158   CLEANUPRET,
1159 
1160   /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
1161   /// value, the same type as the pointer type for the system, and an output
1162   /// chain.
1163   STACKSAVE,
1164 
1165   /// STACKRESTORE has two operands, an input chain and a pointer to restore
1166   /// to it returns an output chain.
1167   STACKRESTORE,
1168 
1169   /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
1170   /// of a call sequence, and carry arbitrary information that target might
1171   /// want to know.  The first operand is a chain, the rest are specified by
1172   /// the target and not touched by the DAG optimizers.
1173   /// Targets that may use stack to pass call arguments define additional
1174   /// operands:
1175   /// - size of the call frame part that must be set up within the
1176   ///   CALLSEQ_START..CALLSEQ_END pair,
1177   /// - part of the call frame prepared prior to CALLSEQ_START.
1178   /// Both these parameters must be constants, their sum is the total call
1179   /// frame size.
1180   /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
1181   CALLSEQ_START, // Beginning of a call sequence
1182   CALLSEQ_END,   // End of a call sequence
1183 
1184   /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
1185   /// and the alignment. It returns a pair of values: the vaarg value and a
1186   /// new chain.
1187   VAARG,
1188 
1189   /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
1190   /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
1191   /// source.
1192   VACOPY,
1193 
1194   /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
1195   /// pointer, and a SRCVALUE.
1196   VAEND,
1197   VASTART,
1198 
1199   // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
1200   // with the preallocated call Value.
1201   PREALLOCATED_SETUP,
1202   // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
1203   // with the preallocated call Value, and a constant int.
1204   PREALLOCATED_ARG,
1205 
1206   /// SRCVALUE - This is a node type that holds a Value* that is used to
1207   /// make reference to a value in the LLVM IR.
1208   SRCVALUE,
1209 
1210   /// MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to
1211   /// reference metadata in the IR.
1212   MDNODE_SDNODE,
1213 
1214   /// PCMARKER - This corresponds to the pcmarker intrinsic.
1215   PCMARKER,
1216 
1217   /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
1218   /// It produces a chain and one i64 value. The only operand is a chain.
1219   /// If i64 is not legal, the result will be expanded into smaller values.
1220   /// Still, it returns an i64, so targets should set legality for i64.
1221   /// The result is the content of the architecture-specific cycle
1222   /// counter-like register (or other high accuracy low latency clock source).
1223   READCYCLECOUNTER,
1224 
1225   /// READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
1226   /// It has the same semantics as the READCYCLECOUNTER implementation except
1227   /// that the result is the content of the architecture-specific fixed
1228   /// frequency counter suitable for measuring elapsed time.
1229   READSTEADYCOUNTER,
1230 
1231   /// HANDLENODE node - Used as a handle for various purposes.
1232   HANDLENODE,
1233 
1234   /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
1235   /// takes as input a token chain, the pointer to the trampoline, the pointer
1236   /// to the nested function, the pointer to pass for the 'nest' parameter, a
1237   /// SRCVALUE for the trampoline and another for the nested function
1238   /// (allowing targets to access the original Function*).
1239   /// It produces a token chain as output.
1240   INIT_TRAMPOLINE,
1241 
1242   /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
1243   /// It takes a pointer to the trampoline and produces a (possibly) new
1244   /// pointer to the same trampoline with platform-specific adjustments
1245   /// applied.  The pointer it returns points to an executable block of code.
1246   ADJUST_TRAMPOLINE,
1247 
1248   /// TRAP - Trapping instruction
1249   TRAP,
1250 
1251   /// DEBUGTRAP - Trap intended to get the attention of a debugger.
1252   DEBUGTRAP,
1253 
1254   /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
1255   /// failure.
1256   UBSANTRAP,
1257 
1258   /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
1259   /// is the chain.  The other operands are the address to prefetch,
1260   /// read / write specifier, locality specifier and instruction / data cache
1261   /// specifier.
1262   PREFETCH,
1263 
1264   /// ARITH_FENCE - This corresponds to an arithmetic fence intrinsic. Both its
1265   /// operand and output are the same floating-point type.
1266   ARITH_FENCE,
1267 
1268   /// MEMBARRIER - Compiler barrier only; generate a no-op.
1269   MEMBARRIER,
1270 
1271   /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
1272   /// This corresponds to the fence instruction. It takes an input chain, and
1273   /// two integer constants: an AtomicOrdering and a SynchronizationScope.
1274   ATOMIC_FENCE,
1275 
1276   /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
1277   /// This corresponds to the "load atomic" instruction.
1278   ATOMIC_LOAD,
1279 
1280   /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
1281   /// This corresponds to the "store atomic" instruction.
1282   ATOMIC_STORE,
1283 
1284   /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
1285   /// For double-word atomic operations:
1286   /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
1287   ///                                          swapLo, swapHi)
1288   /// This corresponds to the cmpxchg instruction.
1289   ATOMIC_CMP_SWAP,
1290 
1291   /// Val, Success, OUTCHAIN
1292   ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
1293   /// N.b. this is still a strong cmpxchg operation, so
1294   /// Success == "Val == cmp".
1295   ATOMIC_CMP_SWAP_WITH_SUCCESS,
1296 
1297   /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
1298   /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
1299   /// For double-word atomic operations:
1300   /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
1301   /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
1302   /// These correspond to the atomicrmw instruction.
1303   ATOMIC_SWAP,
1304   ATOMIC_LOAD_ADD,
1305   ATOMIC_LOAD_SUB,
1306   ATOMIC_LOAD_AND,
1307   ATOMIC_LOAD_CLR,
1308   ATOMIC_LOAD_OR,
1309   ATOMIC_LOAD_XOR,
1310   ATOMIC_LOAD_NAND,
1311   ATOMIC_LOAD_MIN,
1312   ATOMIC_LOAD_MAX,
1313   ATOMIC_LOAD_UMIN,
1314   ATOMIC_LOAD_UMAX,
1315   ATOMIC_LOAD_FADD,
1316   ATOMIC_LOAD_FSUB,
1317   ATOMIC_LOAD_FMAX,
1318   ATOMIC_LOAD_FMIN,
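  /// The UINC_WRAP/UDEC_WRAP variants correspond to atomicrmw uinc_wrap and
  /// udec_wrap. Conceptually (a rough sketch of the LangRef semantics, where
  /// old is the prior memory value and amt is the operand):
  ///   UINC_WRAP: new = (old uge amt) ? 0 : old + 1
  ///   UDEC_WRAP: new = (old == 0 || old ugt amt) ? amt : old - 1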
1319   ATOMIC_LOAD_UINC_WRAP,
1320   ATOMIC_LOAD_UDEC_WRAP,
1321 
1322   // Masked load and store - consecutive vector load and store operations
1323   // with an additional mask operand that prevents memory accesses to the
1324   // masked-off lanes.
1325   //
1326   // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
1327   // OutChain = MSTORE(Value, BasePtr, Mask)
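  //
  // Per lane, the behavior is roughly (illustrative sketch; Elt(p, i) denotes
  // the i'th consecutive element at address p):
  //   MLOAD:  Val[i] = Mask[i] ? Elt(BasePtr, i) : PassThru[i]
  //   MSTORE: if (Mask[i]) Elt(BasePtr, i) = Value[i]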
1328   MLOAD,
1329   MSTORE,
1330 
1331   // Masked gather and scatter - load and store operations for a vector of
1332   // random addresses with an additional mask operand that prevents memory
1333   // accesses to the masked-off lanes.
1334   //
1335   // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
1336   // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
1337   //
1338   // The Index operand can have more vector elements than the other operands
1339   // due to type legalization. The extra elements are ignored.
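  //
  // Per lane, the behavior is roughly (illustrative sketch; how Index is
  // interpreted is described by MemIndexType further below):
  //   Addr[i] = BasePtr + Index[i] * Scale
  //   GATHER:  Val[i] = Mask[i] ? *Addr[i] : PassThru[i]
  //   SCATTER: if (Mask[i]) *Addr[i] = Value[i]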
1340   MGATHER,
1341   MSCATTER,
1342 
1343   /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
1344   /// is the chain and the second operand is the alloca pointer.
1345   LIFETIME_START,
1346   LIFETIME_END,
1347 
1348   /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
1349   /// beginning and end of a GC transition sequence, and carry arbitrary
1350   /// information that the target might need for lowering.  The first operand is
1351   /// a chain; the rest are specified by the target and not touched by the DAG
1352   /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
1353   /// nested.
1354   GC_TRANSITION_START,
1355   GC_TRANSITION_END,
1356 
1357   /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
1358   /// the most recent dynamic alloca. For most targets that would be 0, but
1359   /// for some others (e.g. PowerPC, PowerPC64) that would be a compile-time
1360   /// known nonzero constant. The only operand here is the chain.
1361   GET_DYNAMIC_AREA_OFFSET,
1362 
1363   /// Pseudo probe for AutoFDO, used as a placeholder in a basic block to
1364   /// improve the quality of the sample counts.
1365   PSEUDO_PROBE,
1366 
1367   /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
1368   /// number of elements within a scalable vector. IMM is a constant integer
1369   /// multiplier that is applied to the runtime value.
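  /// For example, the runtime element count of a <vscale x 4 x i32> vector
  /// can be expressed as VSCALE(4), i.e. 4 * vscale.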
1370   VSCALE,
1371 
1372   /// Generic reduction nodes. These nodes represent horizontal vector
1373   /// reduction operations, producing a scalar result.
1374   /// The SEQ variants perform reductions in sequential order. The first
1375   /// operand is an initial scalar accumulator value, and the second operand
1376   /// is the vector to reduce.
1377   /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
1378   ///  ... is equivalent to
1379   /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
1380   VECREDUCE_SEQ_FADD,
1381   VECREDUCE_SEQ_FMUL,
1382 
1383   /// These reductions have relaxed evaluation order semantics, and have a
1384   /// single vector operand. The order of evaluation is unspecified. For
1385   /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
1386   /// reduction, i.e.:
1387   /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
1388   ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
1389   ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
1390   ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
1391   /// For non-pow-2 vectors, this can be computed by extracting each element
1392   /// and performing the operation as if it were scalarized.
1393   VECREDUCE_FADD,
1394   VECREDUCE_FMUL,
1395   /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
1396   VECREDUCE_FMAX,
1397   VECREDUCE_FMIN,
1398   /// FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the
1399   /// llvm.minimum and llvm.maximum semantics.
1400   VECREDUCE_FMAXIMUM,
1401   VECREDUCE_FMINIMUM,
1402   /// Integer reductions may have a result type larger than the vector element
1403   /// type. However, the reduction is performed using the vector element type
1404   /// and the value in the top bits is unspecified.
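  /// For example (illustrative): an i64-typed VECREDUCE_ADD of a <4 x i32>
  /// vector accumulates with i32 wrap-around arithmetic, and only the low 32
  /// bits of the i64 result are defined.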
1405   VECREDUCE_ADD,
1406   VECREDUCE_MUL,
1407   VECREDUCE_AND,
1408   VECREDUCE_OR,
1409   VECREDUCE_XOR,
1410   VECREDUCE_SMAX,
1411   VECREDUCE_SMIN,
1412   VECREDUCE_UMAX,
1413   VECREDUCE_UMIN,
1414 
1415   // The `llvm.experimental.stackmap` intrinsic.
1416   // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
1417   // Outputs: output chain, glue
1418   STACKMAP,
1419 
1420   // The `llvm.experimental.patchpoint.*` intrinsic.
1421   // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee,
1422   //   <numArgs>, cc, ...
1423   // Outputs: [rv], output chain, glue
1424   PATCHPOINT,
1425 
1426 // Vector Predication
1427 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
1428 #include "llvm/IR/VPIntrinsics.def"
1429 
1430   // The `llvm.experimental.convergence.*` intrinsics.
1431   CONVERGENCECTRL_ANCHOR,
1432   CONVERGENCECTRL_ENTRY,
1433   CONVERGENCECTRL_LOOP,
1434   // This does not correspond to any convergence control intrinsic. It is used
1435   // to glue a convergence control token to a convergent operation in the DAG,
1436   // which is later translated to an implicit use in the MIR.
1437   CONVERGENCECTRL_GLUE,
1438 
1439   // Experimental vector histogram intrinsic
1440   // Operands: Input Chain, Inc, Mask, Base, Index, Scale, ID
1441   // Output: Output Chain
1442   EXPERIMENTAL_VECTOR_HISTOGRAM,
1443 
1444   // llvm.clear_cache intrinsic
1445   // Operands: Input Chain, Start Address, End Address
1446   // Outputs: Output Chain
1447   CLEAR_CACHE,
1448 
1449   /// BUILTIN_OP_END - This must be the last enum value in this list.
1450   /// The target-specific pre-isel opcode values start here.
1451   BUILTIN_OP_END
1452 };
1453 
1454 /// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
1455 /// which cannot raise FP exceptions should be less than this value.
1456 /// Those that do must not be less than this value.
1457 static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;
1458 
1459 /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
1460 /// which do not reference a specific memory location should be less than
1461 /// this value. Those that do must not be less than this value, and can
1462 /// be used with SelectionDAG::getMemIntrinsicNode.
1463 static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
1464 
1465 /// Whether this is a bitwise logic opcode.
1466 inline bool isBitwiseLogicOp(unsigned Opcode) {
1467   return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR;
1468 }
1469 
1470 /// Get underlying scalar opcode for VECREDUCE opcode.
1471 /// For example ISD::AND for ISD::VECREDUCE_AND.
1472 NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
1473 
1474 /// Whether this is a vector-predicated Opcode.
1475 bool isVPOpcode(unsigned Opcode);
1476 
1477 /// Whether this is a vector-predicated binary operation opcode.
1478 bool isVPBinaryOp(unsigned Opcode);
1479 
1480 /// Whether this is a vector-predicated reduction opcode.
1481 bool isVPReduction(unsigned Opcode);
1482 
1483 /// The operand position of the vector mask.
1484 std::optional<unsigned> getVPMaskIdx(unsigned Opcode);
1485 
1486 /// The operand position of the explicit vector length parameter.
1487 std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
1488 
1489 /// Translate this VP Opcode to its corresponding non-VP Opcode.
1490 std::optional<unsigned> getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept);
1491 
1492 /// Translate this non-VP Opcode to its corresponding VP Opcode.
1493 unsigned getVPForBaseOpcode(unsigned Opcode);
1494 
1495 //===--------------------------------------------------------------------===//
1496 /// MemIndexedMode enum - This enum defines the load / store indexed
1497 /// addressing modes.
1498 ///
1499 /// UNINDEXED    "Normal" load / store. The effective address is already
1500 ///              computed and is available in the base pointer. The offset
1501 ///              operand is always undefined. In addition to producing a
1502 ///              chain, an unindexed load produces one value (result of the
1503 ///              load); an unindexed store does not produce a value.
1504 ///
1505 /// PRE_INC      Similar to the unindexed mode where the effective address is
1506 /// PRE_DEC      the value of the base pointer plus / minus the offset.
1507 ///              It considers the computation as being folded into the load /
1508 ///              store operation (i.e. the load / store does the address
1509 ///              computation as well as performing the memory transaction).
1510 ///              The base operand is always undefined. In addition to
1511 ///              producing a chain, pre-indexed load produces two values
1512 ///              (result of the load and the result of the address
1513 ///              computation); a pre-indexed store produces one value (result
1514 ///              of the address computation).
1515 ///
1516 /// POST_INC     The effective address is the value of the base pointer. The
1517 /// POST_DEC     value of the offset operand is then added to / subtracted
1518 ///              from the base after the memory transaction. In addition to
1519 ///              producing a chain, post-indexed load produces two values
1520 ///              (the result of the load and the result of the base +/- offset
1521 ///              computation); a post-indexed store produces one value (the
1522 ///              result of the base +/- offset computation).
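///
/// For example (illustrative): a PRE_INC load with base B and offset 8 loads
/// from B + 8 and additionally produces B + 8 as its second result, whereas a
/// POST_INC load with the same operands loads from B and produces B + 8.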
1523 enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
1524 
1525 static const int LAST_INDEXED_MODE = POST_DEC + 1;
1526 
1527 //===--------------------------------------------------------------------===//
1528 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
1529 /// index parameter when calculating addresses.
1530 ///
1531 /// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
1532 /// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
1533 ///
1534 /// NOTE: The value of Scale is typically only known to the node owning the
1535 /// IndexType, with a value of 1 being the equivalent of unscaled.
1536 enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
1537 
1538 static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
1539 
1540 inline bool isIndexTypeSigned(MemIndexType IndexType) {
1541   return IndexType == SIGNED_SCALED;
1542 }
1543 
1544 //===--------------------------------------------------------------------===//
1545 /// LoadExtType enum - This enum defines the three variants of LOADEXT
1546 /// (load with extension).
1547 ///
1548 /// SEXTLOAD loads the integer operand and sign extends it to a larger
1549 ///          integer result type.
1550 /// ZEXTLOAD loads the integer operand and zero extends it to a larger
1551 ///          integer result type.
1552 /// EXTLOAD  is used for two things: floating point extending loads and
1553 ///          integer extending loads [the top bits are undefined].
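/// For example (illustrative): a ZEXTLOAD of i8 into i32 loads one byte and
/// zero-fills bits 8-31, while the corresponding EXTLOAD leaves those bits
/// undefined.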
1554 enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };
1555 
1556 static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
1557 
1558 NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
1559 
1560 //===--------------------------------------------------------------------===//
1561 /// ISD::CondCode enum - These are ordered carefully to make the bitfields
1562 /// below work out, when considering SETFALSE (something that never exists
1563 /// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
1564 /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
1565 /// to.  If the "N" column is 1, the result of the comparison is undefined if
1566 /// the input is a NAN.
1567 ///
1568 /// All of these (except for the 'always folded ops') should be handled for
1569 /// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
1570 /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
1571 ///
1572 /// Note that these are laid out in a specific order to allow bit-twiddling
1573 /// to transform conditions.
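///
/// Concretely (matching the N/U/L/G/E columns in the table below): E is bit 0,
/// G is bit 1, L is bit 2, U is bit 3 and N is bit 4 of the enum value. For
/// example, SETULE has the U, L and E bits set, so it is true when the
/// operands are unordered, less than, or equal.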
1574 enum CondCode {
1575   // Opcode       N U L G E       Intuitive operation
1576   SETFALSE, //      0 0 0 0       Always false (always folded)
1577   SETOEQ,   //      0 0 0 1       True if ordered and equal
1578   SETOGT,   //      0 0 1 0       True if ordered and greater than
1579   SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
1580   SETOLT,   //      0 1 0 0       True if ordered and less than
1581   SETOLE,   //      0 1 0 1       True if ordered and less than or equal
1582   SETONE,   //      0 1 1 0       True if ordered and operands are unequal
1583   SETO,     //      0 1 1 1       True if ordered (no nans)
1584   SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
1585   SETUEQ,   //      1 0 0 1       True if unordered or equal
1586   SETUGT,   //      1 0 1 0       True if unordered or greater than
1587   SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
1588   SETULT,   //      1 1 0 0       True if unordered or less than
1589   SETULE,   //      1 1 0 1       True if unordered, less than, or equal
1590   SETUNE,   //      1 1 1 0       True if unordered or not equal
1591   SETTRUE,  //      1 1 1 1       Always true (always folded)
1592   // Don't care operations: undefined if the input is a nan.
1593   SETFALSE2, //   1 X 0 0 0       Always false (always folded)
1594   SETEQ,     //   1 X 0 0 1       True if equal
1595   SETGT,     //   1 X 0 1 0       True if greater than
1596   SETGE,     //   1 X 0 1 1       True if greater than or equal
1597   SETLT,     //   1 X 1 0 0       True if less than
1598   SETLE,     //   1 X 1 0 1       True if less than or equal
1599   SETNE,     //   1 X 1 1 0       True if not equal
1600   SETTRUE2,  //   1 X 1 1 1       Always true (always folded)
1601 
1602   SETCC_INVALID // Marker value.
1603 };
1604 
1605 /// Return true if this is a setcc instruction that performs a signed
1606 /// comparison when used with integer operands.
1607 inline bool isSignedIntSetCC(CondCode Code) {
1608   return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
1609 }
1610 
1611 /// Return true if this is a setcc instruction that performs an unsigned
1612 /// comparison when used with integer operands.
1613 inline bool isUnsignedIntSetCC(CondCode Code) {
1614   return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
1615 }
1616 
1617 /// Return true if this is a setcc instruction that performs an equality
1618 /// comparison when used with integer operands.
1619 inline bool isIntEqualitySetCC(CondCode Code) {
1620   return Code == SETEQ || Code == SETNE;
1621 }
1622 
1623 /// Return true if this is a setcc instruction that performs an equality
1624 /// comparison when used with floating point operands.
1625 inline bool isFPEqualitySetCC(CondCode Code) {
1626   return Code == SETOEQ || Code == SETONE || Code == SETUEQ || Code == SETUNE;
1627 }
1628 
1629 /// Return true if the specified condition returns true if the two operands to
1630 /// the condition are equal. Note that if one of the two operands is a NaN,
1631 /// this value is meaningless.
1632 inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }
1633 
1634 /// This function returns 0 if the condition is always false if an operand is
1635 /// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
1636 /// the condition is undefined if the operand is a NaN.
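/// For example (derived from the encoding above): getUnorderedFlavor(SETOLT)
/// == 0, getUnorderedFlavor(SETULT) == 1, and getUnorderedFlavor(SETLT) == 2.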
1637 inline unsigned getUnorderedFlavor(CondCode Cond) {
1638   return ((int)Cond >> 3) & 3;
1639 }
1640 
1641 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1642 /// SetCC operation.
1643 CondCode getSetCCInverse(CondCode Operation, EVT Type);
1644 
1645 inline bool isExtOpcode(unsigned Opcode) {
1646   return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND ||
1647          Opcode == ISD::SIGN_EXTEND;
1648 }
1649 
1650 inline bool isExtVecInRegOpcode(unsigned Opcode) {
1651   return Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
1652          Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
1653          Opcode == ISD::SIGN_EXTEND_VECTOR_INREG;
1654 }
1655 
1656 namespace GlobalISel {
1657 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1658 /// SetCC operation. The U bit of the condition code has different meanings
1659 /// between floating point and integer comparisons and LLT's don't provide
1660 /// this distinction. As such we need to be told whether the comparison is
1661 /// floating point or integer-like. Pointers should use integer-like
1662 /// comparisons.
1663 CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
1664 } // end namespace GlobalISel
1665 
1666 /// Return the operation corresponding to (Y op X) when given the operation
1667 /// for (X op Y).
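/// For example, getSetCCSwappedOperands(SETLT) is SETGT, since (X < Y) is
/// equivalent to (Y > X); with the encoding above this amounts to swapping
/// the L and G bits.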
1668 CondCode getSetCCSwappedOperands(CondCode Operation);
1669 
1670 /// Return the result of a logical OR between different comparisons of
1671 /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
1672 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
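/// For example, for integer comparisons, combining SETLT and SETGT this way
/// yields SETNE, since (X < Y) | (X > Y) is equivalent to (X != Y).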
1673 CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);
1674 
1675 /// Return the result of a logical AND between different comparisons of
1676 /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
1677 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
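/// For example, for integer comparisons, combining SETLE and SETGE this way
/// yields SETEQ, since (X <= Y) & (X >= Y) is equivalent to (X == Y).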
1678 CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
1679 
1680 } // namespace ISD
1681 
1682 } // namespace llvm
1683 
1684 #endif
1685