xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
10b57cec5SDimitry Andric //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file defines the interfaces that X86 uses to lower LLVM code into a
100b57cec5SDimitry Andric // selection DAG.
110b57cec5SDimitry Andric //
120b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric 
140b57cec5SDimitry Andric #ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
150b57cec5SDimitry Andric #define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
160b57cec5SDimitry Andric 
17349cc55cSDimitry Andric #include "llvm/CodeGen/MachineFunction.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/TargetLowering.h"
190b57cec5SDimitry Andric 
200b57cec5SDimitry Andric namespace llvm {
210b57cec5SDimitry Andric   class X86Subtarget;
220b57cec5SDimitry Andric   class X86TargetMachine;
230b57cec5SDimitry Andric 
240b57cec5SDimitry Andric   namespace X86ISD {
250b57cec5SDimitry Andric     // X86 Specific DAG Nodes
260b57cec5SDimitry Andric   enum NodeType : unsigned {
270b57cec5SDimitry Andric     // Start the numbering where the builtin ops leave off.
280b57cec5SDimitry Andric     FIRST_NUMBER = ISD::BUILTIN_OP_END,
290b57cec5SDimitry Andric 
300b57cec5SDimitry Andric     /// Bit scan forward.
310b57cec5SDimitry Andric     BSF,
320b57cec5SDimitry Andric     /// Bit scan reverse.
330b57cec5SDimitry Andric     BSR,
340b57cec5SDimitry Andric 
355ffd83dbSDimitry Andric     /// X86 funnel/double shift i16 instructions. These correspond to
365ffd83dbSDimitry Andric     /// X86::SHLDW and X86::SHRDW instructions which have different amount
375ffd83dbSDimitry Andric     /// modulo rules from generic funnel shifts.
385ffd83dbSDimitry Andric     /// NOTE: The operand order matches ISD::FSHL/FSHR not SHLD/SHRD.
395ffd83dbSDimitry Andric     FSHL,
405ffd83dbSDimitry Andric     FSHR,
410b57cec5SDimitry Andric 
420b57cec5SDimitry Andric     /// Bitwise logical AND of floating point values. This corresponds
430b57cec5SDimitry Andric     /// to X86::ANDPS or X86::ANDPD.
440b57cec5SDimitry Andric     FAND,
450b57cec5SDimitry Andric 
460b57cec5SDimitry Andric     /// Bitwise logical OR of floating point values. This corresponds
470b57cec5SDimitry Andric     /// to X86::ORPS or X86::ORPD.
480b57cec5SDimitry Andric     FOR,
490b57cec5SDimitry Andric 
500b57cec5SDimitry Andric     /// Bitwise logical XOR of floating point values. This corresponds
510b57cec5SDimitry Andric     /// to X86::XORPS or X86::XORPD.
520b57cec5SDimitry Andric     FXOR,
530b57cec5SDimitry Andric 
540b57cec5SDimitry Andric     /// Bitwise logical ANDNOT of floating point values. This
550b57cec5SDimitry Andric     /// corresponds to X86::ANDNPS or X86::ANDNPD.
560b57cec5SDimitry Andric     FANDN,
570b57cec5SDimitry Andric 
580b57cec5SDimitry Andric     /// These operations represent an abstract X86 call
590b57cec5SDimitry Andric     /// instruction, which includes a bunch of information. In particular the
600b57cec5SDimitry Andric     /// operands of these nodes are:
610b57cec5SDimitry Andric     ///
620b57cec5SDimitry Andric     ///     #0 - The incoming token chain
630b57cec5SDimitry Andric     ///     #1 - The callee
640b57cec5SDimitry Andric     ///     #2 - The number of arg bytes the caller pushes on the stack.
650b57cec5SDimitry Andric     ///     #3 - The number of arg bytes the callee pops off the stack.
660b57cec5SDimitry Andric     ///     #4 - The value to pass in AL/AX/EAX (optional)
670b57cec5SDimitry Andric     ///     #5 - The value to pass in DL/DX/EDX (optional)
680b57cec5SDimitry Andric     ///
690b57cec5SDimitry Andric     /// The result values of these nodes are:
700b57cec5SDimitry Andric     ///
710b57cec5SDimitry Andric     ///     #0 - The outgoing token chain
720b57cec5SDimitry Andric     ///     #1 - The first register result value (optional)
730b57cec5SDimitry Andric     ///     #2 - The second register result value (optional)
740b57cec5SDimitry Andric     ///
750b57cec5SDimitry Andric     CALL,
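    // Illustrative sketch only (Chain, Callee, dl and Ops are placeholders,
    // not part of this header): in LowerCall the node is created roughly as
    //   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    //   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
    // with Ops holding the operands listed above.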
760b57cec5SDimitry Andric 
770b57cec5SDimitry Andric     /// Same as call except it adds the NoTrack prefix.
780b57cec5SDimitry Andric     NT_CALL,
790b57cec5SDimitry Andric 
80fe6060f1SDimitry Andric     // Pseudo for an Objective-C call that gets emitted together with a special
81fe6060f1SDimitry Andric     // marker instruction.
82fe6060f1SDimitry Andric     CALL_RVMARKER,
83fe6060f1SDimitry Andric 
840b57cec5SDimitry Andric     /// X86 compare and logical compare instructions.
855ffd83dbSDimitry Andric     CMP,
865ffd83dbSDimitry Andric     FCMP,
875ffd83dbSDimitry Andric     COMI,
885ffd83dbSDimitry Andric     UCOMI,
890b57cec5SDimitry Andric 
900b57cec5SDimitry Andric     /// X86 bit-test instructions.
910b57cec5SDimitry Andric     BT,
920b57cec5SDimitry Andric 
930b57cec5SDimitry Andric     /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
940b57cec5SDimitry Andric     /// operand, usually produced by a CMP instruction.
950b57cec5SDimitry Andric     SETCC,
960b57cec5SDimitry Andric 
970b57cec5SDimitry Andric     /// X86 Select
980b57cec5SDimitry Andric     SELECTS,
990b57cec5SDimitry Andric 
1000b57cec5SDimitry Andric     // Same as SETCC except it's materialized with an SBB and the value is
1010b57cec5SDimitry Andric     // all ones or all zeros.
1020b57cec5SDimitry Andric     SETCC_CARRY, // R = carry_bit ? ~0 : 0
1030b57cec5SDimitry Andric 
1040b57cec5SDimitry Andric     /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
1050b57cec5SDimitry Andric     /// Operands are two FP values to compare; result is a mask of
1060b57cec5SDimitry Andric     /// 0s or 1s. Generally does the right thing for C/C++ with NaNs.
1070b57cec5SDimitry Andric     FSETCC,
1080b57cec5SDimitry Andric 
1090b57cec5SDimitry Andric     /// X86 FP SETCC, similar to above, but with the output as an i1 mask,
1100b57cec5SDimitry Andric     /// and a version with SAE.
1115ffd83dbSDimitry Andric     FSETCCM,
1125ffd83dbSDimitry Andric     FSETCCM_SAE,
1130b57cec5SDimitry Andric 
1140b57cec5SDimitry Andric     /// X86 conditional moves. Operand 0 and operand 1 are the two values
1150b57cec5SDimitry Andric     /// to select from. Operand 2 is the condition code, and operand 3 is the
1160b57cec5SDimitry Andric     /// flag operand produced by a CMP or TEST instruction.
1170b57cec5SDimitry Andric     CMOV,
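    // Illustrative sketch only (Val0, Val1, CC and EFLAGS are placeholders)
    // showing the operand order described above:
    //   SDValue Ops[] = {Val0, Val1,
    //                    DAG.getTargetConstant(CC, DL, MVT::i8), EFLAGS};
    //   SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, Ops);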
1180b57cec5SDimitry Andric 
1190b57cec5SDimitry Andric     /// X86 conditional branches. Operand 0 is the chain operand, operand 1
1200b57cec5SDimitry Andric     /// is the block to branch if condition is true, operand 2 is the
1210b57cec5SDimitry Andric     /// condition code, and operand 3 is the flag operand produced by a CMP
1220b57cec5SDimitry Andric     /// or TEST instruction.
1230b57cec5SDimitry Andric     BRCOND,
1240b57cec5SDimitry Andric 
1250b57cec5SDimitry Andric     /// BRIND node with NoTrack prefix. Operand 0 is the chain operand and
1260b57cec5SDimitry Andric     /// operand 1 is the target address.
1270b57cec5SDimitry Andric     NT_BRIND,
1280b57cec5SDimitry Andric 
12906c3fb27SDimitry Andric     /// Return with a glue operand. Operand 0 is the chain operand, operand
1300b57cec5SDimitry Andric     /// 1 is the number of bytes of stack to pop.
13106c3fb27SDimitry Andric     RET_GLUE,
1320b57cec5SDimitry Andric 
1330b57cec5SDimitry Andric     /// Return from interrupt. Operand 0 is the number of bytes to pop.
1340b57cec5SDimitry Andric     IRET,
1350b57cec5SDimitry Andric 
1360b57cec5SDimitry Andric     /// Repeat fill, corresponds to X86::REP_STOSx.
1370b57cec5SDimitry Andric     REP_STOS,
1380b57cec5SDimitry Andric 
1390b57cec5SDimitry Andric     /// Repeat move, corresponds to X86::REP_MOVSx.
1400b57cec5SDimitry Andric     REP_MOVS,
1410b57cec5SDimitry Andric 
1420b57cec5SDimitry Andric     /// On Darwin, this node represents the result of the popl
1430b57cec5SDimitry Andric     /// at function entry, used for PIC code.
1440b57cec5SDimitry Andric     GlobalBaseReg,
1450b57cec5SDimitry Andric 
1460b57cec5SDimitry Andric     /// A wrapper node for TargetConstantPool, TargetJumpTable,
1470b57cec5SDimitry Andric     /// TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress,
1480b57cec5SDimitry Andric     /// MCSymbol and TargetBlockAddress.
1490b57cec5SDimitry Andric     Wrapper,
1500b57cec5SDimitry Andric 
1510b57cec5SDimitry Andric     /// Special wrapper used under X86-64 PIC mode for RIP
1520b57cec5SDimitry Andric     /// relative displacements.
1530b57cec5SDimitry Andric     WrapperRIP,
1540b57cec5SDimitry Andric 
1558bcb0991SDimitry Andric     /// Copies a 64-bit value from an MMX vector to the low word
1568bcb0991SDimitry Andric     /// of an XMM vector, with the high word zero filled.
1578bcb0991SDimitry Andric     MOVQ2DQ,
1588bcb0991SDimitry Andric 
1590b57cec5SDimitry Andric     /// Copies a 64-bit value from the low word of an XMM vector
1600b57cec5SDimitry Andric     /// to an MMX vector.
1610b57cec5SDimitry Andric     MOVDQ2Q,
1620b57cec5SDimitry Andric 
1630b57cec5SDimitry Andric     /// Copies a 32-bit value from the low word of an MMX
1640b57cec5SDimitry Andric     /// vector to a GPR.
1650b57cec5SDimitry Andric     MMX_MOVD2W,
1660b57cec5SDimitry Andric 
1670b57cec5SDimitry Andric     /// Copies a GPR into the low 32-bit word of an MMX vector
1680b57cec5SDimitry Andric     /// and zeroes out the high word.
1690b57cec5SDimitry Andric     MMX_MOVW2D,
1700b57cec5SDimitry Andric 
1710b57cec5SDimitry Andric     /// Extract an 8-bit value from a vector and zero extend it to
1720b57cec5SDimitry Andric     /// i32, corresponds to X86::PEXTRB.
1730b57cec5SDimitry Andric     PEXTRB,
1740b57cec5SDimitry Andric 
1750b57cec5SDimitry Andric     /// Extract a 16-bit value from a vector and zero extend it to
1760b57cec5SDimitry Andric     /// i32, corresponds to X86::PEXTRW.
1770b57cec5SDimitry Andric     PEXTRW,
1780b57cec5SDimitry Andric 
1790b57cec5SDimitry Andric     /// Insert any element of a 4 x float vector into any element
1800b57cec5SDimitry Andric     /// of a destination 4 x float vector.
1810b57cec5SDimitry Andric     INSERTPS,
1820b57cec5SDimitry Andric 
1830b57cec5SDimitry Andric     /// Insert the lower 8 bits of a 32-bit value into a vector,
1840b57cec5SDimitry Andric     /// corresponds to X86::PINSRB.
1850b57cec5SDimitry Andric     PINSRB,
1860b57cec5SDimitry Andric 
1870b57cec5SDimitry Andric     /// Insert the lower 16 bits of a 32-bit value into a vector,
1880b57cec5SDimitry Andric     /// corresponds to X86::PINSRW.
1890b57cec5SDimitry Andric     PINSRW,
1900b57cec5SDimitry Andric 
1910b57cec5SDimitry Andric     /// Shuffle 16 8-bit values within a vector.
1920b57cec5SDimitry Andric     PSHUFB,
1930b57cec5SDimitry Andric 
1940b57cec5SDimitry Andric     /// Compute Sum of Absolute Differences.
1950b57cec5SDimitry Andric     PSADBW,
1960b57cec5SDimitry Andric     /// Compute Double Block Packed Sum-Absolute-Differences
1970b57cec5SDimitry Andric     DBPSADBW,
1980b57cec5SDimitry Andric 
1990b57cec5SDimitry Andric     /// Bitwise Logical AND NOT of Packed FP values.
2000b57cec5SDimitry Andric     ANDNP,
2010b57cec5SDimitry Andric 
2020b57cec5SDimitry Andric     /// Blend where the selector is an immediate.
2030b57cec5SDimitry Andric     BLENDI,
2040b57cec5SDimitry Andric 
2050b57cec5SDimitry Andric     /// Dynamic (non-constant condition) vector blend where only the sign bits
2060b57cec5SDimitry Andric     /// of the condition elements are used. This is used to enforce that the
2070b57cec5SDimitry Andric     /// condition mask is not valid for generic VSELECT optimizations. This
2080b57cec5SDimitry Andric     /// is also used to implement the intrinsics.
2090b57cec5SDimitry Andric     /// Operands are in VSELECT order: MASK, TRUE, FALSE
2100b57cec5SDimitry Andric     BLENDV,
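    // Illustrative sketch only (Mask, TVal and FVal are placeholders) showing
    // the VSELECT operand order documented above:
    //   SDValue Blend = DAG.getNode(X86ISD::BLENDV, DL, VT, Mask, TVal, FVal);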
2110b57cec5SDimitry Andric 
2120b57cec5SDimitry Andric     /// Combined add and sub on an FP vector.
2130b57cec5SDimitry Andric     ADDSUB,
2140b57cec5SDimitry Andric 
2150b57cec5SDimitry Andric     //  FP vector ops with rounding mode.
2165ffd83dbSDimitry Andric     FADD_RND,
2175ffd83dbSDimitry Andric     FADDS,
2185ffd83dbSDimitry Andric     FADDS_RND,
2195ffd83dbSDimitry Andric     FSUB_RND,
2205ffd83dbSDimitry Andric     FSUBS,
2215ffd83dbSDimitry Andric     FSUBS_RND,
2225ffd83dbSDimitry Andric     FMUL_RND,
2235ffd83dbSDimitry Andric     FMULS,
2245ffd83dbSDimitry Andric     FMULS_RND,
2255ffd83dbSDimitry Andric     FDIV_RND,
2265ffd83dbSDimitry Andric     FDIVS,
2275ffd83dbSDimitry Andric     FDIVS_RND,
2285ffd83dbSDimitry Andric     FMAX_SAE,
2295ffd83dbSDimitry Andric     FMAXS_SAE,
2305ffd83dbSDimitry Andric     FMIN_SAE,
2315ffd83dbSDimitry Andric     FMINS_SAE,
2325ffd83dbSDimitry Andric     FSQRT_RND,
2335ffd83dbSDimitry Andric     FSQRTS,
2345ffd83dbSDimitry Andric     FSQRTS_RND,
2350b57cec5SDimitry Andric 
2360b57cec5SDimitry Andric     // FP vector get exponent.
2375ffd83dbSDimitry Andric     FGETEXP,
2385ffd83dbSDimitry Andric     FGETEXP_SAE,
2395ffd83dbSDimitry Andric     FGETEXPS,
2405ffd83dbSDimitry Andric     FGETEXPS_SAE,
2410b57cec5SDimitry Andric     // Extract Normalized Mantissas.
2425ffd83dbSDimitry Andric     VGETMANT,
2435ffd83dbSDimitry Andric     VGETMANT_SAE,
2445ffd83dbSDimitry Andric     VGETMANTS,
2455ffd83dbSDimitry Andric     VGETMANTS_SAE,
2460b57cec5SDimitry Andric     // FP Scale.
2475ffd83dbSDimitry Andric     SCALEF,
2485ffd83dbSDimitry Andric     SCALEF_RND,
2495ffd83dbSDimitry Andric     SCALEFS,
2505ffd83dbSDimitry Andric     SCALEFS_RND,
2510b57cec5SDimitry Andric 
2520b57cec5SDimitry Andric     /// Integer horizontal add/sub.
2530b57cec5SDimitry Andric     HADD,
2540b57cec5SDimitry Andric     HSUB,
2550b57cec5SDimitry Andric 
2560b57cec5SDimitry Andric     /// Floating point horizontal add/sub.
2570b57cec5SDimitry Andric     FHADD,
2580b57cec5SDimitry Andric     FHSUB,
2590b57cec5SDimitry Andric 
2600b57cec5SDimitry Andric     // Detect Conflicts Within a Vector
2610b57cec5SDimitry Andric     CONFLICT,
2620b57cec5SDimitry Andric 
2630b57cec5SDimitry Andric     /// Floating point max and min.
2645ffd83dbSDimitry Andric     FMAX,
2655ffd83dbSDimitry Andric     FMIN,
2660b57cec5SDimitry Andric 
2670b57cec5SDimitry Andric     /// Commutative FMIN and FMAX.
2685ffd83dbSDimitry Andric     FMAXC,
2695ffd83dbSDimitry Andric     FMINC,
2700b57cec5SDimitry Andric 
2710b57cec5SDimitry Andric     /// Scalar intrinsic floating point max and min.
2725ffd83dbSDimitry Andric     FMAXS,
2735ffd83dbSDimitry Andric     FMINS,
2740b57cec5SDimitry Andric 
2750b57cec5SDimitry Andric     /// Floating point reciprocal-sqrt and reciprocal approximation.
2760b57cec5SDimitry Andric     /// Note that these typically require refinement
2770b57cec5SDimitry Andric     /// in order to obtain suitable precision.
2785ffd83dbSDimitry Andric     FRSQRT,
2795ffd83dbSDimitry Andric     FRCP,
2800b57cec5SDimitry Andric 
2810b57cec5SDimitry Andric     // AVX-512 reciprocal approximations with a little more precision.
2825ffd83dbSDimitry Andric     RSQRT14,
2835ffd83dbSDimitry Andric     RSQRT14S,
2845ffd83dbSDimitry Andric     RCP14,
2855ffd83dbSDimitry Andric     RCP14S,
2860b57cec5SDimitry Andric 
2870b57cec5SDimitry Andric     // Thread Local Storage.
2880b57cec5SDimitry Andric     TLSADDR,
2890b57cec5SDimitry Andric 
2900b57cec5SDimitry Andric     // Thread Local Storage. A call to get the start address
2910b57cec5SDimitry Andric     // of the TLS block for the current module.
2920b57cec5SDimitry Andric     TLSBASEADDR,
2930b57cec5SDimitry Andric 
2940b57cec5SDimitry Andric     // Thread Local Storage. A call to an OS-provided
2950b57cec5SDimitry Andric     // thunk at the address from an earlier relocation.
2960b57cec5SDimitry Andric     TLSCALL,
2970b57cec5SDimitry Andric 
298*0fca6ea1SDimitry Andric     // Thread Local Storage. A descriptor containing a pointer to
299*0fca6ea1SDimitry Andric     // code and to an argument, used to get the TLS offset for the symbol.
300*0fca6ea1SDimitry Andric     TLSDESC,
301*0fca6ea1SDimitry Andric 
3020b57cec5SDimitry Andric     // Exception Handling helpers.
3030b57cec5SDimitry Andric     EH_RETURN,
3040b57cec5SDimitry Andric 
3050b57cec5SDimitry Andric     // SjLj exception handling setjmp.
3060b57cec5SDimitry Andric     EH_SJLJ_SETJMP,
3070b57cec5SDimitry Andric 
3080b57cec5SDimitry Andric     // SjLj exception handling longjmp.
3090b57cec5SDimitry Andric     EH_SJLJ_LONGJMP,
3100b57cec5SDimitry Andric 
3110b57cec5SDimitry Andric     // SjLj exception handling dispatch.
3120b57cec5SDimitry Andric     EH_SJLJ_SETUP_DISPATCH,
3130b57cec5SDimitry Andric 
3140b57cec5SDimitry Andric     /// Tail call return. See X86TargetLowering::LowerCall for
3150b57cec5SDimitry Andric     /// the list of operands.
3160b57cec5SDimitry Andric     TC_RETURN,
3170b57cec5SDimitry Andric 
3180b57cec5SDimitry Andric     // Vector move to low scalar and zero higher vector elements.
3190b57cec5SDimitry Andric     VZEXT_MOVL,
3200b57cec5SDimitry Andric 
3210b57cec5SDimitry Andric     // Vector integer truncate.
3220b57cec5SDimitry Andric     VTRUNC,
3230b57cec5SDimitry Andric     // Vector integer truncate with unsigned/signed saturation.
3245ffd83dbSDimitry Andric     VTRUNCUS,
3255ffd83dbSDimitry Andric     VTRUNCS,
3260b57cec5SDimitry Andric 
3270b57cec5SDimitry Andric     // Masked version of the above. Used when less than a 128-bit result is
3280b57cec5SDimitry Andric     // produced since the mask only applies to the lower elements and can't
3290b57cec5SDimitry Andric     // be represented by a select.
3300b57cec5SDimitry Andric     // SRC, PASSTHRU, MASK
3315ffd83dbSDimitry Andric     VMTRUNC,
3325ffd83dbSDimitry Andric     VMTRUNCUS,
3335ffd83dbSDimitry Andric     VMTRUNCS,
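    // Illustrative sketch only (Src, PassThru and Mask are placeholders)
    // showing the SRC, PASSTHRU, MASK operand order documented above:
    //   SDValue T =
    //       DAG.getNode(X86ISD::VMTRUNC, DL, ResVT, Src, PassThru, Mask);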
3340b57cec5SDimitry Andric 
3350b57cec5SDimitry Andric     // Vector FP extend.
3365ffd83dbSDimitry Andric     VFPEXT,
3375ffd83dbSDimitry Andric     VFPEXT_SAE,
3385ffd83dbSDimitry Andric     VFPEXTS,
3395ffd83dbSDimitry Andric     VFPEXTS_SAE,
3400b57cec5SDimitry Andric 
3410b57cec5SDimitry Andric     // Vector FP round.
3425ffd83dbSDimitry Andric     VFPROUND,
3435ffd83dbSDimitry Andric     VFPROUND_RND,
3445ffd83dbSDimitry Andric     VFPROUNDS,
3455ffd83dbSDimitry Andric     VFPROUNDS_RND,
3460b57cec5SDimitry Andric 
3470b57cec5SDimitry Andric     // Masked version of above. Used for v2f64->v4f32.
3480b57cec5SDimitry Andric     // SRC, PASSTHRU, MASK
3490b57cec5SDimitry Andric     VMFPROUND,
3500b57cec5SDimitry Andric 
3510b57cec5SDimitry Andric     // 128-bit vector logical left / right shift
3525ffd83dbSDimitry Andric     VSHLDQ,
3535ffd83dbSDimitry Andric     VSRLDQ,
3540b57cec5SDimitry Andric 
3550b57cec5SDimitry Andric     // Vector shift elements
3565ffd83dbSDimitry Andric     VSHL,
3575ffd83dbSDimitry Andric     VSRL,
3585ffd83dbSDimitry Andric     VSRA,
3590b57cec5SDimitry Andric 
3600b57cec5SDimitry Andric     // Vector variable shift
3615ffd83dbSDimitry Andric     VSHLV,
3625ffd83dbSDimitry Andric     VSRLV,
3635ffd83dbSDimitry Andric     VSRAV,
3640b57cec5SDimitry Andric 
3650b57cec5SDimitry Andric     // Vector shift elements by immediate
3665ffd83dbSDimitry Andric     VSHLI,
3675ffd83dbSDimitry Andric     VSRLI,
3685ffd83dbSDimitry Andric     VSRAI,
3690b57cec5SDimitry Andric 
3700b57cec5SDimitry Andric     // Shifts of mask registers.
3715ffd83dbSDimitry Andric     KSHIFTL,
3725ffd83dbSDimitry Andric     KSHIFTR,
3730b57cec5SDimitry Andric 
3740b57cec5SDimitry Andric     // Bit rotate by immediate
3755ffd83dbSDimitry Andric     VROTLI,
3765ffd83dbSDimitry Andric     VROTRI,
3770b57cec5SDimitry Andric 
3780b57cec5SDimitry Andric     // Vector packed double/float comparison.
3790b57cec5SDimitry Andric     CMPP,
3800b57cec5SDimitry Andric 
3810b57cec5SDimitry Andric     // Vector integer comparisons.
3825ffd83dbSDimitry Andric     PCMPEQ,
3835ffd83dbSDimitry Andric     PCMPGT,
3840b57cec5SDimitry Andric 
3850b57cec5SDimitry Andric     // v8i16 Horizontal minimum and position.
3860b57cec5SDimitry Andric     PHMINPOS,
3870b57cec5SDimitry Andric 
3880b57cec5SDimitry Andric     MULTISHIFT,
3890b57cec5SDimitry Andric 
3900b57cec5SDimitry Andric     /// Vector comparison generating mask bits for fp and
3910b57cec5SDimitry Andric     /// integer signed and unsigned data types.
3920b57cec5SDimitry Andric     CMPM,
393e8d8bef9SDimitry Andric     // Vector mask comparison generating mask bits for FP values.
394e8d8bef9SDimitry Andric     CMPMM,
395e8d8bef9SDimitry Andric     // Vector mask comparison with SAE for FP values.
396e8d8bef9SDimitry Andric     CMPMM_SAE,
3970b57cec5SDimitry Andric 
3980b57cec5SDimitry Andric     // Arithmetic operations with FLAGS results.
3995ffd83dbSDimitry Andric     ADD,
4005ffd83dbSDimitry Andric     SUB,
4015ffd83dbSDimitry Andric     ADC,
4025ffd83dbSDimitry Andric     SBB,
4035ffd83dbSDimitry Andric     SMUL,
4045ffd83dbSDimitry Andric     UMUL,
4055ffd83dbSDimitry Andric     OR,
4065ffd83dbSDimitry Andric     XOR,
4075ffd83dbSDimitry Andric     AND,
4080b57cec5SDimitry Andric 
4090b57cec5SDimitry Andric     // Bit field extract.
4100b57cec5SDimitry Andric     BEXTR,
411e8d8bef9SDimitry Andric     BEXTRI,
4120b57cec5SDimitry Andric 
4130b57cec5SDimitry Andric     // Zero High Bits Starting with Specified Bit Position.
4140b57cec5SDimitry Andric     BZHI,
4150b57cec5SDimitry Andric 
4165ffd83dbSDimitry Andric     // Parallel extract and deposit.
4175ffd83dbSDimitry Andric     PDEP,
4185ffd83dbSDimitry Andric     PEXT,
4195ffd83dbSDimitry Andric 
4200b57cec5SDimitry Andric     // X86-specific multiply by immediate.
4210b57cec5SDimitry Andric     MUL_IMM,
4220b57cec5SDimitry Andric 
4230b57cec5SDimitry Andric     // Vector sign bit extraction.
4240b57cec5SDimitry Andric     MOVMSK,
4250b57cec5SDimitry Andric 
4260b57cec5SDimitry Andric     // Vector bitwise comparisons.
4270b57cec5SDimitry Andric     PTEST,
4280b57cec5SDimitry Andric 
4290b57cec5SDimitry Andric     // Vector packed fp sign bitwise comparisons.
4300b57cec5SDimitry Andric     TESTP,
4310b57cec5SDimitry Andric 
4320b57cec5SDimitry Andric     // OR/AND test for masks.
4330b57cec5SDimitry Andric     KORTEST,
4340b57cec5SDimitry Andric     KTEST,
4350b57cec5SDimitry Andric 
4360b57cec5SDimitry Andric     // ADD for masks.
4370b57cec5SDimitry Andric     KADD,
4380b57cec5SDimitry Andric 
4390b57cec5SDimitry Andric     // Several flavors of instructions with vector shuffle behaviors.
4400b57cec5SDimitry Andric     // Saturated signed/unsigned packing.
4410b57cec5SDimitry Andric     PACKSS,
4420b57cec5SDimitry Andric     PACKUS,
4430b57cec5SDimitry Andric     // Intra-lane alignr.
4440b57cec5SDimitry Andric     PALIGNR,
4450b57cec5SDimitry Andric     // AVX512 inter-lane alignr.
4460b57cec5SDimitry Andric     VALIGN,
4470b57cec5SDimitry Andric     PSHUFD,
4480b57cec5SDimitry Andric     PSHUFHW,
4490b57cec5SDimitry Andric     PSHUFLW,
4500b57cec5SDimitry Andric     SHUFP,
4510b57cec5SDimitry Andric     // VBMI2 Concat & Shift.
4520b57cec5SDimitry Andric     VSHLD,
4530b57cec5SDimitry Andric     VSHRD,
4540b57cec5SDimitry Andric     VSHLDV,
4550b57cec5SDimitry Andric     VSHRDV,
4560b57cec5SDimitry Andric     // Shuffle Packed Values at 128-bit granularity.
4570b57cec5SDimitry Andric     SHUF128,
4580b57cec5SDimitry Andric     MOVDDUP,
4590b57cec5SDimitry Andric     MOVSHDUP,
4600b57cec5SDimitry Andric     MOVSLDUP,
4610b57cec5SDimitry Andric     MOVLHPS,
4620b57cec5SDimitry Andric     MOVHLPS,
4630b57cec5SDimitry Andric     MOVSD,
4640b57cec5SDimitry Andric     MOVSS,
465349cc55cSDimitry Andric     MOVSH,
4660b57cec5SDimitry Andric     UNPCKL,
4670b57cec5SDimitry Andric     UNPCKH,
4680b57cec5SDimitry Andric     VPERMILPV,
4690b57cec5SDimitry Andric     VPERMILPI,
4700b57cec5SDimitry Andric     VPERMI,
4710b57cec5SDimitry Andric     VPERM2X128,
4720b57cec5SDimitry Andric 
4730b57cec5SDimitry Andric     // Variable Permute (VPERM).
4740b57cec5SDimitry Andric     // Res = VPERMV MaskV, V0
4750b57cec5SDimitry Andric     VPERMV,
4760b57cec5SDimitry Andric 
4770b57cec5SDimitry Andric     // 3-op Variable Permute (VPERMT2).
4780b57cec5SDimitry Andric     // Res = VPERMV3 V0, MaskV, V1
4790b57cec5SDimitry Andric     VPERMV3,
4800b57cec5SDimitry Andric 
4810b57cec5SDimitry Andric     // Bitwise ternary logic.
4820b57cec5SDimitry Andric     VPTERNLOG,
4830b57cec5SDimitry Andric     // Fix Up Special Packed Float32/64 values.
4845ffd83dbSDimitry Andric     VFIXUPIMM,
4855ffd83dbSDimitry Andric     VFIXUPIMM_SAE,
4865ffd83dbSDimitry Andric     VFIXUPIMMS,
4875ffd83dbSDimitry Andric     VFIXUPIMMS_SAE,
4880b57cec5SDimitry Andric     // Range Restriction Calculation For Packed Pairs of Float32/64 values.
4895ffd83dbSDimitry Andric     VRANGE,
4905ffd83dbSDimitry Andric     VRANGE_SAE,
4915ffd83dbSDimitry Andric     VRANGES,
4925ffd83dbSDimitry Andric     VRANGES_SAE,
4930b57cec5SDimitry Andric     // Reduce - Perform Reduction Transformation on scalar/packed FP.
4945ffd83dbSDimitry Andric     VREDUCE,
4955ffd83dbSDimitry Andric     VREDUCE_SAE,
4965ffd83dbSDimitry Andric     VREDUCES,
4975ffd83dbSDimitry Andric     VREDUCES_SAE,
4980b57cec5SDimitry Andric     // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
4990b57cec5SDimitry Andric     // Also used by the legacy (V)ROUND intrinsics where we mask out the
5000b57cec5SDimitry Andric     // scaling part of the immediate.
5015ffd83dbSDimitry Andric     VRNDSCALE,
5025ffd83dbSDimitry Andric     VRNDSCALE_SAE,
5035ffd83dbSDimitry Andric     VRNDSCALES,
5045ffd83dbSDimitry Andric     VRNDSCALES_SAE,
5050b57cec5SDimitry Andric     // Tests types of FP values for packed types.
5060b57cec5SDimitry Andric     VFPCLASS,
5070b57cec5SDimitry Andric     // Tests types of FP values for scalar types.
5080b57cec5SDimitry Andric     VFPCLASSS,
5090b57cec5SDimitry Andric 
5100b57cec5SDimitry Andric     // Broadcast (splat) scalar or element 0 of a vector. If the operand is
5110b57cec5SDimitry Andric     // a vector, this node may change the vector length as part of the splat.
5120b57cec5SDimitry Andric     VBROADCAST,
5130b57cec5SDimitry Andric     // Broadcast mask to vector.
5140b57cec5SDimitry Andric     VBROADCASTM,
5150b57cec5SDimitry Andric 
5160b57cec5SDimitry Andric     /// SSE4A Extraction and Insertion.
5175ffd83dbSDimitry Andric     EXTRQI,
5185ffd83dbSDimitry Andric     INSERTQI,
5190b57cec5SDimitry Andric 
5200b57cec5SDimitry Andric     // XOP arithmetic/logical shifts.
5215ffd83dbSDimitry Andric     VPSHA,
5225ffd83dbSDimitry Andric     VPSHL,
5230b57cec5SDimitry Andric     // XOP signed/unsigned integer comparisons.
5245ffd83dbSDimitry Andric     VPCOM,
5255ffd83dbSDimitry Andric     VPCOMU,
5260b57cec5SDimitry Andric     // XOP packed permute bytes.
5270b57cec5SDimitry Andric     VPPERM,
5280b57cec5SDimitry Andric     // XOP two source permutation.
5290b57cec5SDimitry Andric     VPERMIL2,
5300b57cec5SDimitry Andric 
5310b57cec5SDimitry Andric     // Vector multiply packed unsigned doubleword integers.
5320b57cec5SDimitry Andric     PMULUDQ,
5330b57cec5SDimitry Andric     // Vector multiply packed signed doubleword integers.
5340b57cec5SDimitry Andric     PMULDQ,
5350b57cec5SDimitry Andric     // Vector Multiply Packed Unsigned Integers with Round and Scale.
5360b57cec5SDimitry Andric     MULHRS,
5370b57cec5SDimitry Andric 
5380b57cec5SDimitry Andric     // Multiply and Add Packed Integers.
5395ffd83dbSDimitry Andric     VPMADDUBSW,
5405ffd83dbSDimitry Andric     VPMADDWD,
5410b57cec5SDimitry Andric 
5420b57cec5SDimitry Andric     // AVX512IFMA multiply and add.
5430b57cec5SDimitry Andric     // NOTE: These are different from the instruction and perform
5440b57cec5SDimitry Andric     // op0 x op1 + op2.
5455ffd83dbSDimitry Andric     VPMADD52L,
5465ffd83dbSDimitry Andric     VPMADD52H,
5470b57cec5SDimitry Andric 
5480b57cec5SDimitry Andric     // VNNI
5490b57cec5SDimitry Andric     VPDPBUSD,
5500b57cec5SDimitry Andric     VPDPBUSDS,
5510b57cec5SDimitry Andric     VPDPWSSD,
5520b57cec5SDimitry Andric     VPDPWSSDS,
5530b57cec5SDimitry Andric 
5540b57cec5SDimitry Andric     // FMA nodes.
5550b57cec5SDimitry Andric     // We use the target independent ISD::FMA for the non-inverted case.
5560b57cec5SDimitry Andric     FNMADD,
5570b57cec5SDimitry Andric     FMSUB,
5580b57cec5SDimitry Andric     FNMSUB,
5590b57cec5SDimitry Andric     FMADDSUB,
5600b57cec5SDimitry Andric     FMSUBADD,
5610b57cec5SDimitry Andric 
5620b57cec5SDimitry Andric     // FMA with rounding mode.
5630b57cec5SDimitry Andric     FMADD_RND,
5640b57cec5SDimitry Andric     FNMADD_RND,
5650b57cec5SDimitry Andric     FMSUB_RND,
5660b57cec5SDimitry Andric     FNMSUB_RND,
5670b57cec5SDimitry Andric     FMADDSUB_RND,
5680b57cec5SDimitry Andric     FMSUBADD_RND,
5690b57cec5SDimitry Andric 
570349cc55cSDimitry Andric     // AVX512-FP16 complex addition and multiplication.
571349cc55cSDimitry Andric     VFMADDC,
572349cc55cSDimitry Andric     VFMADDC_RND,
573349cc55cSDimitry Andric     VFCMADDC,
574349cc55cSDimitry Andric     VFCMADDC_RND,
575349cc55cSDimitry Andric 
576349cc55cSDimitry Andric     VFMULC,
577349cc55cSDimitry Andric     VFMULC_RND,
578349cc55cSDimitry Andric     VFCMULC,
579349cc55cSDimitry Andric     VFCMULC_RND,
580349cc55cSDimitry Andric 
581349cc55cSDimitry Andric     VFMADDCSH,
582349cc55cSDimitry Andric     VFMADDCSH_RND,
583349cc55cSDimitry Andric     VFCMADDCSH,
584349cc55cSDimitry Andric     VFCMADDCSH_RND,
585349cc55cSDimitry Andric 
586349cc55cSDimitry Andric     VFMULCSH,
587349cc55cSDimitry Andric     VFMULCSH_RND,
588349cc55cSDimitry Andric     VFCMULCSH,
589349cc55cSDimitry Andric     VFCMULCSH_RND,
590349cc55cSDimitry Andric 
591bdd1243dSDimitry Andric     VPDPBSUD,
592bdd1243dSDimitry Andric     VPDPBSUDS,
593bdd1243dSDimitry Andric     VPDPBUUD,
594bdd1243dSDimitry Andric     VPDPBUUDS,
595bdd1243dSDimitry Andric     VPDPBSSD,
596bdd1243dSDimitry Andric     VPDPBSSDS,
597bdd1243dSDimitry Andric 
5980b57cec5SDimitry Andric     // Compress and expand.
5990b57cec5SDimitry Andric     COMPRESS,
6000b57cec5SDimitry Andric     EXPAND,
6010b57cec5SDimitry Andric 
6020b57cec5SDimitry Andric     // Bits shuffle
6030b57cec5SDimitry Andric     VPSHUFBITQMB,
6040b57cec5SDimitry Andric 
6050b57cec5SDimitry Andric     // Convert Unsigned/Integer to Floating-Point Value with rounding mode.
6065ffd83dbSDimitry Andric     SINT_TO_FP_RND,
6075ffd83dbSDimitry Andric     UINT_TO_FP_RND,
6085ffd83dbSDimitry Andric     SCALAR_SINT_TO_FP,
6095ffd83dbSDimitry Andric     SCALAR_UINT_TO_FP,
6105ffd83dbSDimitry Andric     SCALAR_SINT_TO_FP_RND,
6115ffd83dbSDimitry Andric     SCALAR_UINT_TO_FP_RND,
6120b57cec5SDimitry Andric 
6130b57cec5SDimitry Andric     // Vector float/double to signed/unsigned integer.
6145ffd83dbSDimitry Andric     CVTP2SI,
6155ffd83dbSDimitry Andric     CVTP2UI,
6165ffd83dbSDimitry Andric     CVTP2SI_RND,
6175ffd83dbSDimitry Andric     CVTP2UI_RND,
6180b57cec5SDimitry Andric     // Scalar float/double to signed/unsigned integer.
6195ffd83dbSDimitry Andric     CVTS2SI,
6205ffd83dbSDimitry Andric     CVTS2UI,
6215ffd83dbSDimitry Andric     CVTS2SI_RND,
6225ffd83dbSDimitry Andric     CVTS2UI_RND,
6230b57cec5SDimitry Andric 
6240b57cec5SDimitry Andric     // Vector float/double to signed/unsigned integer with truncation.
6255ffd83dbSDimitry Andric     CVTTP2SI,
6265ffd83dbSDimitry Andric     CVTTP2UI,
6275ffd83dbSDimitry Andric     CVTTP2SI_SAE,
6285ffd83dbSDimitry Andric     CVTTP2UI_SAE,
6290b57cec5SDimitry Andric     // Scalar float/double to signed/unsigned integer with truncation.
6305ffd83dbSDimitry Andric     CVTTS2SI,
6315ffd83dbSDimitry Andric     CVTTS2UI,
6325ffd83dbSDimitry Andric     CVTTS2SI_SAE,
6335ffd83dbSDimitry Andric     CVTTS2UI_SAE,
6340b57cec5SDimitry Andric 
6350b57cec5SDimitry Andric     // Vector signed/unsigned integer to float/double.
6365ffd83dbSDimitry Andric     CVTSI2P,
6375ffd83dbSDimitry Andric     CVTUI2P,
6380b57cec5SDimitry Andric 
6390b57cec5SDimitry Andric     // Masked versions of above. Used for v2f64->v4f32.
6400b57cec5SDimitry Andric     // SRC, PASSTHRU, MASK
6415ffd83dbSDimitry Andric     MCVTP2SI,
6425ffd83dbSDimitry Andric     MCVTP2UI,
6435ffd83dbSDimitry Andric     MCVTTP2SI,
6445ffd83dbSDimitry Andric     MCVTTP2UI,
6455ffd83dbSDimitry Andric     MCVTSI2P,
6465ffd83dbSDimitry Andric     MCVTUI2P,
6470b57cec5SDimitry Andric 
6480b57cec5SDimitry Andric     // Vector float to bfloat16.
6490b57cec5SDimitry Andric     // Convert two packed single-precision vectors to one packed BF16 vector
6500b57cec5SDimitry Andric     CVTNE2PS2BF16,
6510b57cec5SDimitry Andric     // Convert packed single-precision data to packed BF16 data
6520b57cec5SDimitry Andric     CVTNEPS2BF16,
6530b57cec5SDimitry Andric     // Masked version of above.
6540b57cec5SDimitry Andric     // SRC, PASSTHRU, MASK
6550b57cec5SDimitry Andric     MCVTNEPS2BF16,
6560b57cec5SDimitry Andric 
6570b57cec5SDimitry Andric     // Dot product of BF16 pairs, accumulated into
6580b57cec5SDimitry Andric     // packed single precision.
6590b57cec5SDimitry Andric     DPBF16PS,
6600b57cec5SDimitry Andric 
661349cc55cSDimitry Andric     // A stack checking function call. On Windows it's the _chkstk call.
662349cc55cSDimitry Andric     DYN_ALLOCA,
6630b57cec5SDimitry Andric 
6640b57cec5SDimitry Andric     // For allocating variable amounts of stack space when using
6650b57cec5SDimitry Andric     // segmented stacks. Checks if the current stacklet has enough space, and
6660b57cec5SDimitry Andric     // falls back to heap allocation if not.
6670b57cec5SDimitry Andric     SEG_ALLOCA,
6680b57cec5SDimitry Andric 
6695ffd83dbSDimitry Andric     // For allocating stack space when using stack clash protector.
6705ffd83dbSDimitry Andric     // Allocation is performed by block, and each block is probed.
6715ffd83dbSDimitry Andric     PROBED_ALLOCA,
6725ffd83dbSDimitry Andric 
6730b57cec5SDimitry Andric     // Memory barriers.
6740b57cec5SDimitry Andric     MFENCE,
6750b57cec5SDimitry Andric 
6760b57cec5SDimitry Andric     // Get a random integer and indicate whether it is valid in CF.
6770b57cec5SDimitry Andric     RDRAND,
6780b57cec5SDimitry Andric 
6790b57cec5SDimitry Andric     // Get a NIST SP800-90B & C compliant random integer and
6800b57cec5SDimitry Andric     // indicate whether it is valid in CF.
6810b57cec5SDimitry Andric     RDSEED,
6820b57cec5SDimitry Andric 
6830b57cec5SDimitry Andric     // Protection keys
6840b57cec5SDimitry Andric     // RDPKRU - Operand 0 is chain. Operand 1 is value for ECX.
6850b57cec5SDimitry Andric     // WRPKRU - Operand 0 is chain. Operand 1 is value for EDX. Operand 2 is
6860b57cec5SDimitry Andric     // value for ECX.
6875ffd83dbSDimitry Andric     RDPKRU,
6885ffd83dbSDimitry Andric     WRPKRU,
6890b57cec5SDimitry Andric 
6900b57cec5SDimitry Andric     // SSE42 string comparisons.
6910b57cec5SDimitry Andric     // These nodes produce 3 results: index, mask, and flags. X86ISelDAGToDAG
6920b57cec5SDimitry Andric     // will emit one or two instructions based on which results are used. If
6930b57cec5SDimitry Andric     // both flags and index/mask are used, this allows us to use a single
6940b57cec5SDimitry Andric     // instruction since we won't have to pick an opcode for flags. Instead we
6950b57cec5SDimitry Andric     // can rely on the DAG to CSE everything and decide at isel.
6960b57cec5SDimitry Andric     PCMPISTR,
6970b57cec5SDimitry Andric     PCMPESTR,
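    // Illustrative sketch only (LHS, RHS and Imm are placeholders): the three
    // results come back through a multi-VT node, roughly
    //   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    //   SDValue R = DAG.getNode(X86ISD::PCMPISTR, DL, VTs, LHS, RHS, Imm);
    //   // R.getValue(0) = index, R.getValue(1) = mask, R.getValue(2) = flags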
6980b57cec5SDimitry Andric 
6990b57cec5SDimitry Andric     // Test if in transactional execution.
7000b57cec5SDimitry Andric     XTEST,
7010b57cec5SDimitry Andric 
7020b57cec5SDimitry Andric     // Conversions between float and half-float.
7035ffd83dbSDimitry Andric     CVTPS2PH,
704bdd1243dSDimitry Andric     CVTPS2PH_SAE,
7055ffd83dbSDimitry Andric     CVTPH2PS,
7065ffd83dbSDimitry Andric     CVTPH2PS_SAE,
7070b57cec5SDimitry Andric 
7080b57cec5SDimitry Andric     // Masked version of above.
7090b57cec5SDimitry Andric     // SRC, RND, PASSTHRU, MASK
7100b57cec5SDimitry Andric     MCVTPS2PH,
711bdd1243dSDimitry Andric     MCVTPS2PH_SAE,
7120b57cec5SDimitry Andric 
7130b57cec5SDimitry Andric     // Galois Field Arithmetic Instructions
7145ffd83dbSDimitry Andric     GF2P8AFFINEINVQB,
7155ffd83dbSDimitry Andric     GF2P8AFFINEQB,
7165ffd83dbSDimitry Andric     GF2P8MULB,
7170b57cec5SDimitry Andric 
7180b57cec5SDimitry Andric     // LWP insert record.
7190b57cec5SDimitry Andric     LWPINS,
7200b57cec5SDimitry Andric 
7210b57cec5SDimitry Andric     // User level wait
7225ffd83dbSDimitry Andric     UMWAIT,
7235ffd83dbSDimitry Andric     TPAUSE,
7240b57cec5SDimitry Andric 
7250b57cec5SDimitry Andric     // Enqueue Stores Instructions
7265ffd83dbSDimitry Andric     ENQCMD,
7275ffd83dbSDimitry Andric     ENQCMDS,
7280b57cec5SDimitry Andric 
7290b57cec5SDimitry Andric     // For avx512-vp2intersect
7300b57cec5SDimitry Andric     VP2INTERSECT,
7310b57cec5SDimitry Andric 
732e8d8bef9SDimitry Andric     // User level interrupts - testui
733e8d8bef9SDimitry Andric     TESTUI,
734e8d8bef9SDimitry Andric 
7351ac55f4cSDimitry Andric     // Perform an FP80 add after changing precision control in FPCW.
7361ac55f4cSDimitry Andric     FP80_ADD,
7371ac55f4cSDimitry Andric 
738*0fca6ea1SDimitry Andric     // Conditional compare instructions
739*0fca6ea1SDimitry Andric     CCMP,
740*0fca6ea1SDimitry Andric     CTEST,
741*0fca6ea1SDimitry Andric 
742480093f4SDimitry Andric     /// X86 strict FP compare instructions.
743480093f4SDimitry Andric     STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
744480093f4SDimitry Andric     STRICT_FCMPS,
745480093f4SDimitry Andric 
746480093f4SDimitry Andric     // Vector packed double/float comparison.
747480093f4SDimitry Andric     STRICT_CMPP,
748480093f4SDimitry Andric 
749480093f4SDimitry Andric     /// Vector comparison generating mask bits for fp and
750480093f4SDimitry Andric     /// integer signed and unsigned data types.
751480093f4SDimitry Andric     STRICT_CMPM,
752480093f4SDimitry Andric 
753480093f4SDimitry Andric     // Vector float/double to signed/unsigned integer with truncation.
7545ffd83dbSDimitry Andric     STRICT_CVTTP2SI,
7555ffd83dbSDimitry Andric     STRICT_CVTTP2UI,
756480093f4SDimitry Andric 
757480093f4SDimitry Andric     // Vector FP extend.
758480093f4SDimitry Andric     STRICT_VFPEXT,
759480093f4SDimitry Andric 
760480093f4SDimitry Andric     // Vector FP round.
761480093f4SDimitry Andric     STRICT_VFPROUND,
762480093f4SDimitry Andric 
763480093f4SDimitry Andric     // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
764480093f4SDimitry Andric     // Also used by the legacy (V)ROUND intrinsics where we mask out the
765480093f4SDimitry Andric     // scaling part of the immediate.
766480093f4SDimitry Andric     STRICT_VRNDSCALE,
767480093f4SDimitry Andric 
768480093f4SDimitry Andric     // Vector signed/unsigned integer to float/double.
7695ffd83dbSDimitry Andric     STRICT_CVTSI2P,
7705ffd83dbSDimitry Andric     STRICT_CVTUI2P,
7715ffd83dbSDimitry Andric 
7725ffd83dbSDimitry Andric     // Strict FMA nodes.
7735ffd83dbSDimitry Andric     STRICT_FNMADD,
7745ffd83dbSDimitry Andric     STRICT_FMSUB,
7755ffd83dbSDimitry Andric     STRICT_FNMSUB,
7765ffd83dbSDimitry Andric 
7775ffd83dbSDimitry Andric     // Conversions between float and half-float.
7785ffd83dbSDimitry Andric     STRICT_CVTPS2PH,
7795ffd83dbSDimitry Andric     STRICT_CVTPH2PS,
780480093f4SDimitry Andric 
7811ac55f4cSDimitry Andric     // Perform an FP80 add after changing precision control in FPCW.
7821ac55f4cSDimitry Andric     STRICT_FP80_ADD,
7831ac55f4cSDimitry Andric 
784bdd1243dSDimitry Andric     // WARNING: Only add nodes here if they are strict FP nodes. Non-memory and
785e8d8bef9SDimitry Andric     // non-strict FP nodes should be above FIRST_TARGET_STRICTFP_OPCODE.
786e8d8bef9SDimitry Andric 
7870b57cec5SDimitry Andric     // Compare and swap.
7880b57cec5SDimitry Andric     LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
7890b57cec5SDimitry Andric     LCMPXCHG8_DAG,
7900b57cec5SDimitry Andric     LCMPXCHG16_DAG,
7910b57cec5SDimitry Andric     LCMPXCHG16_SAVE_RBX_DAG,
7920b57cec5SDimitry Andric 
7930b57cec5SDimitry Andric     /// LOCK-prefixed arithmetic read-modify-write instructions.
7940b57cec5SDimitry Andric     /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
7955ffd83dbSDimitry Andric     LADD,
7965ffd83dbSDimitry Andric     LSUB,
7975ffd83dbSDimitry Andric     LOR,
7985ffd83dbSDimitry Andric     LXOR,
7995ffd83dbSDimitry Andric     LAND,
80081ad6265SDimitry Andric     LBTS,
80181ad6265SDimitry Andric     LBTC,
80281ad6265SDimitry Andric     LBTR,
803bdd1243dSDimitry Andric     LBTS_RM,
804bdd1243dSDimitry Andric     LBTC_RM,
805bdd1243dSDimitry Andric     LBTR_RM,
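    // Illustrative sketch only (Chain, Ptr, RHS, MemVT and MMO are
    // placeholders): being memory nodes, the LADD family above is created
    // with getMemIntrinsicNode, roughly
    //   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other); // EFLAGS, chain
    //   SDValue R = DAG.getMemIntrinsicNode(X86ISD::LADD, DL, VTs,
    //                                       {Chain, Ptr, RHS}, MemVT, MMO);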
806bdd1243dSDimitry Andric 
807bdd1243dSDimitry Andric     /// RAO arithmetic instructions.
808bdd1243dSDimitry Andric     /// OUTCHAIN = AADD(INCHAIN, PTR, RHS)
809bdd1243dSDimitry Andric     AADD,
810bdd1243dSDimitry Andric     AOR,
811bdd1243dSDimitry Andric     AXOR,
812bdd1243dSDimitry Andric     AAND,
8130b57cec5SDimitry Andric 
8140b57cec5SDimitry Andric     // Load, scalar_to_vector, and zero extend.
8150b57cec5SDimitry Andric     VZEXT_LOAD,
8160b57cec5SDimitry Andric 
8170b57cec5SDimitry Andric     // extract_vector_elt, store.
8180b57cec5SDimitry Andric     VEXTRACT_STORE,
8190b57cec5SDimitry Andric 
820e8d8bef9SDimitry Andric     // scalar broadcast from memory.
8218bcb0991SDimitry Andric     VBROADCAST_LOAD,
8228bcb0991SDimitry Andric 
823e8d8bef9SDimitry Andric     // subvector broadcast from memory.
824e8d8bef9SDimitry Andric     SUBV_BROADCAST_LOAD,
825e8d8bef9SDimitry Andric 
826fe6060f1SDimitry Andric     // Store FP control word into i16 memory.
8270b57cec5SDimitry Andric     FNSTCW16m,
8280b57cec5SDimitry Andric 
829fe6060f1SDimitry Andric     // Load FP control word from i16 memory.
830fe6060f1SDimitry Andric     FLDCW16m,
831fe6060f1SDimitry Andric 
83206c3fb27SDimitry Andric     // Store x87 FPU environment into memory.
83306c3fb27SDimitry Andric     FNSTENVm,
83406c3fb27SDimitry Andric 
83506c3fb27SDimitry Andric     // Load x87 FPU environment from memory.
83606c3fb27SDimitry Andric     FLDENVm,
83706c3fb27SDimitry Andric 
8380b57cec5SDimitry Andric     /// This instruction implements FP_TO_SINT with the
8390b57cec5SDimitry Andric     /// integer destination in memory and a FP reg source.  This corresponds
8400b57cec5SDimitry Andric     /// to the X86::FIST*m instructions and the rounding mode change stuff. It
8410b57cec5SDimitry Andric     /// has two inputs (token chain and address) and two outputs (int value
8420b57cec5SDimitry Andric     /// and token chain). Memory VT specifies the type to store to.
8430b57cec5SDimitry Andric     FP_TO_INT_IN_MEM,
8440b57cec5SDimitry Andric 
8450b57cec5SDimitry Andric     /// This instruction implements SINT_TO_FP with the
8460b57cec5SDimitry Andric     /// integer source in memory and FP reg result.  This corresponds to the
8470b57cec5SDimitry Andric     /// X86::FILD*m instructions. It has two inputs (token chain and address)
8485ffd83dbSDimitry Andric     /// and two outputs (FP value and token chain). The integer source type is
8495ffd83dbSDimitry Andric     /// specified by the memory VT.
8500b57cec5SDimitry Andric     FILD,
8510b57cec5SDimitry Andric 
8520b57cec5SDimitry Andric     /// This instruction implements a fp->int store from FP stack
8530b57cec5SDimitry Andric     /// slots. This corresponds to the fist instruction. It takes a
8540b57cec5SDimitry Andric     /// chain operand, value to store, address, and glue. The memory VT
8550b57cec5SDimitry Andric     /// specifies the type to store as.
8560b57cec5SDimitry Andric     FIST,
8570b57cec5SDimitry Andric 
8580b57cec5SDimitry Andric     /// This instruction implements an extending load to FP stack slots.
8590b57cec5SDimitry Andric     /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
8600b57cec5SDimitry Andric     /// operand, and ptr to load from. The memory VT specifies the type to
8610b57cec5SDimitry Andric     /// load from.
8620b57cec5SDimitry Andric     FLD,
8630b57cec5SDimitry Andric 
8640b57cec5SDimitry Andric     /// This instruction implements a truncating store from FP stack
8650b57cec5SDimitry Andric     /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
8660b57cec5SDimitry Andric     /// chain operand, value to store, address, and glue. The memory VT
8670b57cec5SDimitry Andric     /// specifies the type to store as.
8680b57cec5SDimitry Andric     FST,
8690b57cec5SDimitry Andric 
870e8d8bef9SDimitry Andric     /// These instructions grab the address of the next argument
8710b57cec5SDimitry Andric     /// from a va_list. (reads and modifies the va_list in memory)
8720b57cec5SDimitry Andric     VAARG_64,
873e8d8bef9SDimitry Andric     VAARG_X32,
8740b57cec5SDimitry Andric 
8750b57cec5SDimitry Andric     // Vector truncating store with unsigned/signed saturation
8765ffd83dbSDimitry Andric     VTRUNCSTOREUS,
8775ffd83dbSDimitry Andric     VTRUNCSTORES,
8780b57cec5SDimitry Andric     // Vector truncating masked store with unsigned/signed saturation
8795ffd83dbSDimitry Andric     VMTRUNCSTOREUS,
8805ffd83dbSDimitry Andric     VMTRUNCSTORES,
8810b57cec5SDimitry Andric 
8820b57cec5SDimitry Andric     // X86 specific gather and scatter
8835ffd83dbSDimitry Andric     MGATHER,
8845ffd83dbSDimitry Andric     MSCATTER,
8850b57cec5SDimitry Andric 
886e8d8bef9SDimitry Andric     // Key locker nodes that produce flags.
887e8d8bef9SDimitry Andric     AESENC128KL,
888e8d8bef9SDimitry Andric     AESDEC128KL,
889e8d8bef9SDimitry Andric     AESENC256KL,
890e8d8bef9SDimitry Andric     AESDEC256KL,
891e8d8bef9SDimitry Andric     AESENCWIDE128KL,
892e8d8bef9SDimitry Andric     AESDECWIDE128KL,
893e8d8bef9SDimitry Andric     AESENCWIDE256KL,
894e8d8bef9SDimitry Andric     AESDECWIDE256KL,
895e8d8bef9SDimitry Andric 
896bdd1243dSDimitry Andric     /// Compare and Add if Condition is Met. Compares the value in operand 2
89706c3fb27SDimitry Andric     /// with the value in memory at operand 1. If the condition in operand 4 is
89806c3fb27SDimitry Andric     /// met, adds operand 3 to m32 and writes the new value back to the memory
899bdd1243dSDimitry Andric     /// at operand 1. Operand 2 is always updated with the original value from operand 1.
900bdd1243dSDimitry Andric     CMPCCXADD,
901bdd1243dSDimitry Andric 
902349cc55cSDimitry Andric     // Save xmm argument registers to the stack, according to %al. An operator
903349cc55cSDimitry Andric     // is needed so that this can be expanded with control flow.
904349cc55cSDimitry Andric     VASTART_SAVE_XMM_REGS,
905349cc55cSDimitry Andric 
906*0fca6ea1SDimitry Andric     // Conditional load/store instructions
907*0fca6ea1SDimitry Andric     CLOAD,
908*0fca6ea1SDimitry Andric     CSTORE,
909*0fca6ea1SDimitry Andric 
9100b57cec5SDimitry Andric     // WARNING: Do not add anything at the end unless you want the node to
9110b57cec5SDimitry Andric     // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE, all
9120b57cec5SDimitry Andric     // opcodes will be treated as target memory ops!
9130b57cec5SDimitry Andric   };
9140b57cec5SDimitry Andric   } // end namespace X86ISD
9150b57cec5SDimitry Andric 
916fe6060f1SDimitry Andric   namespace X86 {
917fe6060f1SDimitry Andric     /// The current rounding mode is represented in bits 11:10 of the FP
918fe6060f1SDimitry Andric     /// control word (FPCW). These values are the same as the corresponding
919fe6060f1SDimitry Andric     /// rounding-mode constants used in glibc.
920fe6060f1SDimitry Andric     enum RoundingMode {
921fe6060f1SDimitry Andric       rmToNearest   = 0,        // FE_TONEAREST
922fe6060f1SDimitry Andric       rmDownward    = 1 << 10,  // FE_DOWNWARD
923fe6060f1SDimitry Andric       rmUpward      = 2 << 10,  // FE_UPWARD
924fe6060f1SDimitry Andric       rmTowardZero  = 3 << 10,  // FE_TOWARDZERO
925fe6060f1SDimitry Andric       rmMask        = 3 << 10   // Bit mask selecting rounding mode
926fe6060f1SDimitry Andric     };
927fe6060f1SDimitry Andric   }
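  // Usage sketch (illustrative only; CWD is a placeholder for a control-word
  // value read via FNSTCW): the rounding field can be isolated and tested as
  //   unsigned RM = CWD & X86::rmMask;
  //   bool RoundsToNearest = (RM == X86::rmToNearest);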
928fe6060f1SDimitry Andric 
9290b57cec5SDimitry Andric   /// Define some predicates that are used for node matching.
9300b57cec5SDimitry Andric   namespace X86 {
9310b57cec5SDimitry Andric     /// Returns true if Elt is a constant zero or floating point constant +0.0.
9320b57cec5SDimitry Andric     bool isZeroNode(SDValue Elt);
9330b57cec5SDimitry Andric 
9340b57cec5SDimitry Andric     /// Returns true if the given offset can
9350b57cec5SDimitry Andric     /// fit into the displacement field of the instruction.
9360b57cec5SDimitry Andric     bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
937e8d8bef9SDimitry Andric                                       bool hasSymbolicDisplacement);
9380b57cec5SDimitry Andric 
9390b57cec5SDimitry Andric     /// Determines whether the callee is required to pop its
9400b57cec5SDimitry Andric     /// own arguments. Callee pop is necessary to support tail calls.
9410b57cec5SDimitry Andric     bool isCalleePop(CallingConv::ID CallingConv,
9420b57cec5SDimitry Andric                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
9430b57cec5SDimitry Andric 
9448bcb0991SDimitry Andric     /// If Op is a constant whose elements are all the same constant or
9458bcb0991SDimitry Andric     /// undefined, return true and return the constant value in \p SplatVal.
9465ffd83dbSDimitry Andric     /// If we have undef bits that don't cover an entire element, we treat these
9475ffd83dbSDimitry Andric     /// as zero if AllowPartialUndefs is set, else we fail and return false.
9485ffd83dbSDimitry Andric     bool isConstantSplat(SDValue Op, APInt &SplatVal,
9495ffd83dbSDimitry Andric                          bool AllowPartialUndefs = true);
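    // A minimal usage sketch (Op is a placeholder SDValue):
    //   APInt SplatVal;
    //   if (X86::isConstantSplat(Op, SplatVal) && SplatVal.isSignMask())
    //     ; // every element is the sign-bit pattern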
950349cc55cSDimitry Andric 
951349cc55cSDimitry Andric     /// Check if Op is a load operation that could be folded into some other x86
952349cc55cSDimitry Andric     /// instruction as a memory operand. Example: vpaddd (%rdi), %xmm0, %xmm0.
953349cc55cSDimitry Andric     bool mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
954349cc55cSDimitry Andric                      bool AssumeSingleUse = false);
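    // A minimal usage sketch (N and Subtarget are placeholders):
    //   if (X86::mayFoldLoad(N->getOperand(1), Subtarget))
    //     ; // worth trying the reg-mem form of the instruction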
955349cc55cSDimitry Andric 
956349cc55cSDimitry Andric     /// Check if Op is a load operation that could be folded into a vector splat
957349cc55cSDimitry Andric     /// instruction as a memory operand. Example: vbroadcastss 16(%rdi), %xmm2.
958349cc55cSDimitry Andric     bool mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
959349cc55cSDimitry Andric                                          const X86Subtarget &Subtarget,
960349cc55cSDimitry Andric                                          bool AssumeSingleUse = false);
961349cc55cSDimitry Andric 
962349cc55cSDimitry Andric     /// Check if Op is a value that could be used to fold a store into some
963349cc55cSDimitry Andric     /// other x86 instruction as a memory operand. Ex: pextrb $0, %xmm0, (%rdi).
964349cc55cSDimitry Andric     bool mayFoldIntoStore(SDValue Op);
965349cc55cSDimitry Andric 
966349cc55cSDimitry Andric     /// Check if Op is an operation that could be folded into a zero extend x86
967349cc55cSDimitry Andric     /// instruction.
968349cc55cSDimitry Andric     bool mayFoldIntoZeroExtend(SDValue Op);
969*0fca6ea1SDimitry Andric 
970*0fca6ea1SDimitry Andric     /// True if the target supports the extended frame for async Swift
971*0fca6ea1SDimitry Andric     /// functions.
972*0fca6ea1SDimitry Andric     bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
973*0fca6ea1SDimitry Andric                                             const MachineFunction &MF);
9740b57cec5SDimitry Andric   } // end namespace X86
9750b57cec5SDimitry Andric 
9760b57cec5SDimitry Andric   //===--------------------------------------------------------------------===//
9770b57cec5SDimitry Andric   //  X86 Implementation of the TargetLowering interface
9780b57cec5SDimitry Andric   class X86TargetLowering final : public TargetLowering {
9790b57cec5SDimitry Andric   public:
9800b57cec5SDimitry Andric     explicit X86TargetLowering(const X86TargetMachine &TM,
9810b57cec5SDimitry Andric                                const X86Subtarget &STI);
9820b57cec5SDimitry Andric 
9830b57cec5SDimitry Andric     unsigned getJumpTableEncoding() const override;
9840b57cec5SDimitry Andric     bool useSoftFloat() const override;
9850b57cec5SDimitry Andric 
9860b57cec5SDimitry Andric     void markLibCallAttributes(MachineFunction *MF, unsigned CC,
9870b57cec5SDimitry Andric                                ArgListTy &Args) const override;
9880b57cec5SDimitry Andric 
9890b57cec5SDimitry Andric     MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override {
9900b57cec5SDimitry Andric       return MVT::i8;
9910b57cec5SDimitry Andric     }
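    // Because the shift-amount type is i8 (shifts ultimately use CL), shift
    // amounts built during X86 lowering are i8 values, e.g. (illustrative
    // only) DAG.getConstant(ShAmt, DL, MVT::i8).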
9920b57cec5SDimitry Andric 
9930b57cec5SDimitry Andric     const MCExpr *
9940b57cec5SDimitry Andric     LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
9950b57cec5SDimitry Andric                               const MachineBasicBlock *MBB, unsigned uid,
9960b57cec5SDimitry Andric                               MCContext &Ctx) const override;
9970b57cec5SDimitry Andric 
9980b57cec5SDimitry Andric     /// Returns relocation base for the given PIC jumptable.
9990b57cec5SDimitry Andric     SDValue getPICJumpTableRelocBase(SDValue Table,
10000b57cec5SDimitry Andric                                      SelectionDAG &DAG) const override;
10010b57cec5SDimitry Andric     const MCExpr *
10020b57cec5SDimitry Andric     getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
10030b57cec5SDimitry Andric                                  unsigned JTI, MCContext &Ctx) const override;
10040b57cec5SDimitry Andric 
10050b57cec5SDimitry Andric     /// Return the desired alignment for ByVal aggregate
10060b57cec5SDimitry Andric     /// function arguments in the caller parameter area. For X86, aggregates
10070b57cec5SDimitry Andric     /// that contain SSE vectors are placed at 16-byte boundaries while the
10080b57cec5SDimitry Andric     /// rest are at 4-byte boundaries.
1009349cc55cSDimitry Andric     uint64_t getByValTypeAlignment(Type *Ty,
10100b57cec5SDimitry Andric                                    const DataLayout &DL) const override;
10110b57cec5SDimitry Andric 
10125ffd83dbSDimitry Andric     EVT getOptimalMemOpType(const MemOp &Op,
10130b57cec5SDimitry Andric                             const AttributeList &FuncAttributes) const override;
10140b57cec5SDimitry Andric 
10150b57cec5SDimitry Andric     /// Returns true if it's safe to use load / store of the
10160b57cec5SDimitry Andric     /// specified type to expand memcpy / memset inline. This is mostly true
10170b57cec5SDimitry Andric     /// for all types except for some special cases. For example, on X86
10180b57cec5SDimitry Andric     /// targets without SSE2 f64 load / store are done with fldl / fstpl which
10190b57cec5SDimitry Andric     /// also does type conversion. Note the specified type doesn't have to be
10200b57cec5SDimitry Andric     /// legal as the hook is used before type legalization.
10210b57cec5SDimitry Andric     bool isSafeMemOpType(MVT VT) const override;
10220b57cec5SDimitry Andric 
1023bdd1243dSDimitry Andric     bool isMemoryAccessFast(EVT VT, Align Alignment) const;
1024bdd1243dSDimitry Andric 
10250b57cec5SDimitry Andric     /// Returns true if the target allows unaligned memory accesses of the
10260b57cec5SDimitry Andric     /// specified type. Returns whether it is "fast" in the last argument.
1027fe6060f1SDimitry Andric     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
10280b57cec5SDimitry Andric                                         MachineMemOperand::Flags Flags,
1029bdd1243dSDimitry Andric                                         unsigned *Fast) const override;
1030bdd1243dSDimitry Andric 
1031bdd1243dSDimitry Andric     /// This function returns true if the memory access is aligned or if the
1032bdd1243dSDimitry Andric     /// target allows this specific unaligned memory access. If the access is
1033bdd1243dSDimitry Andric     /// allowed, the optional final parameter returns a relative speed of the
1034bdd1243dSDimitry Andric     /// access (as defined by the target).
1035bdd1243dSDimitry Andric     bool allowsMemoryAccess(
1036bdd1243dSDimitry Andric         LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1037bdd1243dSDimitry Andric         Align Alignment,
1038bdd1243dSDimitry Andric         MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1039bdd1243dSDimitry Andric         unsigned *Fast = nullptr) const override;
1040bdd1243dSDimitry Andric 
1041bdd1243dSDimitry Andric     bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1042bdd1243dSDimitry Andric                             const MachineMemOperand &MMO,
1043bdd1243dSDimitry Andric                             unsigned *Fast) const {
1044bdd1243dSDimitry Andric       return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
1045bdd1243dSDimitry Andric                                 MMO.getAlign(), MMO.getFlags(), Fast);
1046bdd1243dSDimitry Andric     }
10470b57cec5SDimitry Andric 
10480b57cec5SDimitry Andric     /// Provide custom lowering hooks for some operations.
10490b57cec5SDimitry Andric     ///
10500b57cec5SDimitry Andric     SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
10510b57cec5SDimitry Andric 
10520b57cec5SDimitry Andric     /// Replace the results of a node with an illegal result
10530b57cec5SDimitry Andric     /// type with new values built out of custom code.
10540b57cec5SDimitry Andric     ///
10550b57cec5SDimitry Andric     void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
10560b57cec5SDimitry Andric                             SelectionDAG &DAG) const override;
10570b57cec5SDimitry Andric 
10580b57cec5SDimitry Andric     SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
10590b57cec5SDimitry Andric 
106006c3fb27SDimitry Andric     bool preferABDSToABSWithNSW(EVT VT) const override;
106106c3fb27SDimitry Andric 
10625f757f3fSDimitry Andric     bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
10635f757f3fSDimitry Andric                                    EVT ExtVT) const override;
10645f757f3fSDimitry Andric 
10655f757f3fSDimitry Andric     bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
10665f757f3fSDimitry Andric                                            EVT VT) const override;
10675f757f3fSDimitry Andric 
10680b57cec5SDimitry Andric     /// Return true if the target has native support for
10690b57cec5SDimitry Andric     /// the specified value type and it is 'desirable' to use the type for the
10700b57cec5SDimitry Andric     /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
10710b57cec5SDimitry Andric     /// instruction encodings are longer and some i16 instructions are slow.
10720b57cec5SDimitry Andric     bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
10730b57cec5SDimitry Andric 
10740b57cec5SDimitry Andric     /// Return true if the target has native support for the
10750b57cec5SDimitry Andric     /// specified value type and it is 'desirable' to use the type. e.g. On x86
10760b57cec5SDimitry Andric     /// i16 is legal, but undesirable since i16 instruction encodings are longer
10770b57cec5SDimitry Andric     /// and some i16 instructions are slow.
10780b57cec5SDimitry Andric     bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
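    // Sketch of the intent (assumption, not from the upstream header): for
    //   uint16_t add16(uint16_t a, uint16_t b) { return a + b; }
    // the i16 ADD is legal but reported as undesirable, so the combiner asks
    // this hook, promotes the node to i32, and the selected 32-bit ADD avoids
    // the 0x66 operand-size prefix; the result is recovered via the 16-bit
    // sub-register.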
10790b57cec5SDimitry Andric 
108006c3fb27SDimitry Andric     /// Return the preferred fold type: Abs if this is a vector, AddAnd if it's
108106c3fb27SDimitry Andric     /// an integer, None otherwise.
108206c3fb27SDimitry Andric     TargetLowering::AndOrSETCCFoldKind
108306c3fb27SDimitry Andric     isDesirableToCombineLogicOpOfSETCC(const SDNode *LogicOp,
108406c3fb27SDimitry Andric                                        const SDNode *SETCC0,
108506c3fb27SDimitry Andric                                        const SDNode *SETCC1) const override;
108606c3fb27SDimitry Andric 
10875ffd83dbSDimitry Andric     /// Return the newly negated expression if the cost is not expensive and
10885ffd83dbSDimitry Andric     /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
10895ffd83dbSDimitry Andric     /// do the negation.
10908bcb0991SDimitry Andric     SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
10918bcb0991SDimitry Andric                                  bool LegalOperations, bool ForCodeSize,
10925ffd83dbSDimitry Andric                                  NegatibleCost &Cost,
10938bcb0991SDimitry Andric                                  unsigned Depth) const override;
10948bcb0991SDimitry Andric 
10950b57cec5SDimitry Andric     MachineBasicBlock *
10960b57cec5SDimitry Andric     EmitInstrWithCustomInserter(MachineInstr &MI,
10970b57cec5SDimitry Andric                                 MachineBasicBlock *MBB) const override;
10980b57cec5SDimitry Andric 
10990b57cec5SDimitry Andric     /// This method returns the name of a target specific DAG node.
11000b57cec5SDimitry Andric     const char *getTargetNodeName(unsigned Opcode) const override;
11010b57cec5SDimitry Andric 
11020b57cec5SDimitry Andric     /// Do not merge vector stores after legalization because that may conflict
11030b57cec5SDimitry Andric     /// with x86-specific store splitting optimizations.
11040b57cec5SDimitry Andric     bool mergeStoresAfterLegalization(EVT MemVT) const override {
11050b57cec5SDimitry Andric       return !MemVT.isVector();
11060b57cec5SDimitry Andric     }
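    // Minimal sketch of the effect (assumption): two adjacent post-legalization
    // vector stores such as
    //   store <4 x i32> %a, ptr %p
    //   store <4 x i32> %b, ptr %q        ; %q == %p + 16
    // are deliberately not merged into one <8 x i32> store, keeping the
    // x86-specific store-splitting logic free to act; scalar stores may still
    // be merged because MemVT.isVector() is false for them.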
11070b57cec5SDimitry Andric 
11080b57cec5SDimitry Andric     bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
1109349cc55cSDimitry Andric                           const MachineFunction &MF) const override;
11100b57cec5SDimitry Andric 
1111bdd1243dSDimitry Andric     bool isCheapToSpeculateCttz(Type *Ty) const override;
11120b57cec5SDimitry Andric 
1113bdd1243dSDimitry Andric     bool isCheapToSpeculateCtlz(Type *Ty) const override;
11140b57cec5SDimitry Andric 
11150b57cec5SDimitry Andric     bool isCtlzFast() const override;
11160b57cec5SDimitry Andric 
11170b57cec5SDimitry Andric     bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
11180b57cec5SDimitry Andric       // If the pair to store is a mixture of float and int values, we will
11190b57cec5SDimitry Andric       // save two bitwise instructions and one float-to-int instruction and
11200b57cec5SDimitry Andric       // increase one store instruction. There is potentially a more
11210b57cec5SDimitry Andric       // significant benefit because it avoids the float->int domain switch
11220b57cec5SDimitry Andric       // for the input value. So it is more likely a win.
11230b57cec5SDimitry Andric       if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
11240b57cec5SDimitry Andric           (LTy.isInteger() && HTy.isFloatingPoint()))
11250b57cec5SDimitry Andric         return true;
11260b57cec5SDimitry Andric       // If the pair only contains int values, we will save two bitwise
11270b57cec5SDimitry Andric       // instructions and increase one store instruction (costing one more
11280b57cec5SDimitry Andric       // store buffer). Since the benefit is more blurred, we leave
11290b57cec5SDimitry Andric       // such pairs out until we get a testcase to prove it is a win.
11300b57cec5SDimitry Andric       return false;
11310b57cec5SDimitry Andric     }
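    // Worked example (assumption): storing a { float, i32 } pair with a merged
    // i64 store would need a float->int bitcast plus shift/or to build the i64,
    // crossing the FP/integer domain; keeping two scalar stores, roughly
    //   movss [mem], xmm0
    //   mov   [mem+4], eax
    // avoids that, which is why the mixed float/int case above returns true.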
11320b57cec5SDimitry Andric 
11330b57cec5SDimitry Andric     bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
11340b57cec5SDimitry Andric 
11350b57cec5SDimitry Andric     bool hasAndNotCompare(SDValue Y) const override;
11360b57cec5SDimitry Andric 
11370b57cec5SDimitry Andric     bool hasAndNot(SDValue Y) const override;
11380b57cec5SDimitry Andric 
11398bcb0991SDimitry Andric     bool hasBitTest(SDValue X, SDValue Y) const override;
11408bcb0991SDimitry Andric 
11418bcb0991SDimitry Andric     bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
11428bcb0991SDimitry Andric         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
11438bcb0991SDimitry Andric         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
11448bcb0991SDimitry Andric         SelectionDAG &DAG) const override;
11458bcb0991SDimitry Andric 
11465f757f3fSDimitry Andric     unsigned preferedOpcodeForCmpEqPiecesOfOperand(
11475f757f3fSDimitry Andric         EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
11485f757f3fSDimitry Andric         const APInt &ShiftOrRotateAmt,
11495f757f3fSDimitry Andric         const std::optional<APInt> &AndMask) const override;
11505f757f3fSDimitry Andric 
115106c3fb27SDimitry Andric     bool preferScalarizeSplat(SDNode *N) const override;
1152bdd1243dSDimitry Andric 
1153*0fca6ea1SDimitry Andric     CondMergingParams
1154*0fca6ea1SDimitry Andric     getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs,
1155*0fca6ea1SDimitry Andric                                   const Value *Rhs) const override;
1156*0fca6ea1SDimitry Andric 
11570b57cec5SDimitry Andric     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
11580b57cec5SDimitry Andric                                            CombineLevel Level) const override;
11590b57cec5SDimitry Andric 
11600b57cec5SDimitry Andric     bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;
11610b57cec5SDimitry Andric 
11620b57cec5SDimitry Andric     bool
11630b57cec5SDimitry Andric     shouldTransformSignedTruncationCheck(EVT XVT,
11640b57cec5SDimitry Andric                                          unsigned KeptBits) const override {
11650b57cec5SDimitry Andric       // For vectors, we don't have a preference.
11660b57cec5SDimitry Andric       if (XVT.isVector())
11670b57cec5SDimitry Andric         return false;
11680b57cec5SDimitry Andric 
11690b57cec5SDimitry Andric       auto VTIsOk = [](EVT VT) -> bool {
11700b57cec5SDimitry Andric         return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
11710b57cec5SDimitry Andric                VT == MVT::i64;
11720b57cec5SDimitry Andric       };
11730b57cec5SDimitry Andric 
11740b57cec5SDimitry Andric       // We are ok with KeptBitsVT being byte/word/dword, what MOVS supports.
11750b57cec5SDimitry Andric       // XVT will be larger than KeptBitsVT.
11760b57cec5SDimitry Andric       MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
11770b57cec5SDimitry Andric       return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
11780b57cec5SDimitry Andric     }
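    // Sketch of the transform being gated (assumption, following the generic
    // hook's contract): the range-check form
    //   (add %x, (1 << (KeptBits - 1)))  unsigned-cmp  (1 << KeptBits)
    // is turned back into the sign-extension check
    //   ((%x << C) a>> C) eq/ne %x       ; C = bitwidth(XVT) - KeptBits
    // which X86 handles well with MOVSX of a byte/word/dword plus CMP, hence
    // the type restrictions above.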
11790b57cec5SDimitry Andric 
1180bdd1243dSDimitry Andric     ShiftLegalizationStrategy
1181bdd1243dSDimitry Andric     preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
1182bdd1243dSDimitry Andric                                        unsigned ExpansionFactor) const override;
11830b57cec5SDimitry Andric 
11840b57cec5SDimitry Andric     bool shouldSplatInsEltVarIndex(EVT VT) const override;
11850b57cec5SDimitry Andric 
11860eae32dcSDimitry Andric     bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override {
11870eae32dcSDimitry Andric       // Converting to sat variants holds little benefit on X86 as we will just
11880eae32dcSDimitry Andric       // need to saturate the value back using fp arithmetic.
11890eae32dcSDimitry Andric       return Op != ISD::FP_TO_UINT_SAT && isOperationLegalOrCustom(Op, VT);
11900eae32dcSDimitry Andric     }
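    // Rough reading of the condition above (assumption): a clamped conversion
    // pattern along the lines of
    //   smin(smax(fp_to_sint %x, INT_MIN), INT_MAX)
    // is only re-formed into FP_TO_SINT_SAT when that saturating node is legal
    // or custom for the result type; the unsigned FP_TO_UINT_SAT form is left
    // to the generic expansion, which saturates with FP arithmetic as the
    // comment notes.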
11910eae32dcSDimitry Andric 
11920b57cec5SDimitry Andric     bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
11930b57cec5SDimitry Andric       return VT.isScalarInteger();
11940b57cec5SDimitry Andric     }
11950b57cec5SDimitry Andric 
11960b57cec5SDimitry Andric     /// Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
11970b57cec5SDimitry Andric     MVT hasFastEqualityCompare(unsigned NumBits) const override;
11980b57cec5SDimitry Andric 
11990b57cec5SDimitry Andric     /// Return the value type to use for ISD::SETCC.
12000b57cec5SDimitry Andric     EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
12010b57cec5SDimitry Andric                            EVT VT) const override;
12020b57cec5SDimitry Andric 
12035ffd83dbSDimitry Andric     bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
12045ffd83dbSDimitry Andric                                       const APInt &DemandedElts,
12050b57cec5SDimitry Andric                                       TargetLoweringOpt &TLO) const override;
12060b57cec5SDimitry Andric 
12070b57cec5SDimitry Andric     /// Determine which of the bits specified in Mask are known to be either
12080b57cec5SDimitry Andric     /// zero or one and return them in the KnownZero/KnownOne bitsets.
12090b57cec5SDimitry Andric     void computeKnownBitsForTargetNode(const SDValue Op,
12100b57cec5SDimitry Andric                                        KnownBits &Known,
12110b57cec5SDimitry Andric                                        const APInt &DemandedElts,
12120b57cec5SDimitry Andric                                        const SelectionDAG &DAG,
12130b57cec5SDimitry Andric                                        unsigned Depth = 0) const override;
12140b57cec5SDimitry Andric 
12150b57cec5SDimitry Andric     /// Determine the number of bits in the operation that are sign bits.
12160b57cec5SDimitry Andric     unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
12170b57cec5SDimitry Andric                                              const APInt &DemandedElts,
12180b57cec5SDimitry Andric                                              const SelectionDAG &DAG,
12190b57cec5SDimitry Andric                                              unsigned Depth) const override;
12200b57cec5SDimitry Andric 
12210b57cec5SDimitry Andric     bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op,
12220b57cec5SDimitry Andric                                                  const APInt &DemandedElts,
12230b57cec5SDimitry Andric                                                  APInt &KnownUndef,
12240b57cec5SDimitry Andric                                                  APInt &KnownZero,
12250b57cec5SDimitry Andric                                                  TargetLoweringOpt &TLO,
12260b57cec5SDimitry Andric                                                  unsigned Depth) const override;
12270b57cec5SDimitry Andric 
12285ffd83dbSDimitry Andric     bool SimplifyDemandedVectorEltsForTargetShuffle(SDValue Op,
12295ffd83dbSDimitry Andric                                                     const APInt &DemandedElts,
12305ffd83dbSDimitry Andric                                                     unsigned MaskIndex,
12315ffd83dbSDimitry Andric                                                     TargetLoweringOpt &TLO,
12325ffd83dbSDimitry Andric                                                     unsigned Depth) const;
12335ffd83dbSDimitry Andric 
12340b57cec5SDimitry Andric     bool SimplifyDemandedBitsForTargetNode(SDValue Op,
12350b57cec5SDimitry Andric                                            const APInt &DemandedBits,
12360b57cec5SDimitry Andric                                            const APInt &DemandedElts,
12370b57cec5SDimitry Andric                                            KnownBits &Known,
12380b57cec5SDimitry Andric                                            TargetLoweringOpt &TLO,
12390b57cec5SDimitry Andric                                            unsigned Depth) const override;
12400b57cec5SDimitry Andric 
12418bcb0991SDimitry Andric     SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
12428bcb0991SDimitry Andric         SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
12438bcb0991SDimitry Andric         SelectionDAG &DAG, unsigned Depth) const override;
12448bcb0991SDimitry Andric 
1245bdd1243dSDimitry Andric     bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
1246bdd1243dSDimitry Andric         SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1247bdd1243dSDimitry Andric         bool PoisonOnly, unsigned Depth) const override;
1248bdd1243dSDimitry Andric 
1249bdd1243dSDimitry Andric     bool canCreateUndefOrPoisonForTargetNode(
1250bdd1243dSDimitry Andric         SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1251bdd1243dSDimitry Andric         bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override;
1252bdd1243dSDimitry Andric 
12530eae32dcSDimitry Andric     bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
1254bdd1243dSDimitry Andric                                    APInt &UndefElts, const SelectionDAG &DAG,
12550eae32dcSDimitry Andric                                    unsigned Depth) const override;
12560eae32dcSDimitry Andric 
125781ad6265SDimitry Andric     bool isTargetCanonicalConstantNode(SDValue Op) const override {
125881ad6265SDimitry Andric       // Peek through bitcasts/extracts/inserts to see if we have a broadcast
125981ad6265SDimitry Andric       // vector from memory.
126081ad6265SDimitry Andric       while (Op.getOpcode() == ISD::BITCAST ||
126181ad6265SDimitry Andric              Op.getOpcode() == ISD::EXTRACT_SUBVECTOR ||
126281ad6265SDimitry Andric              (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
126381ad6265SDimitry Andric               Op.getOperand(0).isUndef()))
126481ad6265SDimitry Andric         Op = Op.getOperand(Op.getOpcode() == ISD::INSERT_SUBVECTOR ? 1 : 0);
126581ad6265SDimitry Andric 
126681ad6265SDimitry Andric       return Op.getOpcode() == X86ISD::VBROADCAST_LOAD ||
126781ad6265SDimitry Andric              TargetLowering::isTargetCanonicalConstantNode(Op);
126881ad6265SDimitry Andric     }
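    // Example of a DAG shape accepted here (assumption):
    //   t0: v8f32 = X86ISD::VBROADCAST_LOAD<(load from constant pool)> ...
    //   t1: v8i32 = bitcast t0
    //   t2: v4i32 = extract_subvector t1, 0
    // Peeling the bitcast and extract_subvector reaches the broadcast load, so
    // t2 is already considered canonical and is not re-folded into a new
    // constant pool entry.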
126981ad6265SDimitry Andric 
12700b57cec5SDimitry Andric     const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
12710b57cec5SDimitry Andric 
12720b57cec5SDimitry Andric     SDValue unwrapAddress(SDValue N) const override;
12730b57cec5SDimitry Andric 
12740b57cec5SDimitry Andric     SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
12750b57cec5SDimitry Andric 
12760b57cec5SDimitry Andric     bool ExpandInlineAsm(CallInst *CI) const override;
12770b57cec5SDimitry Andric 
12780b57cec5SDimitry Andric     ConstraintType getConstraintType(StringRef Constraint) const override;
12790b57cec5SDimitry Andric 
12800b57cec5SDimitry Andric     /// Examine constraint string and operand type and determine a weight value.
12810b57cec5SDimitry Andric     /// The operand object must already have been set up with the operand type.
12820b57cec5SDimitry Andric     ConstraintWeight
12835f757f3fSDimitry Andric       getSingleConstraintMatchWeight(AsmOperandInfo &Info,
12845f757f3fSDimitry Andric                                      const char *Constraint) const override;
12850b57cec5SDimitry Andric 
12860b57cec5SDimitry Andric     const char *LowerXConstraint(EVT ConstraintVT) const override;
12870b57cec5SDimitry Andric 
12880b57cec5SDimitry Andric     /// Lower the specified operand into the Ops vector. If it is invalid, don't
12890b57cec5SDimitry Andric     /// add anything to Ops. If hasMemory is true it means one of the asm
12900b57cec5SDimitry Andric     /// constraints of the inline asm instruction being processed is 'm'.
12915f757f3fSDimitry Andric     void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
12920b57cec5SDimitry Andric                                       std::vector<SDValue> &Ops,
12930b57cec5SDimitry Andric                                       SelectionDAG &DAG) const override;
12940b57cec5SDimitry Andric 
12955f757f3fSDimitry Andric     InlineAsm::ConstraintCode
12960b57cec5SDimitry Andric     getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
1297fe6060f1SDimitry Andric       if (ConstraintCode == "v")
12985f757f3fSDimitry Andric         return InlineAsm::ConstraintCode::v;
12990b57cec5SDimitry Andric       return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
13000b57cec5SDimitry Andric     }
13010b57cec5SDimitry Andric 
13020b57cec5SDimitry Andric     /// Handle Lowering flag assembly outputs.
1303e8d8bef9SDimitry Andric     SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
1304e8d8bef9SDimitry Andric                                         const SDLoc &DL,
13050b57cec5SDimitry Andric                                         const AsmOperandInfo &Constraint,
13060b57cec5SDimitry Andric                                         SelectionDAG &DAG) const override;
13070b57cec5SDimitry Andric 
13080b57cec5SDimitry Andric     /// Given a physical register constraint
13090b57cec5SDimitry Andric     /// (e.g. {edx}), return the register number and the register class for the
13100b57cec5SDimitry Andric     /// register.  This should only be used for C_Register constraints.  On
13110b57cec5SDimitry Andric     /// error, this returns a register number of 0.
13120b57cec5SDimitry Andric     std::pair<unsigned, const TargetRegisterClass *>
13130b57cec5SDimitry Andric     getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
13140b57cec5SDimitry Andric                                  StringRef Constraint, MVT VT) const override;
13150b57cec5SDimitry Andric 
13160b57cec5SDimitry Andric     /// Return true if the addressing mode represented
13170b57cec5SDimitry Andric     /// by AM is legal for this target, for a load/store of the specified type.
13180b57cec5SDimitry Andric     bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
13190b57cec5SDimitry Andric                                Type *Ty, unsigned AS,
13200b57cec5SDimitry Andric                                Instruction *I = nullptr) const override;
13210b57cec5SDimitry Andric 
1322*0fca6ea1SDimitry Andric     bool addressingModeSupportsTLS(const GlobalValue &GV) const override;
1323*0fca6ea1SDimitry Andric 
13240b57cec5SDimitry Andric     /// Return true if the specified immediate is legal
13250b57cec5SDimitry Andric     /// icmp immediate, that is the target has icmp instructions which can
13260b57cec5SDimitry Andric     /// compare a register against the immediate without having to materialize
13270b57cec5SDimitry Andric     /// the immediate into a register.
13280b57cec5SDimitry Andric     bool isLegalICmpImmediate(int64_t Imm) const override;
13290b57cec5SDimitry Andric 
13300b57cec5SDimitry Andric     /// Return true if the specified immediate is legal
13310b57cec5SDimitry Andric     /// add immediate, that is the target has add instructions which can
13320b57cec5SDimitry Andric     /// add a register and the immediate without having to materialize
13330b57cec5SDimitry Andric     /// the immediate into a register.
13340b57cec5SDimitry Andric     bool isLegalAddImmediate(int64_t Imm) const override;
13350b57cec5SDimitry Andric 
13360b57cec5SDimitry Andric     bool isLegalStoreImmediate(int64_t Imm) const override;
13370b57cec5SDimitry Andric 
13385ffd83dbSDimitry Andric     /// This is used to enable splatted operand transforms for vector shifts
13395ffd83dbSDimitry Andric     /// and vector funnel shifts.
13400b57cec5SDimitry Andric     bool isVectorShiftByScalarCheap(Type *Ty) const override;
13410b57cec5SDimitry Andric 
13420b57cec5SDimitry Andric     /// Add x86-specific opcodes to the default list.
13430b57cec5SDimitry Andric     bool isBinOp(unsigned Opcode) const override;
13440b57cec5SDimitry Andric 
13450b57cec5SDimitry Andric     /// Returns true if the opcode is a commutative binary operation.
13460b57cec5SDimitry Andric     bool isCommutativeBinOp(unsigned Opcode) const override;
13470b57cec5SDimitry Andric 
13480b57cec5SDimitry Andric     /// Return true if it's free to truncate a value of
13490b57cec5SDimitry Andric     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
13500b57cec5SDimitry Andric     /// register EAX to i16 by referencing its sub-register AX.
13510b57cec5SDimitry Andric     bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
13520b57cec5SDimitry Andric     bool isTruncateFree(EVT VT1, EVT VT2) const override;
13530b57cec5SDimitry Andric 
13540b57cec5SDimitry Andric     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
13550b57cec5SDimitry Andric 
13560b57cec5SDimitry Andric     /// Return true if any actual instruction that defines a
13570b57cec5SDimitry Andric     /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
13580b57cec5SDimitry Andric     /// register. This does not necessarily include registers defined in
13590b57cec5SDimitry Andric     /// unknown ways, such as incoming arguments, or copies from unknown
13600b57cec5SDimitry Andric     /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
13610b57cec5SDimitry Andric     /// does not necessarily apply to truncate instructions. e.g. on x86-64,
13620b57cec5SDimitry Andric     /// all instructions that define 32-bit values implicitly zero-extend the
13630b57cec5SDimitry Andric     /// result out to 64 bits.
13640b57cec5SDimitry Andric     bool isZExtFree(Type *Ty1, Type *Ty2) const override;
13650b57cec5SDimitry Andric     bool isZExtFree(EVT VT1, EVT VT2) const override;
13660b57cec5SDimitry Andric     bool isZExtFree(SDValue Val, EVT VT2) const override;
13670b57cec5SDimitry Andric 
13685ffd83dbSDimitry Andric     bool shouldSinkOperands(Instruction *I,
13695ffd83dbSDimitry Andric                             SmallVectorImpl<Use *> &Ops) const override;
13705ffd83dbSDimitry Andric     bool shouldConvertPhiType(Type *From, Type *To) const override;
13715ffd83dbSDimitry Andric 
13720b57cec5SDimitry Andric     /// Return true if folding a vector load into ExtVal (a sign, zero, or any
13730b57cec5SDimitry Andric     /// extend node) is profitable.
13740b57cec5SDimitry Andric     bool isVectorLoadExtDesirable(SDValue) const override;
13750b57cec5SDimitry Andric 
13760b57cec5SDimitry Andric     /// Return true if an FMA operation is faster than a pair of fmul and fadd
13770b57cec5SDimitry Andric     /// instructions. fmuladd intrinsics will be expanded to FMAs when this
13780b57cec5SDimitry Andric     /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
1379480093f4SDimitry Andric     bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
1380480093f4SDimitry Andric                                     EVT VT) const override;
13810b57cec5SDimitry Andric 
138206c3fb27SDimitry Andric     /// Return true if it's profitable to narrow operations of type SrcVT to
138306c3fb27SDimitry Andric     /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not
138406c3fb27SDimitry Andric     /// from i32 to i16.
138506c3fb27SDimitry Andric     bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const override;
13860b57cec5SDimitry Andric 
1387d56accc7SDimitry Andric     bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
1388d56accc7SDimitry Andric                                               EVT VT) const override;
1389d56accc7SDimitry Andric 
13900b57cec5SDimitry Andric     /// Given an intrinsic, checks if on the target the intrinsic will need to map
13910b57cec5SDimitry Andric     /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
13920b57cec5SDimitry Andric     /// true and stores the intrinsic information into the IntrinsicInfo that was
13930b57cec5SDimitry Andric     /// passed to the function.
13940b57cec5SDimitry Andric     bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
13950b57cec5SDimitry Andric                             MachineFunction &MF,
13960b57cec5SDimitry Andric                             unsigned Intrinsic) const override;
13970b57cec5SDimitry Andric 
13980b57cec5SDimitry Andric     /// Returns true if the target can instruction select the
13990b57cec5SDimitry Andric     /// specified FP immediate natively. If false, the legalizer will
14000b57cec5SDimitry Andric     /// materialize the FP immediate as a load from a constant pool.
14010b57cec5SDimitry Andric     bool isFPImmLegal(const APFloat &Imm, EVT VT,
14020b57cec5SDimitry Andric                       bool ForCodeSize) const override;
14030b57cec5SDimitry Andric 
14040b57cec5SDimitry Andric     /// Targets can use this to indicate that they only support *some*
14050b57cec5SDimitry Andric     /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
14060b57cec5SDimitry Andric     /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
14070b57cec5SDimitry Andric     /// be legal.
14080b57cec5SDimitry Andric     bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
14090b57cec5SDimitry Andric 
14100b57cec5SDimitry Andric     /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
14110b57cec5SDimitry Andric     /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
14120b57cec5SDimitry Andric     /// constant pool entry.
14130b57cec5SDimitry Andric     bool isVectorClearMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
14140b57cec5SDimitry Andric 
14150b57cec5SDimitry Andric     /// Returns true if lowering to a jump table is allowed.
14160b57cec5SDimitry Andric     bool areJTsAllowed(const Function *Fn) const override;
14170b57cec5SDimitry Andric 
141881ad6265SDimitry Andric     MVT getPreferredSwitchConditionType(LLVMContext &Context,
141981ad6265SDimitry Andric                                         EVT ConditionVT) const override;
142081ad6265SDimitry Andric 
14210b57cec5SDimitry Andric     /// If true, then instruction selection should
14220b57cec5SDimitry Andric     /// seek to shrink the FP constant of the specified type to a smaller type
14230b57cec5SDimitry Andric     /// in order to save space and / or reduce runtime.
142481ad6265SDimitry Andric     bool ShouldShrinkFPConstant(EVT VT) const override;
14250b57cec5SDimitry Andric 
14260b57cec5SDimitry Andric     /// Return true if we believe it is correct and profitable to reduce the
14270b57cec5SDimitry Andric     /// load node to a smaller type.
14280b57cec5SDimitry Andric     bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
14290b57cec5SDimitry Andric                                EVT NewVT) const override;
14300b57cec5SDimitry Andric 
14310b57cec5SDimitry Andric     /// Return true if the specified scalar FP type is computed in an SSE
14320b57cec5SDimitry Andric     /// register, not on the X87 floating point stack.
143381ad6265SDimitry Andric     bool isScalarFPTypeInSSEReg(EVT VT) const;
14340b57cec5SDimitry Andric 
14350b57cec5SDimitry Andric     /// Returns true if it is beneficial to convert a load of a constant
14360b57cec5SDimitry Andric     /// to just the constant itself.
14370b57cec5SDimitry Andric     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
14380b57cec5SDimitry Andric                                            Type *Ty) const override;
14390b57cec5SDimitry Andric 
14408bcb0991SDimitry Andric     bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const override;
14410b57cec5SDimitry Andric 
14420b57cec5SDimitry Andric     bool convertSelectOfConstantsToMath(EVT VT) const override;
14430b57cec5SDimitry Andric 
14448bcb0991SDimitry Andric     bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
14458bcb0991SDimitry Andric                                 SDValue C) const override;
14460b57cec5SDimitry Andric 
14470b57cec5SDimitry Andric     /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
14480b57cec5SDimitry Andric     /// with this index.
14490b57cec5SDimitry Andric     bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
14500b57cec5SDimitry Andric                                  unsigned Index) const override;
14510b57cec5SDimitry Andric 
14520b57cec5SDimitry Andric     /// Scalar ops always have equal or better analysis/performance/power than
14530b57cec5SDimitry Andric     /// the vector equivalent, so this always makes sense if the scalar op is
14540b57cec5SDimitry Andric     /// supported.
14550b57cec5SDimitry Andric     bool shouldScalarizeBinop(SDValue) const override;
14560b57cec5SDimitry Andric 
14570b57cec5SDimitry Andric     /// Extract of a scalar FP value from index 0 of a vector is free.
14580b57cec5SDimitry Andric     bool isExtractVecEltCheap(EVT VT, unsigned Index) const override {
14590b57cec5SDimitry Andric       EVT EltVT = VT.getScalarType();
14600b57cec5SDimitry Andric       return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
14610b57cec5SDimitry Andric     }
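    // Minimal illustration (assumption): extracting lane 0 of an FP vector,
    //   %s = extractelement <4 x float> %v, i64 0
    // is free because the scalar already occupies the low element of the XMM
    // register; a non-zero lane or an integer element would need a shuffle or
    // a PEXTR/MOVD, so only the (f32/f64, index 0) case returns true.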
14620b57cec5SDimitry Andric 
14630b57cec5SDimitry Andric     /// Overflow nodes should get combined/lowered to optimal instructions
14640b57cec5SDimitry Andric     /// (they should allow eliminating explicit compares by getting flags from
14650b57cec5SDimitry Andric     /// math ops).
14665ffd83dbSDimitry Andric     bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
14675ffd83dbSDimitry Andric                               bool MathUsed) const override;
14680b57cec5SDimitry Andric 
146906c3fb27SDimitry Andric     bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,
14700b57cec5SDimitry Andric                                       unsigned AddrSpace) const override {
14710b57cec5SDimitry Andric       // If we can replace more than 2 scalar stores, there will be a reduction
14720b57cec5SDimitry Andric       // in instructions even after we add a vector constant load.
147306c3fb27SDimitry Andric       return IsZero || NumElem > 2;
14740b57cec5SDimitry Andric     }
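    // Worked consequence (assumption): replacing four scalar stores of the
    // constant 42 with roughly
    //   movaps xmm0, [constant pool]   ; one added load of <42,42,42,42>
    //   movups [mem], xmm0             ; replaces four scalar movs
    // is a net win (NumElem > 2); an all-zeros constant is always cheap since
    // the vector can be materialized with a single xorps, no load needed.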
14750b57cec5SDimitry Andric 
14760b57cec5SDimitry Andric     bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
14770b57cec5SDimitry Andric                                  const SelectionDAG &DAG,
14780b57cec5SDimitry Andric                                  const MachineMemOperand &MMO) const override;
14790b57cec5SDimitry Andric 
1480480093f4SDimitry Andric     Register getRegisterByName(const char* RegName, LLT VT,
14818bcb0991SDimitry Andric                                const MachineFunction &MF) const override;
14820b57cec5SDimitry Andric 
14830b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
14840b57cec5SDimitry Andric     /// exception address on entry to an EH pad.
14855ffd83dbSDimitry Andric     Register
14860b57cec5SDimitry Andric     getExceptionPointerRegister(const Constant *PersonalityFn) const override;
14870b57cec5SDimitry Andric 
14880b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
14890b57cec5SDimitry Andric     /// exception typeid on entry to a landing pad.
14905ffd83dbSDimitry Andric     Register
14910b57cec5SDimitry Andric     getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
14920b57cec5SDimitry Andric 
1493972a253aSDimitry Andric     bool needsFixedCatchObjects() const override;
14940b57cec5SDimitry Andric 
14950b57cec5SDimitry Andric     /// This method returns a target specific FastISel object,
14960b57cec5SDimitry Andric     /// or null if the target does not support "fast" ISel.
14970b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
14980b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo) const override;
14990b57cec5SDimitry Andric 
15000b57cec5SDimitry Andric     /// If the target has a standard location for the stack protector cookie,
15010b57cec5SDimitry Andric     /// returns the address of that location. Otherwise, returns nullptr.
1502fe6060f1SDimitry Andric     Value *getIRStackGuard(IRBuilderBase &IRB) const override;
15030b57cec5SDimitry Andric 
15040b57cec5SDimitry Andric     bool useLoadStackGuardNode() const override;
15050b57cec5SDimitry Andric     bool useStackGuardXorFP() const override;
15060b57cec5SDimitry Andric     void insertSSPDeclarations(Module &M) const override;
15070b57cec5SDimitry Andric     Value *getSDagStackGuard(const Module &M) const override;
15080b57cec5SDimitry Andric     Function *getSSPStackGuardCheck(const Module &M) const override;
15090b57cec5SDimitry Andric     SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
15100b57cec5SDimitry Andric                                 const SDLoc &DL) const override;
15110b57cec5SDimitry Andric 
15120b57cec5SDimitry Andric 
15130b57cec5SDimitry Andric     /// Return true if the target stores SafeStack pointer at a fixed offset in
15140b57cec5SDimitry Andric     /// some non-standard address space, and populates the address space and
15150b57cec5SDimitry Andric     /// offset as appropriate.
1516fe6060f1SDimitry Andric     Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;
15170b57cec5SDimitry Andric 
15185ffd83dbSDimitry Andric     std::pair<SDValue, SDValue> BuildFILD(EVT DstVT, EVT SrcVT, const SDLoc &DL,
15195ffd83dbSDimitry Andric                                           SDValue Chain, SDValue Pointer,
15205ffd83dbSDimitry Andric                                           MachinePointerInfo PtrInfo,
15215ffd83dbSDimitry Andric                                           Align Alignment,
15220b57cec5SDimitry Andric                                           SelectionDAG &DAG) const;
15230b57cec5SDimitry Andric 
15240b57cec5SDimitry Andric     /// Customize the preferred legalization strategy for certain types.
15250b57cec5SDimitry Andric     LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
15260b57cec5SDimitry Andric 
15275ffd83dbSDimitry Andric     bool softPromoteHalfType() const override { return true; }
15285ffd83dbSDimitry Andric 
15290b57cec5SDimitry Andric     MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
15300b57cec5SDimitry Andric                                       EVT VT) const override;
15310b57cec5SDimitry Andric 
15320b57cec5SDimitry Andric     unsigned getNumRegistersForCallingConv(LLVMContext &Context,
15330b57cec5SDimitry Andric                                            CallingConv::ID CC,
15340b57cec5SDimitry Andric                                            EVT VT) const override;
15350b57cec5SDimitry Andric 
15368bcb0991SDimitry Andric     unsigned getVectorTypeBreakdownForCallingConv(
15378bcb0991SDimitry Andric         LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
15388bcb0991SDimitry Andric         unsigned &NumIntermediates, MVT &RegisterVT) const override;
15398bcb0991SDimitry Andric 
15400b57cec5SDimitry Andric     bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
15410b57cec5SDimitry Andric 
15420b57cec5SDimitry Andric     bool supportSwiftError() const override;
15430b57cec5SDimitry Andric 
1544bdd1243dSDimitry Andric     bool supportKCFIBundles() const override { return true; }
15450b57cec5SDimitry Andric 
154606c3fb27SDimitry Andric     MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
154706c3fb27SDimitry Andric                                 MachineBasicBlock::instr_iterator &MBBI,
154806c3fb27SDimitry Andric                                 const TargetInstrInfo *TII) const override;
154906c3fb27SDimitry Andric 
1550bdd1243dSDimitry Andric     bool hasStackProbeSymbol(const MachineFunction &MF) const override;
1551bdd1243dSDimitry Andric     bool hasInlineStackProbe(const MachineFunction &MF) const override;
1552bdd1243dSDimitry Andric     StringRef getStackProbeSymbolName(const MachineFunction &MF) const override;
1553bdd1243dSDimitry Andric 
1554bdd1243dSDimitry Andric     unsigned getStackProbeSize(const MachineFunction &MF) const;
15558bcb0991SDimitry Andric 
15560b57cec5SDimitry Andric     bool hasVectorBlend() const override { return true; }
15570b57cec5SDimitry Andric 
15580b57cec5SDimitry Andric     unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
15590b57cec5SDimitry Andric 
1560bdd1243dSDimitry Andric     bool isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
1561bdd1243dSDimitry Andric                                  unsigned OpNo) const override;
1562bdd1243dSDimitry Andric 
1563*0fca6ea1SDimitry Andric     SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
1564*0fca6ea1SDimitry Andric                             MachineMemOperand *MMO, SDValue &NewLoad,
1565*0fca6ea1SDimitry Andric                             SDValue Ptr, SDValue PassThru,
1566*0fca6ea1SDimitry Andric                             SDValue Mask) const override;
1567*0fca6ea1SDimitry Andric     SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
1568*0fca6ea1SDimitry Andric                              MachineMemOperand *MMO, SDValue Ptr, SDValue Val,
1569*0fca6ea1SDimitry Andric                              SDValue Mask) const override;
1570*0fca6ea1SDimitry Andric 
15710b57cec5SDimitry Andric     /// Lower interleaved load(s) into target specific
15720b57cec5SDimitry Andric     /// instructions/intrinsics.
15730b57cec5SDimitry Andric     bool lowerInterleavedLoad(LoadInst *LI,
15740b57cec5SDimitry Andric                               ArrayRef<ShuffleVectorInst *> Shuffles,
15750b57cec5SDimitry Andric                               ArrayRef<unsigned> Indices,
15760b57cec5SDimitry Andric                               unsigned Factor) const override;
15770b57cec5SDimitry Andric 
15780b57cec5SDimitry Andric     /// Lower interleaved store(s) into target specific
15790b57cec5SDimitry Andric     /// instructions/intrinsics.
15800b57cec5SDimitry Andric     bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
15810b57cec5SDimitry Andric                                unsigned Factor) const override;
15820b57cec5SDimitry Andric 
15835f757f3fSDimitry Andric     SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr,
15845f757f3fSDimitry Andric                                    int JTI, SelectionDAG &DAG) const override;
15850b57cec5SDimitry Andric 
1586e8d8bef9SDimitry Andric     Align getPrefLoopAlignment(MachineLoop *ML) const override;
1587e8d8bef9SDimitry Andric 
1588bdd1243dSDimitry Andric     EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override {
1589bdd1243dSDimitry Andric       if (VT == MVT::f80)
1590bdd1243dSDimitry Andric         return EVT::getIntegerVT(Context, 96);
1591bdd1243dSDimitry Andric       return TargetLoweringBase::getTypeToTransformTo(Context, VT);
1592bdd1243dSDimitry Andric     }
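    // Hedged note (assumption): 96 bits is the smallest multiple of 32 that
    // still covers an 80-bit payload, so an x86_fp80 value that has to be
    // round-tripped through integer legalization, conceptually
    //   %i = bitcast x86_fp80 %v to i80   ; then widened to the transform type
    // keeps all of its bits; every other type uses the default
    // TargetLoweringBase mapping.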
1593bdd1243dSDimitry Andric 
15940b57cec5SDimitry Andric   protected:
15950b57cec5SDimitry Andric     std::pair<const TargetRegisterClass *, uint8_t>
15960b57cec5SDimitry Andric     findRepresentativeClass(const TargetRegisterInfo *TRI,
15970b57cec5SDimitry Andric                             MVT VT) const override;
15980b57cec5SDimitry Andric 
15990b57cec5SDimitry Andric   private:
16000b57cec5SDimitry Andric     /// Keep a reference to the X86Subtarget around so that we can
16010b57cec5SDimitry Andric     /// make the right decision when generating code for different targets.
16020b57cec5SDimitry Andric     const X86Subtarget &Subtarget;
16030b57cec5SDimitry Andric 
16040b57cec5SDimitry Andric     /// A list of legal FP immediates.
16050b57cec5SDimitry Andric     std::vector<APFloat> LegalFPImmediates;
16060b57cec5SDimitry Andric 
16070b57cec5SDimitry Andric     /// Indicate that this x86 target can instruction
16080b57cec5SDimitry Andric     /// select the specified FP immediate natively.
16090b57cec5SDimitry Andric     void addLegalFPImmediate(const APFloat& Imm) {
16100b57cec5SDimitry Andric       LegalFPImmediates.push_back(Imm);
16110b57cec5SDimitry Andric     }
16120b57cec5SDimitry Andric 
161306c3fb27SDimitry Andric     SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
16140b57cec5SDimitry Andric                             CallingConv::ID CallConv, bool isVarArg,
16150b57cec5SDimitry Andric                             const SmallVectorImpl<ISD::InputArg> &Ins,
16160b57cec5SDimitry Andric                             const SDLoc &dl, SelectionDAG &DAG,
16170b57cec5SDimitry Andric                             SmallVectorImpl<SDValue> &InVals,
16180b57cec5SDimitry Andric                             uint32_t *RegMask) const;
16190b57cec5SDimitry Andric     SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
16200b57cec5SDimitry Andric                              const SmallVectorImpl<ISD::InputArg> &ArgInfo,
16210b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
16220b57cec5SDimitry Andric                              const CCValAssign &VA, MachineFrameInfo &MFI,
16230b57cec5SDimitry Andric                              unsigned i) const;
16240b57cec5SDimitry Andric     SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
16250b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
16260b57cec5SDimitry Andric                              const CCValAssign &VA,
16275ffd83dbSDimitry Andric                              ISD::ArgFlagsTy Flags, bool isByval) const;
16280b57cec5SDimitry Andric 
16290b57cec5SDimitry Andric     // Call lowering helpers.
16300b57cec5SDimitry Andric 
16310b57cec5SDimitry Andric     /// Check whether the call is eligible for tail call optimization. Targets
16320b57cec5SDimitry Andric     /// that want to do tail call optimization should implement this function.
1633349cc55cSDimitry Andric     bool IsEligibleForTailCallOptimization(
1634*0fca6ea1SDimitry Andric         TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
1635*0fca6ea1SDimitry Andric         SmallVectorImpl<CCValAssign> &ArgLocs, bool IsCalleePopSRet) const;
16360b57cec5SDimitry Andric     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
16370b57cec5SDimitry Andric                                     SDValue Chain, bool IsTailCall,
16380b57cec5SDimitry Andric                                     bool Is64Bit, int FPDiff,
16390b57cec5SDimitry Andric                                     const SDLoc &dl) const;
16400b57cec5SDimitry Andric 
16410b57cec5SDimitry Andric     unsigned GetAlignedArgumentStackSize(unsigned StackSize,
16420b57cec5SDimitry Andric                                          SelectionDAG &DAG) const;
16430b57cec5SDimitry Andric 
164404eeddc0SDimitry Andric     unsigned getAddressSpace() const;
16450b57cec5SDimitry Andric 
16465ffd83dbSDimitry Andric     SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned,
1647480093f4SDimitry Andric                             SDValue &Chain) const;
16485ffd83dbSDimitry Andric     SDValue LRINT_LLRINTHelper(SDNode *N, SelectionDAG &DAG) const;
16490b57cec5SDimitry Andric 
16500b57cec5SDimitry Andric     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
16510b57cec5SDimitry Andric     SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
16520b57cec5SDimitry Andric     SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
16530b57cec5SDimitry Andric     SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
16540b57cec5SDimitry Andric 
16555f757f3fSDimitry Andric     unsigned getGlobalWrapperKind(const GlobalValue *GV,
16565f757f3fSDimitry Andric                                   const unsigned char OpFlags) const;
16570b57cec5SDimitry Andric     SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
16580b57cec5SDimitry Andric     SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
16590b57cec5SDimitry Andric     SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
16600b57cec5SDimitry Andric     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
16610b57cec5SDimitry Andric     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
16620b57cec5SDimitry Andric 
16630b57cec5SDimitry Andric     /// Creates target global address or external symbol nodes for calls or
16640b57cec5SDimitry Andric     /// other uses.
16650b57cec5SDimitry Andric     SDValue LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
16660b57cec5SDimitry Andric                                   bool ForCall) const;
16670b57cec5SDimitry Andric 
16680b57cec5SDimitry Andric     SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
16690b57cec5SDimitry Andric     SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
16700b57cec5SDimitry Andric     SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
16710b57cec5SDimitry Andric     SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
1672e8d8bef9SDimitry Andric     SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
16735ffd83dbSDimitry Andric     SDValue LowerLRINT_LLRINT(SDValue Op, SelectionDAG &DAG) const;
16740b57cec5SDimitry Andric     SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
16750b57cec5SDimitry Andric     SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
16760b57cec5SDimitry Andric     SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
16770b57cec5SDimitry Andric     SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
16780b57cec5SDimitry Andric     SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
16790b57cec5SDimitry Andric     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
16800b57cec5SDimitry Andric     SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
16810b57cec5SDimitry Andric     SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
16820b57cec5SDimitry Andric     SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
16830b57cec5SDimitry Andric     SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
16840b57cec5SDimitry Andric     SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
16850b57cec5SDimitry Andric     SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
16860b57cec5SDimitry Andric     SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
16870b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
16880b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
16890b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
16900b57cec5SDimitry Andric     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1691bdd1243dSDimitry Andric     SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
1692fe6060f1SDimitry Andric     SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
169306c3fb27SDimitry Andric     SDValue LowerGET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const;
169406c3fb27SDimitry Andric     SDValue LowerSET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const;
169506c3fb27SDimitry Andric     SDValue LowerRESET_FPENV(SDValue Op, SelectionDAG &DAG) const;
16960b57cec5SDimitry Andric     SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
1697349cc55cSDimitry Andric     SDValue LowerWin64_FP_TO_INT128(SDValue Op, SelectionDAG &DAG,
1698349cc55cSDimitry Andric                                     SDValue &Chain) const;
1699349cc55cSDimitry Andric     SDValue LowerWin64_INT128_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1700480093f4SDimitry Andric     SDValue LowerGC_TRANSITION(SDValue Op, SelectionDAG &DAG) const;
17010b57cec5SDimitry Andric     SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
17028bcb0991SDimitry Andric     SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const;
17038bcb0991SDimitry Andric     SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
17048bcb0991SDimitry Andric     SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
170561cfbce3SDimitry Andric     SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
17068bcb0991SDimitry Andric 
17070b57cec5SDimitry Andric     SDValue
17080b57cec5SDimitry Andric     LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
17090b57cec5SDimitry Andric                          const SmallVectorImpl<ISD::InputArg> &Ins,
17100b57cec5SDimitry Andric                          const SDLoc &dl, SelectionDAG &DAG,
17110b57cec5SDimitry Andric                          SmallVectorImpl<SDValue> &InVals) const override;
17120b57cec5SDimitry Andric     SDValue LowerCall(CallLoweringInfo &CLI,
17130b57cec5SDimitry Andric                       SmallVectorImpl<SDValue> &InVals) const override;
17140b57cec5SDimitry Andric 
17150b57cec5SDimitry Andric     SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
17160b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
17170b57cec5SDimitry Andric                         const SmallVectorImpl<SDValue> &OutVals,
17180b57cec5SDimitry Andric                         const SDLoc &dl, SelectionDAG &DAG) const override;
17190b57cec5SDimitry Andric 
17200b57cec5SDimitry Andric     bool supportSplitCSR(MachineFunction *MF) const override {
17210b57cec5SDimitry Andric       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
17220b57cec5SDimitry Andric           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
17230b57cec5SDimitry Andric     }
17240b57cec5SDimitry Andric     void initializeSplitCSR(MachineBasicBlock *Entry) const override;
17250b57cec5SDimitry Andric     void insertCopiesSplitCSR(
17260b57cec5SDimitry Andric       MachineBasicBlock *Entry,
17270b57cec5SDimitry Andric       const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
17280b57cec5SDimitry Andric 
17290b57cec5SDimitry Andric     bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
17300b57cec5SDimitry Andric 
17310b57cec5SDimitry Andric     bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
17320b57cec5SDimitry Andric 
17330b57cec5SDimitry Andric     EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
17340b57cec5SDimitry Andric                             ISD::NodeType ExtendKind) const override;
17350b57cec5SDimitry Andric 
17360b57cec5SDimitry Andric     bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
17370b57cec5SDimitry Andric                         bool isVarArg,
17380b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
17390b57cec5SDimitry Andric                         LLVMContext &Context) const override;
17400b57cec5SDimitry Andric 
17410b57cec5SDimitry Andric     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
174206c3fb27SDimitry Andric     ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
17430b57cec5SDimitry Andric 
17440b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
17455ffd83dbSDimitry Andric     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
174681ad6265SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
174781ad6265SDimitry Andric     shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
17480b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
17490b57cec5SDimitry Andric     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
175081ad6265SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
175181ad6265SDimitry Andric     shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const;
175281ad6265SDimitry Andric     void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
1753bdd1243dSDimitry Andric     void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
17540b57cec5SDimitry Andric 
17550b57cec5SDimitry Andric     LoadInst *
17560b57cec5SDimitry Andric     lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
17570b57cec5SDimitry Andric 
17580b57cec5SDimitry Andric     bool needsCmpXchgNb(Type *MemType) const;
17590b57cec5SDimitry Andric 
17600b57cec5SDimitry Andric     void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
17610b57cec5SDimitry Andric                                 MachineBasicBlock *DispatchBB, int FI) const;
17620b57cec5SDimitry Andric 
17630b57cec5SDimitry Andric     // Utility function to emit the low-level va_arg code for X86-64.
17640b57cec5SDimitry Andric     MachineBasicBlock *
1765e8d8bef9SDimitry Andric     EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
17660b57cec5SDimitry Andric 
17670b57cec5SDimitry Andric     /// Utility function to emit the xmm reg save portion of va_start.
17680b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCascadedSelect(MachineInstr &MI1,
17690b57cec5SDimitry Andric                                                  MachineInstr &MI2,
17700b57cec5SDimitry Andric                                                  MachineBasicBlock *BB) const;
17710b57cec5SDimitry Andric 
17720b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
17730b57cec5SDimitry Andric                                          MachineBasicBlock *BB) const;
17740b57cec5SDimitry Andric 
17750b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
17760b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
17770b57cec5SDimitry Andric 
17785ffd83dbSDimitry Andric     MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
17790b57cec5SDimitry Andric                                             MachineBasicBlock *BB) const;
17800b57cec5SDimitry Andric 
17815ffd83dbSDimitry Andric     MachineBasicBlock *EmitLoweredProbedAlloca(MachineInstr &MI,
17820b57cec5SDimitry Andric                                                MachineBasicBlock *BB) const;
17830b57cec5SDimitry Andric 
17840b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
17850b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
17860b57cec5SDimitry Andric 
17870b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
17880b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
17890b57cec5SDimitry Andric 
17900946e70aSDimitry Andric     MachineBasicBlock *EmitLoweredIndirectThunk(MachineInstr &MI,
17910b57cec5SDimitry Andric                                                 MachineBasicBlock *BB) const;
17920b57cec5SDimitry Andric 
17930b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
17940b57cec5SDimitry Andric                                         MachineBasicBlock *MBB) const;
17950b57cec5SDimitry Andric 
17960b57cec5SDimitry Andric     void emitSetJmpShadowStackFix(MachineInstr &MI,
17970b57cec5SDimitry Andric                                   MachineBasicBlock *MBB) const;
17980b57cec5SDimitry Andric 
17990b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
18000b57cec5SDimitry Andric                                          MachineBasicBlock *MBB) const;
18010b57cec5SDimitry Andric 
18020b57cec5SDimitry Andric     MachineBasicBlock *emitLongJmpShadowStackFix(MachineInstr &MI,
18030b57cec5SDimitry Andric                                                  MachineBasicBlock *MBB) const;
18040b57cec5SDimitry Andric 
18050b57cec5SDimitry Andric     MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
18060b57cec5SDimitry Andric                                              MachineBasicBlock *MBB) const;
18070b57cec5SDimitry Andric 
1808*0fca6ea1SDimitry Andric     MachineBasicBlock *emitPatchableEventCall(MachineInstr &MI,
1809*0fca6ea1SDimitry Andric                                               MachineBasicBlock *MBB) const;
1810*0fca6ea1SDimitry Andric 
18110b57cec5SDimitry Andric     /// Emit flags for the given setcc condition and operands. Also returns the
18120b57cec5SDimitry Andric     /// corresponding X86 condition code constant in X86CC.
1813480093f4SDimitry Andric     SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1, ISD::CondCode CC,
1814480093f4SDimitry Andric                               const SDLoc &dl, SelectionDAG &DAG,
18155ffd83dbSDimitry Andric                               SDValue &X86CC) const;
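
    // Editorial usage sketch (illustrative, not part of the upstream header):
    // the SDValue returned by emitFlagsForSetcc is the EFLAGS-producing node,
    // and X86CC receives the X86 condition code as a constant operand; callers
    // typically feed both into an X86ISD::SETCC, CMOV or BRCOND node, e.g.:
    //
    //   SDValue X86CC;
    //   SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
    //   SDValue SetCC =
    //       DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);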
18160b57cec5SDimitry Andric 
18175f757f3fSDimitry Andric     bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
18185f757f3fSDimitry Andric                                              SDValue IntPow2) const override;
18195f757f3fSDimitry Andric 
18200b57cec5SDimitry Andric     /// Check if replacement of SQRT with RSQRT should be disabled.
18215ffd83dbSDimitry Andric     bool isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const override;
18220b57cec5SDimitry Andric 
18230b57cec5SDimitry Andric     /// Use rsqrt* to speed up sqrt calculations.
18245ffd83dbSDimitry Andric     SDValue getSqrtEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
18250b57cec5SDimitry Andric                             int &RefinementSteps, bool &UseOneConstNR,
18260b57cec5SDimitry Andric                             bool Reciprocal) const override;
18270b57cec5SDimitry Andric 
18280b57cec5SDimitry Andric     /// Use rcp* to speed up fdiv calculations.
18295ffd83dbSDimitry Andric     SDValue getRecipEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
18300b57cec5SDimitry Andric                              int &RefinementSteps) const override;
18310b57cec5SDimitry Andric 
18320b57cec5SDimitry Andric     /// Reassociate floating point divisions into multiply by reciprocal.
18330b57cec5SDimitry Andric     unsigned combineRepeatedFPDivisors() const override;
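
    // Editorial note (sketch, not part of the upstream header): the
    // getSqrtEstimate/getRecipEstimate hooks above return a hardware estimate
    // (e.g. from the RSQRT*/RCP* instructions) that the generic DAG combiner
    // refines with Newton-Raphson iterations; the hooks report the required
    // iteration count through RefinementSteps. Illustrative scalar form of one
    // refinement step, given an estimate x of 1/sqrt(a) or 1/a respectively
    // (refineRsqrt/refineRecip are hypothetical names):
    //
    //   float refineRsqrt(float a, float x) {
    //     return x * (1.5f - 0.5f * a * x * x); // one N-R step for 1/sqrt(a)
    //   }
    //   float refineRecip(float a, float x) {
    //     return x * (2.0f - a * x);            // one N-R step for 1/a
    //   }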
18348bcb0991SDimitry Andric 
18358bcb0991SDimitry Andric     SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
18368bcb0991SDimitry Andric                           SmallVectorImpl<SDNode *> &Created) const override;
18375f757f3fSDimitry Andric 
18385f757f3fSDimitry Andric     SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
18395f757f3fSDimitry Andric                     SDValue V2) const;
18400b57cec5SDimitry Andric   };
18410b57cec5SDimitry Andric 
18420b57cec5SDimitry Andric   namespace X86 {
18430b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
18440b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo);
18450b57cec5SDimitry Andric   } // end namespace X86
18460b57cec5SDimitry Andric 
18470b57cec5SDimitry Andric   // X86 specific Gather/Scatter nodes.
18480b57cec5SDimitry Andric   // These classes use the same operand order as MaskedGatherScatterSDNode for
18490b57cec5SDimitry Andric   // convenience; a usage sketch follows the class definitions below.
18505ffd83dbSDimitry Andric   class X86MaskedGatherScatterSDNode : public MemIntrinsicSDNode {
18510b57cec5SDimitry Andric   public:
18525ffd83dbSDimitry Andric     // This is intended as a utility and should never be directly created.
18535ffd83dbSDimitry Andric     X86MaskedGatherScatterSDNode() = delete;
18545ffd83dbSDimitry Andric     ~X86MaskedGatherScatterSDNode() = delete;
18550b57cec5SDimitry Andric 
18560b57cec5SDimitry Andric     const SDValue &getBasePtr() const { return getOperand(3); }
18570b57cec5SDimitry Andric     const SDValue &getIndex()   const { return getOperand(4); }
18580b57cec5SDimitry Andric     const SDValue &getMask()    const { return getOperand(2); }
18590b57cec5SDimitry Andric     const SDValue &getScale()   const { return getOperand(5); }
18600b57cec5SDimitry Andric 
18610b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
18620b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER ||
18630b57cec5SDimitry Andric              N->getOpcode() == X86ISD::MSCATTER;
18640b57cec5SDimitry Andric     }
18650b57cec5SDimitry Andric   };
18660b57cec5SDimitry Andric 
18670b57cec5SDimitry Andric   class X86MaskedGatherSDNode : public X86MaskedGatherScatterSDNode {
18680b57cec5SDimitry Andric   public:
18690b57cec5SDimitry Andric     const SDValue &getPassThru() const { return getOperand(1); }
18700b57cec5SDimitry Andric 
18710b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
18720b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER;
18730b57cec5SDimitry Andric     }
18740b57cec5SDimitry Andric   };
18750b57cec5SDimitry Andric 
18760b57cec5SDimitry Andric   class X86MaskedScatterSDNode : public X86MaskedGatherScatterSDNode {
18770b57cec5SDimitry Andric   public:
18780b57cec5SDimitry Andric     const SDValue &getValue() const { return getOperand(1); }
18790b57cec5SDimitry Andric 
18800b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
18810b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MSCATTER;
18820b57cec5SDimitry Andric     }
18830b57cec5SDimitry Andric   };
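
  // Editorial usage sketch (illustrative, not part of the upstream header):
  // these nodes hook into LLVM's custom RTTI via classof, so combines access
  // them with cast/dyn_cast, e.g.:
  //
  //   if (auto *Gather = dyn_cast<X86MaskedGatherSDNode>(N)) {
  //     SDValue Index    = Gather->getIndex();
  //     SDValue Mask     = Gather->getMask();
  //     SDValue PassThru = Gather->getPassThru();
  //     // ... rewrite the gather using the extracted operands ...
  //   }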
18840b57cec5SDimitry Andric 
18850b57cec5SDimitry Andric   /// Generate unpacklo/unpackhi shuffle mask.
1886e8d8bef9SDimitry Andric   void createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask, bool Lo,
18875ffd83dbSDimitry Andric                                bool Unary);
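
  // Editorial example (illustrative, not part of the upstream header): for a
  // 128-bit v8i16 type the generated interleave masks would look like:
  //   Lo, binary (Unary = false) --> <0, 8, 1, 9, 2, 10, 3, 11>
  //   Lo, unary  (Unary = true)  --> <0, 0, 1, 1, 2, 2, 3, 3>
  // with wider types repeating the pattern per 128-bit lane. Typical use:
  //
  //   SmallVector<int, 8> Mask;
  //   createUnpackShuffleMask(MVT::v8i16, Mask, /*Lo=*/true, /*Unary=*/false);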
18880b57cec5SDimitry Andric 
18895ffd83dbSDimitry Andric   /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
18905ffd83dbSDimitry Andric   /// imposed by AVX and specific to the unary pattern. Example:
18915ffd83dbSDimitry Andric   /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
18925ffd83dbSDimitry Andric   /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
18935ffd83dbSDimitry Andric   void createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask, bool Lo);
18940b57cec5SDimitry Andric 
18950b57cec5SDimitry Andric } // end namespace llvm
18960b57cec5SDimitry Andric 
18970b57cec5SDimitry Andric #endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
1898