//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H

#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
  class X86Subtarget;
  class X86TargetMachine;

  namespace X86ISD {
    // X86 Specific DAG Nodes
  enum NodeType : unsigned {
    // Start the numbering where the builtin ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    /// Bit scan forward.
    BSF,
    /// Bit scan reverse.
    BSR,

    /// X86 funnel/double shift i16 instructions. These correspond to
    /// X86::SHLDW and X86::SHRDW instructions, which have different amount
    /// modulo rules from generic funnel shifts.
    /// NOTE: The operand order matches ISD::FSHL/FSHR, not SHLD/SHRD.
    FSHL,
    FSHR,

    /// Bitwise logical AND of floating point values. This corresponds
    /// to X86::ANDPS or X86::ANDPD.
    FAND,

    /// Bitwise logical OR of floating point values. This corresponds
    /// to X86::ORPS or X86::ORPD.
    FOR,

    /// Bitwise logical XOR of floating point values. This corresponds
    /// to X86::XORPS or X86::XORPD.
    FXOR,

    /// Bitwise logical ANDNOT of floating point values. This
    /// corresponds to X86::ANDNPS or X86::ANDNPD.
    FANDN,

    /// These operations represent an abstract X86 call
    /// instruction, which includes a bunch of information.  In particular the
    /// operands of these nodes are:
    ///
    ///     #0 - The incoming token chain
    ///     #1 - The callee
    ///     #2 - The number of arg bytes the caller pushes on the stack.
    ///     #3 - The number of arg bytes the callee pops off the stack.
    ///     #4 - The value to pass in AL/AX/EAX (optional)
    ///     #5 - The value to pass in DL/DX/EDX (optional)
    ///
    /// The result values of these nodes are:
    ///
    ///     #0 - The outgoing token chain
    ///     #1 - The first register result value (optional)
    ///     #2 - The second register result value (optional)
    ///
    CALL,
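
    // As an illustrative sketch only (assumed names, not code from this
    // header), a CALL node like the one above is typically created during
    // call lowering along the lines of:
    //
    //   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    //   SDValue Call = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
    //
    // where DAG, dl and Ops are the SelectionDAG, SDLoc and operand list
    // (chain, callee, argument/register copies, ...) available at that point.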

    /// Same as CALL, except it adds the NoTrack prefix.
    NT_CALL,

    // Pseudo for an Objective-C call that gets emitted together with a
    // special marker instruction.
    CALL_RVMARKER,

    /// X86 compare and logical compare instructions.
    CMP,
    FCMP,
    COMI,
    UCOMI,

    /// X86 bit-test instructions.
    BT,

    /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
    /// operand, usually produced by a CMP instruction.
    SETCC,

    /// X86 Select
    SELECTS,

    // Same as SETCC except it's materialized with an SBB and the value is all
    // ones or all zeros.
    SETCC_CARRY, // R = carry_bit ? ~0 : 0

    /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
    /// Operands are two FP values to compare; result is a mask of
    /// 0s or 1s.  Generally does the right thing for C/C++ with NaNs.
    FSETCC,

    /// X86 FP SETCC, similar to above, but with output as an i1 mask, and
    /// a version with SAE.
    FSETCCM,
    FSETCCM_SAE,

    /// X86 conditional moves. Operand 0 and operand 1 are the two values
    /// to select from. Operand 2 is the condition code, and operand 3 is the
    /// flag operand produced by a CMP or TEST instruction.
    CMOV,

    /// X86 conditional branches. Operand 0 is the chain operand, operand 1
    /// is the block to branch to if the condition is true, operand 2 is the
    /// condition code, and operand 3 is the flag operand produced by a CMP
    /// or TEST instruction.
    BRCOND,

    /// BRIND node with NoTrack prefix. Operand 0 is the chain operand and
    /// operand 1 is the target address.
    NT_BRIND,

    /// Return with a flag operand. Operand 0 is the chain operand, operand
    /// 1 is the number of bytes of stack to pop.
    RET_FLAG,

    /// Return from interrupt. Operand 0 is the number of bytes to pop.
    IRET,

    /// Repeat fill, corresponds to X86::REP_STOSx.
    REP_STOS,

    /// Repeat move, corresponds to X86::REP_MOVSx.
    REP_MOVS,

    /// On Darwin, this node represents the result of the popl
    /// at function entry, used for PIC code.
    GlobalBaseReg,

    /// A wrapper node for TargetConstantPool, TargetJumpTable,
    /// TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress,
    /// MCSymbol and TargetBlockAddress.
    Wrapper,

    /// Special wrapper used under X86-64 PIC mode for RIP
    /// relative displacements.
    WrapperRIP,

    /// Copies a 64-bit value from an MMX vector to the low word
    /// of an XMM vector, with the high word zero filled.
    MOVQ2DQ,

    /// Copies a 64-bit value from the low word of an XMM vector
    /// to an MMX vector.
    MOVDQ2Q,

    /// Copies a 32-bit value from the low word of an MMX
    /// vector to a GPR.
    MMX_MOVD2W,

    /// Copies a GPR into the low 32-bit word of an MMX vector
    /// and zeroes out the high word.
    MMX_MOVW2D,

    /// Extract an 8-bit value from a vector and zero extend it to
    /// i32, corresponds to X86::PEXTRB.
    PEXTRB,

    /// Extract a 16-bit value from a vector and zero extend it to
    /// i32, corresponds to X86::PEXTRW.
    PEXTRW,

    /// Insert any element of a 4 x float vector into any element
    /// of a destination 4 x float vector.
    INSERTPS,

    /// Insert the lower 8 bits of a 32-bit value into a vector,
    /// corresponds to X86::PINSRB.
    PINSRB,

    /// Insert the lower 16 bits of a 32-bit value into a vector,
    /// corresponds to X86::PINSRW.
    PINSRW,

    /// Shuffle 16 8-bit values within a vector.
    PSHUFB,

    /// Compute Sum of Absolute Differences.
    PSADBW,
    /// Compute Double Block Packed Sum-Absolute-Differences
    DBPSADBW,

    /// Bitwise Logical AND NOT of Packed FP values.
    ANDNP,

    /// Blend where the selector is an immediate.
    BLENDI,

    /// Dynamic (non-constant condition) vector blend where only the sign bits
    /// of the condition elements are used. This is used to enforce that the
    /// condition mask is not valid for generic VSELECT optimizations. This
    /// is also used to implement the intrinsics.
    /// Operands are in VSELECT order: MASK, TRUE, FALSE
    BLENDV,
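
    // A clarifying note (paraphrase, not from the original comment): with the
    // VSELECT-style operand order above, each result element is roughly
    //   Res[i] = (sign bit of MASK[i]) ? TRUE[i] : FALSE[i]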

    /// Combined add and sub on an FP vector.
    ADDSUB,

    //  FP vector ops with rounding mode.
    FADD_RND,
    FADDS,
    FADDS_RND,
    FSUB_RND,
    FSUBS,
    FSUBS_RND,
    FMUL_RND,
    FMULS,
    FMULS_RND,
    FDIV_RND,
    FDIVS,
    FDIVS_RND,
    FMAX_SAE,
    FMAXS_SAE,
    FMIN_SAE,
    FMINS_SAE,
    FSQRT_RND,
    FSQRTS,
    FSQRTS_RND,

    // FP vector get exponent.
    FGETEXP,
    FGETEXP_SAE,
    FGETEXPS,
    FGETEXPS_SAE,
    // Extract Normalized Mantissas.
    VGETMANT,
    VGETMANT_SAE,
    VGETMANTS,
    VGETMANTS_SAE,
    // FP Scale.
    SCALEF,
    SCALEF_RND,
    SCALEFS,
    SCALEFS_RND,

    // Unsigned Integer average.
    AVG,

    /// Integer horizontal add/sub.
    HADD,
    HSUB,

    /// Floating point horizontal add/sub.
    FHADD,
    FHSUB,

    // Detect Conflicts Within a Vector
    CONFLICT,

    /// Floating point max and min.
    FMAX,
    FMIN,

    /// Commutative FMIN and FMAX.
    FMAXC,
    FMINC,

    /// Scalar intrinsic floating point max and min.
    FMAXS,
    FMINS,

    /// Floating point reciprocal-sqrt and reciprocal approximation.
    /// Note that these typically require refinement
    /// in order to obtain suitable precision.
    FRSQRT,
    FRCP,

    // AVX-512 reciprocal approximations with a little more precision.
    RSQRT14,
    RSQRT14S,
    RCP14,
    RCP14S,

    // Thread Local Storage.
    TLSADDR,

    // Thread Local Storage. A call to get the start address
    // of the TLS block for the current module.
    TLSBASEADDR,

    // Thread Local Storage. A call to an OS-provided thunk at the
    // address from an earlier relocation.
    TLSCALL,

    // Exception Handling helpers.
    EH_RETURN,

    // SjLj exception handling setjmp.
    EH_SJLJ_SETJMP,

    // SjLj exception handling longjmp.
    EH_SJLJ_LONGJMP,

    // SjLj exception handling dispatch.
    EH_SJLJ_SETUP_DISPATCH,

    /// Tail call return. See X86TargetLowering::LowerCall for
    /// the list of operands.
    TC_RETURN,

    // Vector move to low scalar and zero higher vector elements.
    VZEXT_MOVL,

    // Vector integer truncate.
    VTRUNC,
    // Vector integer truncate with unsigned/signed saturation.
    VTRUNCUS,
    VTRUNCS,

    // Masked version of the above. Used when less than a 128-bit result is
    // produced since the mask only applies to the lower elements and can't
    // be represented by a select.
    // SRC, PASSTHRU, MASK
    VMTRUNC,
    VMTRUNCUS,
    VMTRUNCS,
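
    // Roughly (an illustrative reading, not from the original comment):
    //   Res[i] = MASK[i] ? truncate(SRC[i]) : PASSTHRU[i]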

    // Vector FP extend.
    VFPEXT,
    VFPEXT_SAE,
    VFPEXTS,
    VFPEXTS_SAE,

    // Vector FP round.
    VFPROUND,
    VFPROUND_RND,
    VFPROUNDS,
    VFPROUNDS_RND,

    // Masked version of above. Used for v2f64->v4f32.
    // SRC, PASSTHRU, MASK
    VMFPROUND,

    // 128-bit vector logical left / right shift
    VSHLDQ,
    VSRLDQ,

    // Vector shift elements
    VSHL,
    VSRL,
    VSRA,

    // Vector variable shift
    VSHLV,
    VSRLV,
    VSRAV,

    // Vector shift elements by immediate
    VSHLI,
    VSRLI,
    VSRAI,

    // Shifts of mask registers.
    KSHIFTL,
    KSHIFTR,

    // Bit rotate by immediate
    VROTLI,
    VROTRI,

    // Vector packed double/float comparison.
    CMPP,

    // Vector integer comparisons.
    PCMPEQ,
    PCMPGT,

    // v8i16 Horizontal minimum and position.
    PHMINPOS,

    MULTISHIFT,

    /// Vector comparison generating mask bits for fp and
    /// integer signed and unsigned data types.
    CMPM,
    // Vector mask comparison generating mask bits for FP values.
    CMPMM,
    // Vector mask comparison with SAE for FP values.
    CMPMM_SAE,

    // Arithmetic operations with FLAGS results.
    ADD,
    SUB,
    ADC,
    SBB,
    SMUL,
    UMUL,
    OR,
    XOR,
    AND,

    // Bit field extract.
    BEXTR,
    BEXTRI,

    // Zero High Bits Starting with Specified Bit Position.
    BZHI,

    // Parallel extract and deposit.
    PDEP,
    PEXT,

    // X86-specific multiply by immediate.
    MUL_IMM,

    // Vector sign bit extraction.
    MOVMSK,

    // Vector bitwise comparisons.
    PTEST,

    // Vector packed fp sign bitwise comparisons.
    TESTP,

    // OR/AND test for masks.
    KORTEST,
    KTEST,

    // ADD for masks.
    KADD,

    // Several flavors of instructions with vector shuffle behaviors.
    // Saturated signed/unsigned packing.
    PACKSS,
    PACKUS,
    // Intra-lane alignr.
    PALIGNR,
    // AVX512 inter-lane alignr.
    VALIGN,
    PSHUFD,
    PSHUFHW,
    PSHUFLW,
    SHUFP,
    // VBMI2 Concat & Shift.
    VSHLD,
    VSHRD,
    VSHLDV,
    VSHRDV,
    // Shuffle Packed Values at 128-bit granularity.
    SHUF128,
    MOVDDUP,
    MOVSHDUP,
    MOVSLDUP,
    MOVLHPS,
    MOVHLPS,
    MOVSD,
    MOVSS,
    UNPCKL,
    UNPCKH,
    VPERMILPV,
    VPERMILPI,
    VPERMI,
    VPERM2X128,

    // Variable Permute (VPERM).
    // Res = VPERMV MaskV, V0
    VPERMV,

    // 3-op Variable Permute (VPERMT2).
    // Res = VPERMV3 V0, MaskV, V1
    VPERMV3,

    // Bitwise ternary logic.
    VPTERNLOG,
    // Fix Up Special Packed Float32/64 values.
    VFIXUPIMM,
    VFIXUPIMM_SAE,
    VFIXUPIMMS,
    VFIXUPIMMS_SAE,
    // Range Restriction Calculation For Packed Pairs of Float32/64 values.
    VRANGE,
    VRANGE_SAE,
    VRANGES,
    VRANGES_SAE,
    // Reduce - Perform Reduction Transformation on scalar/packed FP.
    VREDUCE,
    VREDUCE_SAE,
    VREDUCES,
    VREDUCES_SAE,
    // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
    // Also used by the legacy (V)ROUND intrinsics where we mask out the
    // scaling part of the immediate.
    VRNDSCALE,
    VRNDSCALE_SAE,
    VRNDSCALES,
    VRNDSCALES_SAE,
    // Tests types of FP values, for packed types.
    VFPCLASS,
    // Tests types of FP values, for scalar types.
    VFPCLASSS,

    // Broadcast (splat) scalar or element 0 of a vector. If the operand is
    // a vector, this node may change the vector length as part of the splat.
    VBROADCAST,
    // Broadcast mask to vector.
    VBROADCASTM,

    /// SSE4A Extraction and Insertion.
    EXTRQI,
    INSERTQI,

    // XOP arithmetic/logical shifts.
    VPSHA,
    VPSHL,
    // XOP signed/unsigned integer comparisons.
    VPCOM,
    VPCOMU,
    // XOP packed permute bytes.
    VPPERM,
    // XOP two source permutation.
    VPERMIL2,

    // Vector multiply packed unsigned doubleword integers.
    PMULUDQ,
    // Vector multiply packed signed doubleword integers.
    PMULDQ,
    // Vector Multiply Packed Unsigned Integers with Round and Scale.
    MULHRS,

    // Multiply and Add Packed Integers.
    VPMADDUBSW,
    VPMADDWD,

    // AVX512IFMA multiply and add.
    // NOTE: These are different from the instruction and perform
    // op0 x op1 + op2.
    VPMADD52L,
    VPMADD52H,
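
    // An illustrative reading (not part of the original comment): per 64-bit
    // lane, with 52-bit unsigned multiplicands,
    //   VPMADD52L: Res[i] = low 52 bits of (op0[i] * op1[i]) + op2[i]
    //   VPMADD52H: Res[i] = high 52 bits of (op0[i] * op1[i]) + op2[i]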

    // VNNI
    VPDPBUSD,
    VPDPBUSDS,
    VPDPWSSD,
    VPDPWSSDS,

    // FMA nodes.
    // We use the target independent ISD::FMA for the non-inverted case.
    FNMADD,
    FMSUB,
    FNMSUB,
    FMADDSUB,
    FMSUBADD,

    // FMA with rounding mode.
    FMADD_RND,
    FNMADD_RND,
    FMSUB_RND,
    FNMSUB_RND,
    FMADDSUB_RND,
    FMSUBADD_RND,

    // Compress and expand.
    COMPRESS,
    EXPAND,

    // Bits shuffle
    VPSHUFBITQMB,

    // Convert Unsigned/Signed Integer to Floating-Point with rounding mode.
    SINT_TO_FP_RND,
    UINT_TO_FP_RND,
    SCALAR_SINT_TO_FP,
    SCALAR_UINT_TO_FP,
    SCALAR_SINT_TO_FP_RND,
    SCALAR_UINT_TO_FP_RND,

    // Vector float/double to signed/unsigned integer.
    CVTP2SI,
    CVTP2UI,
    CVTP2SI_RND,
    CVTP2UI_RND,
    // Scalar float/double to signed/unsigned integer.
    CVTS2SI,
    CVTS2UI,
    CVTS2SI_RND,
    CVTS2UI_RND,

    // Vector float/double to signed/unsigned integer with truncation.
    CVTTP2SI,
    CVTTP2UI,
    CVTTP2SI_SAE,
    CVTTP2UI_SAE,
    // Scalar float/double to signed/unsigned integer with truncation.
    CVTTS2SI,
    CVTTS2UI,
    CVTTS2SI_SAE,
    CVTTS2UI_SAE,

    // Vector signed/unsigned integer to float/double.
    CVTSI2P,
    CVTUI2P,

    // Masked versions of above. Used for v2f64->v4f32.
    // SRC, PASSTHRU, MASK
    MCVTP2SI,
    MCVTP2UI,
    MCVTTP2SI,
    MCVTTP2UI,
    MCVTSI2P,
    MCVTUI2P,

    // Vector float to bfloat16.
    // Convert TWO packed single data to one packed BF16 data
    CVTNE2PS2BF16,
    // Convert packed single data to packed BF16 data
    CVTNEPS2BF16,
    // Masked version of above.
    // SRC, PASSTHRU, MASK
    MCVTNEPS2BF16,

    // Dot product of BF16 pairs, accumulated into
    // packed single precision.
    DPBF16PS,

    // Save xmm argument registers to the stack, according to %al. An operator
    // is needed so that this can be expanded with control flow.
    VASTART_SAVE_XMM_REGS,

    // Windows's _chkstk call to do stack probing.
    WIN_ALLOCA,

    // For allocating variable amounts of stack space when using
    // segmented stacks. Checks whether the current stacklet has enough space,
    // and falls back to heap allocation if not.
    SEG_ALLOCA,

    // For allocating stack space when using stack clash protector.
    // Allocation is performed by block, and each block is probed.
    PROBED_ALLOCA,

    // Memory barriers.
    MEMBARRIER,
    MFENCE,

    // Get a random integer and indicate whether it is valid in CF.
    RDRAND,

    // Get a NIST SP800-90B & C compliant random integer and
    // indicate whether it is valid in CF.
    RDSEED,

    // Protection keys
    // RDPKRU - Operand 0 is chain. Operand 1 is value for ECX.
    // WRPKRU - Operand 0 is chain. Operand 1 is value for EDX. Operand 2 is
    // value for ECX.
    RDPKRU,
    WRPKRU,

    // SSE42 string comparisons.
    // These nodes produce 3 results: index, mask, and flags. X86ISelDAGToDAG
    // will emit one or two instructions based on which results are used. If
    // flags and index/mask are both used, this allows us to use a single
    // instruction since we won't have to pick an opcode for flags. Instead we
    // can rely on the DAG to CSE everything and decide at isel.
    PCMPISTR,
    PCMPESTR,

    // Test if in transactional execution.
    XTEST,

    // ERI instructions.
    RSQRT28,
    RSQRT28_SAE,
    RSQRT28S,
    RSQRT28S_SAE,
    RCP28,
    RCP28_SAE,
    RCP28S,
    RCP28S_SAE,
    EXP2,
    EXP2_SAE,

    // Conversions between float and half-float.
    CVTPS2PH,
    CVTPH2PS,
    CVTPH2PS_SAE,

    // Masked version of above.
    // SRC, RND, PASSTHRU, MASK
    MCVTPS2PH,

    // Galois Field Arithmetic Instructions
    GF2P8AFFINEINVQB,
    GF2P8AFFINEQB,
    GF2P8MULB,

    // LWP insert record.
    LWPINS,

    // User level wait
    UMWAIT,
    TPAUSE,

    // Enqueue Stores Instructions
    ENQCMD,
    ENQCMDS,

    // For avx512-vp2intersect
    VP2INTERSECT,

    // User level interrupts - testui
    TESTUI,

    /// X86 strict FP compare instructions.
    STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
    STRICT_FCMPS,

    // Vector packed double/float comparison.
    STRICT_CMPP,

    /// Vector comparison generating mask bits for fp and
    /// integer signed and unsigned data types.
    STRICT_CMPM,

    // Vector float/double to signed/unsigned integer with truncation.
    STRICT_CVTTP2SI,
    STRICT_CVTTP2UI,

    // Vector FP extend.
    STRICT_VFPEXT,

    // Vector FP round.
    STRICT_VFPROUND,

    // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
    // Also used by the legacy (V)ROUND intrinsics where we mask out the
    // scaling part of the immediate.
    STRICT_VRNDSCALE,

    // Vector signed/unsigned integer to float/double.
    STRICT_CVTSI2P,
    STRICT_CVTUI2P,

    // Strict FMA nodes.
    STRICT_FNMADD,
    STRICT_FMSUB,
    STRICT_FNMSUB,

    // Conversions between float and half-float.
    STRICT_CVTPS2PH,
    STRICT_CVTPH2PS,

    // WARNING: Only add nodes here if they are strict FP nodes. Non-memory and
    // non-strict FP nodes should be above FIRST_TARGET_STRICTFP_OPCODE.

    // Compare and swap.
    LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
    LCMPXCHG8_DAG,
    LCMPXCHG16_DAG,
    LCMPXCHG16_SAVE_RBX_DAG,

    /// LOCK-prefixed arithmetic read-modify-write instructions.
    /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
    LADD,
    LSUB,
    LOR,
    LXOR,
    LAND,

    // Load, scalar_to_vector, and zero extend.
    VZEXT_LOAD,

    // extract_vector_elt, store.
    VEXTRACT_STORE,

    // Scalar broadcast from memory.
    VBROADCAST_LOAD,

    // Subvector broadcast from memory.
    SUBV_BROADCAST_LOAD,

    // Store FP control word into i16 memory.
    FNSTCW16m,

    // Load FP control word from i16 memory.
    FLDCW16m,

    /// This instruction implements FP_TO_SINT with the
    /// integer destination in memory and an FP reg source.  This corresponds
    /// to the X86::FIST*m instructions and the associated rounding mode
    /// changes. It has two inputs (token chain and address) and two outputs
    /// (int value and token chain). Memory VT specifies the type to store to.
    FP_TO_INT_IN_MEM,

    /// This instruction implements SINT_TO_FP with the
    /// integer source in memory and FP reg result.  This corresponds to the
    /// X86::FILD*m instructions. It has two inputs (token chain and address)
    /// and two outputs (FP value and token chain). The integer source type is
    /// specified by the memory VT.
    FILD,

    /// This instruction implements a fp->int store from FP stack
    /// slots. This corresponds to the fist instruction. It takes a
    /// chain operand, value to store, address, and glue. The memory VT
    /// specifies the type to store as.
    FIST,

    /// This instruction implements an extending load to FP stack slots.
    /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
    /// operand, and ptr to load from. The memory VT specifies the type to
    /// load from.
    FLD,

    /// This instruction implements a truncating store from FP stack
    /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
    /// chain operand, value to store, address, and glue. The memory VT
    /// specifies the type to store as.
    FST,

    /// These instructions grab the address of the next argument
    /// from a va_list. (reads and modifies the va_list in memory)
    VAARG_64,
    VAARG_X32,

    // Vector truncating store with unsigned/signed saturation
    VTRUNCSTOREUS,
    VTRUNCSTORES,
    // Vector truncating masked store with unsigned/signed saturation
    VMTRUNCSTOREUS,
    VMTRUNCSTORES,

    // X86 specific gather and scatter
    MGATHER,
    MSCATTER,

    // Key locker nodes that produce flags.
    AESENC128KL,
    AESDEC128KL,
    AESENC256KL,
    AESDEC256KL,
    AESENCWIDE128KL,
    AESDECWIDE128KL,
    AESENCWIDE256KL,
    AESDECWIDE256KL,

    // WARNING: Do not add anything at the end unless you want the node to
    // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
    // opcodes will be treated as target memory ops!
  };
  } // end namespace X86ISD

  namespace X86 {
    /// Current rounding mode is represented in bits 11:10 of FPSR. These
    /// values are the same as the corresponding rounding mode constants in glibc.
    enum RoundingMode {
      rmToNearest   = 0,        // FE_TONEAREST
      rmDownward    = 1 << 10,  // FE_DOWNWARD
      rmUpward      = 2 << 10,  // FE_UPWARD
      rmTowardZero  = 3 << 10,  // FE_TOWARDZERO
      rmMask        = 3 << 10   // Bit mask selecting rounding mode
    };
  }
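
  // Illustrative usage sketch (assumed variable names, not code from this
  // header): given a 16-bit value RM holding the FPU rounding-control field
  // (for example, as stored to memory by FNSTCW), the rounding mode can be
  // inspected directly with the constants above, e.g.
  //
  //   bool RoundsDown = (RM & X86::rmMask) == X86::rmDownward;
  //
  // No extra shifting is needed because the enumerators already encode the
  // bit positions.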

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// Returns true if Elt is a constant zero or floating point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// Returns true if the given offset can fit into the displacement field
    /// of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement);

    /// Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool GuaranteeTCO);

    /// If Op is a constant whose elements are all the same constant or
    /// undefined, return true and return the constant value in \p SplatVal.
    /// If we have undef bits that don't cover an entire element, we treat these
    /// as zero if AllowPartialUndefs is set, else we fail and return false.
    bool isConstantSplat(SDValue Op, APInt &SplatVal,
                         bool AllowPartialUndefs = true);
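
    // Illustrative usage sketch (assumed surrounding code, not taken from
    // this header): a caller with an SDValue Op in hand might write
    //
    //   APInt SplatVal;
    //   if (X86::isConstantSplat(Op, SplatVal) && SplatVal.isSignMask())
    //     ...;  // Op splats the sign-bit mask into every element.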
  } // end namespace X86

  //===--------------------------------------------------------------------===//
  //  X86 Implementation of the TargetLowering interface
  class X86TargetLowering final : public TargetLowering {
  public:
    explicit X86TargetLowering(const X86TargetMachine &TM,
                               const X86Subtarget &STI);

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                               ArgListTy &Args) const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override {
      return MVT::i8;
    }

    const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const override;

    /// Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    EVT getOptimalMemOpType(const MemOp &Op,
                            const AttributeList &FuncAttributes) const override;

    /// Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2, f64 load / store are done with fldl / fstpl,
    /// which also do a type conversion. Note the specified type doesn't have
    /// to be legal as the hook is used before type legalization.
    bool isSafeMemOpType(MVT VT) const override;

    /// Returns true if the target allows unaligned memory accesses of the
    /// specified type. Returns whether it is "fast" in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                        MachineMemOperand::Flags Flags,
                                        bool *Fast) const override;

    /// Provide custom lowering hooks for some operations.
    ///
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// Replace the results of a node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                            SelectionDAG &DAG) const override;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    /// Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

    /// Return true if the target has native support for the
    /// specified value type and it is 'desirable' to use the type. e.g. On x86
    /// i16 is legal, but undesirable since i16 instruction encodings are longer
    /// and some i16 instructions are slow.
    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;

    /// Return the newly negated expression if the cost is not expensive and
    /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
    /// do the negation.
    SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                 bool LegalOperations, bool ForCodeSize,
                                 NegatibleCost &Cost,
                                 unsigned Depth) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    /// This method returns the name of a target specific DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// Do not merge vector stores after legalization because that may conflict
    /// with x86-specific store splitting optimizations.
    bool mergeStoresAfterLegalization(EVT MemVT) const override {
      return !MemVT.isVector();
    }

    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                          const SelectionDAG &DAG) const override;

    bool isCheapToSpeculateCttz() const override;

    bool isCheapToSpeculateCtlz() const override;

    bool isCtlzFast() const override;

    bool hasBitPreservingFPLogic(EVT VT) const override {
      return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
    }

    bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
      // If the pair to store is a mixture of float and int values, we will
      // save two bitwise instructions and one float-to-int instruction and
      // add one store instruction. There is potentially a more significant
      // benefit because it avoids the float->int domain switch for the input
      // value. So it is more likely a win.
      if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
          (LTy.isInteger() && HTy.isFloatingPoint()))
        return true;
      // If the pair only contains int values, we will save two bitwise
      // instructions and add one store instruction (costing one more store
      // buffer). Since the benefit is less clear, we leave such pairs out
      // until we have a test case to prove it is a win.
      return false;
    }


    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    bool hasAndNotCompare(SDValue Y) const override;

    bool hasAndNot(SDValue Y) const override;

    bool hasBitTest(SDValue X, SDValue Y) const override;

    bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const override;

    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                           CombineLevel Level) const override;

    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;

    bool
    shouldTransformSignedTruncationCheck(EVT XVT,
                                         unsigned KeptBits) const override {
10410b57cec5SDimitry Andric       // For vectors, we don't have a preference.
10420b57cec5SDimitry Andric       if (XVT.isVector())
10430b57cec5SDimitry Andric         return false;
10440b57cec5SDimitry Andric 
10450b57cec5SDimitry Andric       auto VTIsOk = [](EVT VT) -> bool {
10460b57cec5SDimitry Andric         return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
10470b57cec5SDimitry Andric                VT == MVT::i64;
10480b57cec5SDimitry Andric       };
10490b57cec5SDimitry Andric 
10500b57cec5SDimitry Andric       // We are OK with KeptBitsVT being byte/word/dword, which is what MOVSX
10510b57cec5SDimitry Andric       // supports. XVT will be larger than KeptBitsVT.
10520b57cec5SDimitry Andric       MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
10530b57cec5SDimitry Andric       return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
10540b57cec5SDimitry Andric     }
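    // Hedged example, following the generic TargetLowering description of this
    // hook: the canonical DAG form of "does %x fit in a signed i8?" is the
    // unsigned range check
    //   (add %x, 128) u< 256
    // With XVT = i32 and KeptBits = 8 this returns true, so the check may be
    // rewritten back into the sign-extension form
    //   (sext_inreg %x, i8) == %x
    // which is cheap thanks to MOVSX; for vector XVT we return false.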
10550b57cec5SDimitry Andric 
10560b57cec5SDimitry Andric     bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
10570b57cec5SDimitry Andric 
10580b57cec5SDimitry Andric     bool shouldSplatInsEltVarIndex(EVT VT) const override;
10590b57cec5SDimitry Andric 
10600b57cec5SDimitry Andric     bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
10610b57cec5SDimitry Andric       return VT.isScalarInteger();
10620b57cec5SDimitry Andric     }
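    // Hedged sketch of the combine this enables for scalar integers (per the
    // generic hook's documentation):
    //   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
    // i.e. two cmp+setcc pairs collapse into xor/or feeding a single compare.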
10630b57cec5SDimitry Andric 
10640b57cec5SDimitry Andric     /// Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
10650b57cec5SDimitry Andric     MVT hasFastEqualityCompare(unsigned NumBits) const override;
10660b57cec5SDimitry Andric 
10670b57cec5SDimitry Andric     /// Return the value type to use for ISD::SETCC.
10680b57cec5SDimitry Andric     EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
10690b57cec5SDimitry Andric                            EVT VT) const override;
10700b57cec5SDimitry Andric 
10715ffd83dbSDimitry Andric     bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
10725ffd83dbSDimitry Andric                                       const APInt &DemandedElts,
10730b57cec5SDimitry Andric                                       TargetLoweringOpt &TLO) const override;
10740b57cec5SDimitry Andric 
10750b57cec5SDimitry Andric     /// Determine which of the bits specified in Mask are known to be either
10760b57cec5SDimitry Andric     /// zero or one and return them in the Known.Zero/Known.One bitsets.
10770b57cec5SDimitry Andric     void computeKnownBitsForTargetNode(const SDValue Op,
10780b57cec5SDimitry Andric                                        KnownBits &Known,
10790b57cec5SDimitry Andric                                        const APInt &DemandedElts,
10800b57cec5SDimitry Andric                                        const SelectionDAG &DAG,
10810b57cec5SDimitry Andric                                        unsigned Depth = 0) const override;
10820b57cec5SDimitry Andric 
10830b57cec5SDimitry Andric     /// Determine the number of bits in the operation that are sign bits.
10840b57cec5SDimitry Andric     unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
10850b57cec5SDimitry Andric                                              const APInt &DemandedElts,
10860b57cec5SDimitry Andric                                              const SelectionDAG &DAG,
10870b57cec5SDimitry Andric                                              unsigned Depth) const override;
10880b57cec5SDimitry Andric 
10890b57cec5SDimitry Andric     bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op,
10900b57cec5SDimitry Andric                                                  const APInt &DemandedElts,
10910b57cec5SDimitry Andric                                                  APInt &KnownUndef,
10920b57cec5SDimitry Andric                                                  APInt &KnownZero,
10930b57cec5SDimitry Andric                                                  TargetLoweringOpt &TLO,
10940b57cec5SDimitry Andric                                                  unsigned Depth) const override;
10950b57cec5SDimitry Andric 
10965ffd83dbSDimitry Andric     bool SimplifyDemandedVectorEltsForTargetShuffle(SDValue Op,
10975ffd83dbSDimitry Andric                                                     const APInt &DemandedElts,
10985ffd83dbSDimitry Andric                                                     unsigned MaskIndex,
10995ffd83dbSDimitry Andric                                                     TargetLoweringOpt &TLO,
11005ffd83dbSDimitry Andric                                                     unsigned Depth) const;
11015ffd83dbSDimitry Andric 
11020b57cec5SDimitry Andric     bool SimplifyDemandedBitsForTargetNode(SDValue Op,
11030b57cec5SDimitry Andric                                            const APInt &DemandedBits,
11040b57cec5SDimitry Andric                                            const APInt &DemandedElts,
11050b57cec5SDimitry Andric                                            KnownBits &Known,
11060b57cec5SDimitry Andric                                            TargetLoweringOpt &TLO,
11070b57cec5SDimitry Andric                                            unsigned Depth) const override;
11080b57cec5SDimitry Andric 
11098bcb0991SDimitry Andric     SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
11108bcb0991SDimitry Andric         SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
11118bcb0991SDimitry Andric         SelectionDAG &DAG, unsigned Depth) const override;
11128bcb0991SDimitry Andric 
11130b57cec5SDimitry Andric     const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
11140b57cec5SDimitry Andric 
11150b57cec5SDimitry Andric     SDValue unwrapAddress(SDValue N) const override;
11160b57cec5SDimitry Andric 
11170b57cec5SDimitry Andric     SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
11180b57cec5SDimitry Andric 
11190b57cec5SDimitry Andric     bool ExpandInlineAsm(CallInst *CI) const override;
11200b57cec5SDimitry Andric 
11210b57cec5SDimitry Andric     ConstraintType getConstraintType(StringRef Constraint) const override;
11220b57cec5SDimitry Andric 
11230b57cec5SDimitry Andric     /// Examine constraint string and operand type and determine a weight value.
11240b57cec5SDimitry Andric     /// The operand object must already have been set up with the operand type.
11250b57cec5SDimitry Andric     ConstraintWeight
11260b57cec5SDimitry Andric       getSingleConstraintMatchWeight(AsmOperandInfo &info,
11270b57cec5SDimitry Andric                                      const char *constraint) const override;
11280b57cec5SDimitry Andric 
11290b57cec5SDimitry Andric     const char *LowerXConstraint(EVT ConstraintVT) const override;
11300b57cec5SDimitry Andric 
11310b57cec5SDimitry Andric     /// Lower the specified operand into the Ops vector. If it is invalid, don't
11320b57cec5SDimitry Andric     /// add anything to Ops. If hasMemory is true, it means one of the asm
11330b57cec5SDimitry Andric     /// constraints of the inline asm instruction being processed is 'm'.
11340b57cec5SDimitry Andric     void LowerAsmOperandForConstraint(SDValue Op,
11350b57cec5SDimitry Andric                                       std::string &Constraint,
11360b57cec5SDimitry Andric                                       std::vector<SDValue> &Ops,
11370b57cec5SDimitry Andric                                       SelectionDAG &DAG) const override;
11380b57cec5SDimitry Andric 
11390b57cec5SDimitry Andric     unsigned
11400b57cec5SDimitry Andric     getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
1141*fe6060f1SDimitry Andric       if (ConstraintCode == "v")
11420b57cec5SDimitry Andric         return InlineAsm::Constraint_v;
11430b57cec5SDimitry Andric       return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11440b57cec5SDimitry Andric     }
11450b57cec5SDimitry Andric 
11460b57cec5SDimitry Andric     /// Handle Lowering flag assembly outputs.
1147e8d8bef9SDimitry Andric     SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
1148e8d8bef9SDimitry Andric                                         const SDLoc &DL,
11490b57cec5SDimitry Andric                                         const AsmOperandInfo &Constraint,
11500b57cec5SDimitry Andric                                         SelectionDAG &DAG) const override;
11510b57cec5SDimitry Andric 
11520b57cec5SDimitry Andric     /// Given a physical register constraint
11530b57cec5SDimitry Andric     /// (e.g. {edx}), return the register number and the register class for the
11540b57cec5SDimitry Andric     /// register.  This should only be used for C_Register constraints.  On
11550b57cec5SDimitry Andric     /// error, this returns a register number of 0.
11560b57cec5SDimitry Andric     std::pair<unsigned, const TargetRegisterClass *>
11570b57cec5SDimitry Andric     getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11580b57cec5SDimitry Andric                                  StringRef Constraint, MVT VT) const override;
11590b57cec5SDimitry Andric 
11600b57cec5SDimitry Andric     /// Return true if the addressing mode represented
11610b57cec5SDimitry Andric     /// by AM is legal for this target, for a load/store of the specified type.
11620b57cec5SDimitry Andric     bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
11630b57cec5SDimitry Andric                                Type *Ty, unsigned AS,
11640b57cec5SDimitry Andric                                Instruction *I = nullptr) const override;
11650b57cec5SDimitry Andric 
11660b57cec5SDimitry Andric     /// Return true if the specified immediate is a legal
11670b57cec5SDimitry Andric     /// icmp immediate, that is, the target has icmp instructions which can
11680b57cec5SDimitry Andric     /// compare a register against the immediate without having to materialize
11690b57cec5SDimitry Andric     /// the immediate into a register.
11700b57cec5SDimitry Andric     bool isLegalICmpImmediate(int64_t Imm) const override;
11710b57cec5SDimitry Andric 
11720b57cec5SDimitry Andric     /// Return true if the specified immediate is a legal
11730b57cec5SDimitry Andric     /// add immediate, that is, the target has add instructions which can
11740b57cec5SDimitry Andric     /// add a register and the immediate without having to materialize
11750b57cec5SDimitry Andric     /// the immediate into a register.
11760b57cec5SDimitry Andric     bool isLegalAddImmediate(int64_t Imm) const override;
11770b57cec5SDimitry Andric 
11780b57cec5SDimitry Andric     bool isLegalStoreImmediate(int64_t Imm) const override;
11790b57cec5SDimitry Andric 
11800b57cec5SDimitry Andric     /// Return the cost of the scaling factor used in the addressing
11810b57cec5SDimitry Andric     /// mode represented by AM for this target, for a load/store
11820b57cec5SDimitry Andric     /// of the specified type.
11830b57cec5SDimitry Andric     /// If the AM is supported, the return value must be >= 0.
11840b57cec5SDimitry Andric     /// If the AM is not supported, it returns a negative value.
1185*fe6060f1SDimitry Andric     InstructionCost getScalingFactorCost(const DataLayout &DL,
1186*fe6060f1SDimitry Andric                                          const AddrMode &AM, Type *Ty,
11870b57cec5SDimitry Andric                                          unsigned AS) const override;
11880b57cec5SDimitry Andric 
11895ffd83dbSDimitry Andric     /// This is used to enable splatted operand transforms for vector shifts
11905ffd83dbSDimitry Andric     /// and vector funnel shifts.
11910b57cec5SDimitry Andric     bool isVectorShiftByScalarCheap(Type *Ty) const override;
11920b57cec5SDimitry Andric 
11930b57cec5SDimitry Andric     /// Add x86-specific opcodes to the default list.
11940b57cec5SDimitry Andric     bool isBinOp(unsigned Opcode) const override;
11950b57cec5SDimitry Andric 
11960b57cec5SDimitry Andric     /// Returns true if the opcode is a commutative binary operation.
11970b57cec5SDimitry Andric     bool isCommutativeBinOp(unsigned Opcode) const override;
11980b57cec5SDimitry Andric 
11990b57cec5SDimitry Andric     /// Return true if it's free to truncate a value of
12000b57cec5SDimitry Andric     /// type Ty1 to type Ty2. E.g. on x86 it's free to truncate an i32 value in
12010b57cec5SDimitry Andric     /// register EAX to i16 by referencing its sub-register AX.
12020b57cec5SDimitry Andric     bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
12030b57cec5SDimitry Andric     bool isTruncateFree(EVT VT1, EVT VT2) const override;
12040b57cec5SDimitry Andric 
12050b57cec5SDimitry Andric     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
12060b57cec5SDimitry Andric 
12070b57cec5SDimitry Andric     /// Return true if any actual instruction that defines a
12080b57cec5SDimitry Andric     /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
12090b57cec5SDimitry Andric     /// register. This does not necessarily include registers defined in
12100b57cec5SDimitry Andric     /// unknown ways, such as incoming arguments, or copies from unknown
12110b57cec5SDimitry Andric     /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
12120b57cec5SDimitry Andric     /// does not necessarily apply to truncate instructions. E.g. on x86-64,
12130b57cec5SDimitry Andric     /// all instructions that define 32-bit values implicitly zero-extend the
12140b57cec5SDimitry Andric     /// result out to 64 bits.
12150b57cec5SDimitry Andric     bool isZExtFree(Type *Ty1, Type *Ty2) const override;
12160b57cec5SDimitry Andric     bool isZExtFree(EVT VT1, EVT VT2) const override;
12170b57cec5SDimitry Andric     bool isZExtFree(SDValue Val, EVT VT2) const override;
12180b57cec5SDimitry Andric 
12195ffd83dbSDimitry Andric     bool shouldSinkOperands(Instruction *I,
12205ffd83dbSDimitry Andric                             SmallVectorImpl<Use *> &Ops) const override;
12215ffd83dbSDimitry Andric     bool shouldConvertPhiType(Type *From, Type *To) const override;
12225ffd83dbSDimitry Andric 
12230b57cec5SDimitry Andric     /// Return true if folding a vector load into ExtVal (a sign, zero, or any
12240b57cec5SDimitry Andric     /// extend node) is profitable.
12250b57cec5SDimitry Andric     bool isVectorLoadExtDesirable(SDValue) const override;
12260b57cec5SDimitry Andric 
12270b57cec5SDimitry Andric     /// Return true if an FMA operation is faster than a pair of fmul and fadd
12280b57cec5SDimitry Andric     /// instructions. fmuladd intrinsics will be expanded to FMAs when this
12290b57cec5SDimitry Andric     /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
1230480093f4SDimitry Andric     bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
1231480093f4SDimitry Andric                                     EVT VT) const override;
12320b57cec5SDimitry Andric 
12330b57cec5SDimitry Andric     /// Return true if it's profitable to narrow
12340b57cec5SDimitry Andric     /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
12350b57cec5SDimitry Andric     /// from i32 to i8 but not from i32 to i16.
12360b57cec5SDimitry Andric     bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
12370b57cec5SDimitry Andric 
12380b57cec5SDimitry Andric     /// Given an intrinsic, checks if on the target the intrinsic will need to map
12390b57cec5SDimitry Andric     /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
12400b57cec5SDimitry Andric     /// true and stores the intrinsic information into the IntrinsicInfo that was
12410b57cec5SDimitry Andric     /// passed to the function.
12420b57cec5SDimitry Andric     bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
12430b57cec5SDimitry Andric                             MachineFunction &MF,
12440b57cec5SDimitry Andric                             unsigned Intrinsic) const override;
12450b57cec5SDimitry Andric 
12460b57cec5SDimitry Andric     /// Returns true if the target can instruction select the
12470b57cec5SDimitry Andric     /// specified FP immediate natively. If false, the legalizer will
12480b57cec5SDimitry Andric     /// materialize the FP immediate as a load from a constant pool.
12490b57cec5SDimitry Andric     bool isFPImmLegal(const APFloat &Imm, EVT VT,
12500b57cec5SDimitry Andric                       bool ForCodeSize) const override;
12510b57cec5SDimitry Andric 
12520b57cec5SDimitry Andric     /// Targets can use this to indicate that they only support *some*
12530b57cec5SDimitry Andric     /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
12540b57cec5SDimitry Andric     /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
12550b57cec5SDimitry Andric     /// be legal.
12560b57cec5SDimitry Andric     bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
12570b57cec5SDimitry Andric 
12580b57cec5SDimitry Andric     /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
12590b57cec5SDimitry Andric     /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
12600b57cec5SDimitry Andric     /// constant pool entry.
12610b57cec5SDimitry Andric     bool isVectorClearMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
12620b57cec5SDimitry Andric 
12630b57cec5SDimitry Andric     /// Returns true if lowering to a jump table is allowed.
12640b57cec5SDimitry Andric     bool areJTsAllowed(const Function *Fn) const override;
12650b57cec5SDimitry Andric 
12660b57cec5SDimitry Andric     /// If true, then instruction selection should
12670b57cec5SDimitry Andric     /// seek to shrink the FP constant of the specified type to a smaller type
12680b57cec5SDimitry Andric     /// in order to save space and / or reduce runtime.
12690b57cec5SDimitry Andric     bool ShouldShrinkFPConstant(EVT VT) const override {
12700b57cec5SDimitry Andric       // Don't shrink FP constant-pool entries if SSE2 is available, since
12710b57cec5SDimitry Andric       // cvtss2sd is more expensive than a straight movsd. On the other hand,
12720b57cec5SDimitry Andric       // it's important to shrink long double FP constants since fldt is very slow.
12730b57cec5SDimitry Andric       return !X86ScalarSSEf64 || VT == MVT::f80;
12740b57cec5SDimitry Andric     }
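    // Illustrative results of that rule on an SSE2-capable target (assumed):
    //   ShouldShrinkFPConstant(MVT::f64) -> false (just movsd the double)
    //   ShouldShrinkFPConstant(MVT::f80) -> true  (avoid the slow fldt)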
12750b57cec5SDimitry Andric 
12760b57cec5SDimitry Andric     /// Return true if we believe it is correct and profitable to reduce the
12770b57cec5SDimitry Andric     /// load node to a smaller type.
12780b57cec5SDimitry Andric     bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
12790b57cec5SDimitry Andric                                EVT NewVT) const override;
12800b57cec5SDimitry Andric 
12810b57cec5SDimitry Andric     /// Return true if the specified scalar FP type is computed in an SSE
12820b57cec5SDimitry Andric     /// register, not on the X87 floating point stack.
12830b57cec5SDimitry Andric     bool isScalarFPTypeInSSEReg(EVT VT) const {
12840b57cec5SDimitry Andric       return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
12850b57cec5SDimitry Andric              (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
12860b57cec5SDimitry Andric     }
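    // E.g. with the x86-64 SSE2 baseline both MVT::f32 and MVT::f64 report
    // true here, while MVT::f80 stays on the x87 stack and reports false.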
12870b57cec5SDimitry Andric 
12880b57cec5SDimitry Andric     /// Returns true if it is beneficial to convert a load of a constant
12890b57cec5SDimitry Andric     /// to just the constant itself.
12900b57cec5SDimitry Andric     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
12910b57cec5SDimitry Andric                                            Type *Ty) const override;
12920b57cec5SDimitry Andric 
12938bcb0991SDimitry Andric     bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const override;
12940b57cec5SDimitry Andric 
12950b57cec5SDimitry Andric     bool convertSelectOfConstantsToMath(EVT VT) const override;
12960b57cec5SDimitry Andric 
12978bcb0991SDimitry Andric     bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
12988bcb0991SDimitry Andric                                 SDValue C) const override;
12990b57cec5SDimitry Andric 
13000b57cec5SDimitry Andric     /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
13010b57cec5SDimitry Andric     /// with this index.
13020b57cec5SDimitry Andric     bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
13030b57cec5SDimitry Andric                                  unsigned Index) const override;
13040b57cec5SDimitry Andric 
13050b57cec5SDimitry Andric     /// Scalar ops always have equal or better analysis/performance/power than
13060b57cec5SDimitry Andric     /// the vector equivalent, so this always makes sense if the scalar op is
13070b57cec5SDimitry Andric     /// supported.
13080b57cec5SDimitry Andric     bool shouldScalarizeBinop(SDValue) const override;
13090b57cec5SDimitry Andric 
13100b57cec5SDimitry Andric     /// Extract of a scalar FP value from index 0 of a vector is free.
13110b57cec5SDimitry Andric     bool isExtractVecEltCheap(EVT VT, unsigned Index) const override {
13120b57cec5SDimitry Andric       EVT EltVT = VT.getScalarType();
13130b57cec5SDimitry Andric       return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
13140b57cec5SDimitry Andric     }
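    // E.g. for a <4 x float> vector (illustrative values):
    //   isExtractVecEltCheap(MVT::v4f32, /*Index=*/0) -> true  (low lane)
    //   isExtractVecEltCheap(MVT::v4f32, /*Index=*/1) -> false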
13150b57cec5SDimitry Andric 
13160b57cec5SDimitry Andric     /// Overflow nodes should get combined/lowered to optimal instructions
13170b57cec5SDimitry Andric     /// (they should allow eliminating explicit compares by getting flags from
13180b57cec5SDimitry Andric     /// math ops).
13195ffd83dbSDimitry Andric     bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
13205ffd83dbSDimitry Andric                               bool MathUsed) const override;
13210b57cec5SDimitry Andric 
13220b57cec5SDimitry Andric     bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem,
13230b57cec5SDimitry Andric                                       unsigned AddrSpace) const override {
13240b57cec5SDimitry Andric       // If we can replace more than 2 scalar stores, there will be a reduction
13250b57cec5SDimitry Andric       // in instructions even after we add a vector constant load.
13260b57cec5SDimitry Andric       return NumElem > 2;
13270b57cec5SDimitry Andric     }
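    // Illustrative: replacing four scalar constant stores with one constant
    // vector load plus one vector store is a net instruction win (4 > 2),
    // whereas a two-element case is left as scalar stores.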
13280b57cec5SDimitry Andric 
13290b57cec5SDimitry Andric     bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
13300b57cec5SDimitry Andric                                  const SelectionDAG &DAG,
13310b57cec5SDimitry Andric                                  const MachineMemOperand &MMO) const override;
13320b57cec5SDimitry Andric 
13330b57cec5SDimitry Andric     /// x86 keeps its instruction and data caches coherent, so there is no
13330b57cec5SDimitry Andric     /// cache-clearing builtin to name.
13340b57cec5SDimitry Andric     const char * getClearCacheBuiltinName() const override {
13350b57cec5SDimitry Andric       return nullptr; // nothing to do, move along.
13360b57cec5SDimitry Andric     }
13370b57cec5SDimitry Andric 
1338480093f4SDimitry Andric     Register getRegisterByName(const char* RegName, LLT VT,
13398bcb0991SDimitry Andric                                const MachineFunction &MF) const override;
13400b57cec5SDimitry Andric 
13410b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
13420b57cec5SDimitry Andric     /// exception address on entry to an EH pad.
13435ffd83dbSDimitry Andric     Register
13440b57cec5SDimitry Andric     getExceptionPointerRegister(const Constant *PersonalityFn) const override;
13450b57cec5SDimitry Andric 
13460b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
13470b57cec5SDimitry Andric     /// exception typeid on entry to a landing pad.
13485ffd83dbSDimitry Andric     Register
13490b57cec5SDimitry Andric     getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
13500b57cec5SDimitry Andric 
13510b57cec5SDimitry Andric     bool needsFixedCatchObjects() const override;
13520b57cec5SDimitry Andric 
13530b57cec5SDimitry Andric     /// This method returns a target specific FastISel object,
13540b57cec5SDimitry Andric     /// or null if the target does not support "fast" ISel.
13550b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
13560b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo) const override;
13570b57cec5SDimitry Andric 
13580b57cec5SDimitry Andric     /// If the target has a standard location for the stack protector cookie,
13590b57cec5SDimitry Andric     /// returns the address of that location. Otherwise, returns nullptr.
1360*fe6060f1SDimitry Andric     Value *getIRStackGuard(IRBuilderBase &IRB) const override;
13610b57cec5SDimitry Andric 
13620b57cec5SDimitry Andric     bool useLoadStackGuardNode() const override;
13630b57cec5SDimitry Andric     bool useStackGuardXorFP() const override;
13640b57cec5SDimitry Andric     void insertSSPDeclarations(Module &M) const override;
13650b57cec5SDimitry Andric     Value *getSDagStackGuard(const Module &M) const override;
13660b57cec5SDimitry Andric     Function *getSSPStackGuardCheck(const Module &M) const override;
13670b57cec5SDimitry Andric     SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
13680b57cec5SDimitry Andric                                 const SDLoc &DL) const override;
13690b57cec5SDimitry Andric 
13700b57cec5SDimitry Andric 
13710b57cec5SDimitry Andric     /// Return the target-specific address of the SafeStack unsafe stack
13720b57cec5SDimitry Andric     /// pointer; on x86 this may live at a fixed offset in a non-standard
13730b57cec5SDimitry Andric     /// (segment-register based) address space.
1374*fe6060f1SDimitry Andric     Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;
13750b57cec5SDimitry Andric 
13765ffd83dbSDimitry Andric     std::pair<SDValue, SDValue> BuildFILD(EVT DstVT, EVT SrcVT, const SDLoc &DL,
13775ffd83dbSDimitry Andric                                           SDValue Chain, SDValue Pointer,
13785ffd83dbSDimitry Andric                                           MachinePointerInfo PtrInfo,
13795ffd83dbSDimitry Andric                                           Align Alignment,
13800b57cec5SDimitry Andric                                           SelectionDAG &DAG) const;
13810b57cec5SDimitry Andric 
13820b57cec5SDimitry Andric     /// Customize the preferred legalization strategy for certain types.
13830b57cec5SDimitry Andric     LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
13840b57cec5SDimitry Andric 
13855ffd83dbSDimitry Andric     bool softPromoteHalfType() const override { return true; }
13865ffd83dbSDimitry Andric 
13870b57cec5SDimitry Andric     MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
13880b57cec5SDimitry Andric                                       EVT VT) const override;
13890b57cec5SDimitry Andric 
13900b57cec5SDimitry Andric     unsigned getNumRegistersForCallingConv(LLVMContext &Context,
13910b57cec5SDimitry Andric                                            CallingConv::ID CC,
13920b57cec5SDimitry Andric                                            EVT VT) const override;
13930b57cec5SDimitry Andric 
13948bcb0991SDimitry Andric     unsigned getVectorTypeBreakdownForCallingConv(
13958bcb0991SDimitry Andric         LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
13968bcb0991SDimitry Andric         unsigned &NumIntermediates, MVT &RegisterVT) const override;
13978bcb0991SDimitry Andric 
13980b57cec5SDimitry Andric     bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
13990b57cec5SDimitry Andric 
14000b57cec5SDimitry Andric     bool supportSwiftError() const override;
14010b57cec5SDimitry Andric 
14025ffd83dbSDimitry Andric     bool hasStackProbeSymbol(MachineFunction &MF) const override;
14035ffd83dbSDimitry Andric     bool hasInlineStackProbe(MachineFunction &MF) const override;
14040b57cec5SDimitry Andric     StringRef getStackProbeSymbolName(MachineFunction &MF) const override;
14050b57cec5SDimitry Andric 
14068bcb0991SDimitry Andric     unsigned getStackProbeSize(MachineFunction &MF) const;
14078bcb0991SDimitry Andric 
14080b57cec5SDimitry Andric     bool hasVectorBlend() const override { return true; }
14090b57cec5SDimitry Andric 
14100b57cec5SDimitry Andric     unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
14110b57cec5SDimitry Andric 
14120b57cec5SDimitry Andric     /// Lower interleaved load(s) into target specific
14130b57cec5SDimitry Andric     /// instructions/intrinsics.
14140b57cec5SDimitry Andric     bool lowerInterleavedLoad(LoadInst *LI,
14150b57cec5SDimitry Andric                               ArrayRef<ShuffleVectorInst *> Shuffles,
14160b57cec5SDimitry Andric                               ArrayRef<unsigned> Indices,
14170b57cec5SDimitry Andric                               unsigned Factor) const override;
14180b57cec5SDimitry Andric 
14190b57cec5SDimitry Andric     /// Lower interleaved store(s) into target specific
14200b57cec5SDimitry Andric     /// instructions/intrinsics.
14210b57cec5SDimitry Andric     bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
14220b57cec5SDimitry Andric                                unsigned Factor) const override;
14230b57cec5SDimitry Andric 
14240b57cec5SDimitry Andric     SDValue expandIndirectJTBranch(const SDLoc& dl, SDValue Value,
14250b57cec5SDimitry Andric                                    SDValue Addr, SelectionDAG &DAG)
14260b57cec5SDimitry Andric                                    const override;
14270b57cec5SDimitry Andric 
1428e8d8bef9SDimitry Andric     Align getPrefLoopAlignment(MachineLoop *ML) const override;
1429e8d8bef9SDimitry Andric 
14300b57cec5SDimitry Andric   protected:
14310b57cec5SDimitry Andric     std::pair<const TargetRegisterClass *, uint8_t>
14320b57cec5SDimitry Andric     findRepresentativeClass(const TargetRegisterInfo *TRI,
14330b57cec5SDimitry Andric                             MVT VT) const override;
14340b57cec5SDimitry Andric 
14350b57cec5SDimitry Andric   private:
14360b57cec5SDimitry Andric     /// Keep a reference to the X86Subtarget around so that we can
14370b57cec5SDimitry Andric     /// make the right decision when generating code for different targets.
14380b57cec5SDimitry Andric     const X86Subtarget &Subtarget;
14390b57cec5SDimitry Andric 
14400b57cec5SDimitry Andric     /// Select between SSE or x87 floating point ops.
14410b57cec5SDimitry Andric     /// When SSE is available, use it for f32 operations.
14420b57cec5SDimitry Andric     /// When SSE2 is available, use it for f64 operations.
14430b57cec5SDimitry Andric     bool X86ScalarSSEf32;
14440b57cec5SDimitry Andric     bool X86ScalarSSEf64;
14450b57cec5SDimitry Andric 
14460b57cec5SDimitry Andric     /// A list of legal FP immediates.
14470b57cec5SDimitry Andric     std::vector<APFloat> LegalFPImmediates;
14480b57cec5SDimitry Andric 
14490b57cec5SDimitry Andric     /// Indicate that this x86 target can instruction
14500b57cec5SDimitry Andric     /// select the specified FP immediate natively.
14510b57cec5SDimitry Andric     void addLegalFPImmediate(const APFloat& Imm) {
14520b57cec5SDimitry Andric       LegalFPImmediates.push_back(Imm);
14530b57cec5SDimitry Andric     }
14540b57cec5SDimitry Andric 
14550b57cec5SDimitry Andric     SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
14560b57cec5SDimitry Andric                             CallingConv::ID CallConv, bool isVarArg,
14570b57cec5SDimitry Andric                             const SmallVectorImpl<ISD::InputArg> &Ins,
14580b57cec5SDimitry Andric                             const SDLoc &dl, SelectionDAG &DAG,
14590b57cec5SDimitry Andric                             SmallVectorImpl<SDValue> &InVals,
14600b57cec5SDimitry Andric                             uint32_t *RegMask) const;
14610b57cec5SDimitry Andric     SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
14620b57cec5SDimitry Andric                              const SmallVectorImpl<ISD::InputArg> &ArgInfo,
14630b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
14640b57cec5SDimitry Andric                              const CCValAssign &VA, MachineFrameInfo &MFI,
14650b57cec5SDimitry Andric                              unsigned i) const;
14660b57cec5SDimitry Andric     SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
14670b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
14680b57cec5SDimitry Andric                              const CCValAssign &VA,
14695ffd83dbSDimitry Andric                              ISD::ArgFlagsTy Flags, bool isByval) const;
14700b57cec5SDimitry Andric 
14710b57cec5SDimitry Andric     // Call lowering helpers.
14720b57cec5SDimitry Andric 
14730b57cec5SDimitry Andric     /// Check whether the call is eligible for tail call optimization. Targets
14740b57cec5SDimitry Andric     /// that want to do tail call optimization should implement this function.
14750b57cec5SDimitry Andric     bool IsEligibleForTailCallOptimization(SDValue Callee,
14760b57cec5SDimitry Andric                                            CallingConv::ID CalleeCC,
14770b57cec5SDimitry Andric                                            bool isVarArg,
14780b57cec5SDimitry Andric                                            bool isCalleeStructRet,
14790b57cec5SDimitry Andric                                            bool isCallerStructRet,
14800b57cec5SDimitry Andric                                            Type *RetTy,
14810b57cec5SDimitry Andric                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
14820b57cec5SDimitry Andric                                     const SmallVectorImpl<SDValue> &OutVals,
14830b57cec5SDimitry Andric                                     const SmallVectorImpl<ISD::InputArg> &Ins,
14840b57cec5SDimitry Andric                                            SelectionDAG& DAG) const;
14850b57cec5SDimitry Andric     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
14860b57cec5SDimitry Andric                                     SDValue Chain, bool IsTailCall,
14870b57cec5SDimitry Andric                                     bool Is64Bit, int FPDiff,
14880b57cec5SDimitry Andric                                     const SDLoc &dl) const;
14890b57cec5SDimitry Andric 
14900b57cec5SDimitry Andric     unsigned GetAlignedArgumentStackSize(unsigned StackSize,
14910b57cec5SDimitry Andric                                          SelectionDAG &DAG) const;
14920b57cec5SDimitry Andric 
14930b57cec5SDimitry Andric     unsigned getAddressSpace() const;
14940b57cec5SDimitry Andric 
14955ffd83dbSDimitry Andric     SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned,
1496480093f4SDimitry Andric                             SDValue &Chain) const;
14975ffd83dbSDimitry Andric     SDValue LRINT_LLRINTHelper(SDNode *N, SelectionDAG &DAG) const;
14980b57cec5SDimitry Andric 
14990b57cec5SDimitry Andric     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
15000b57cec5SDimitry Andric     SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
15010b57cec5SDimitry Andric     SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
15020b57cec5SDimitry Andric     SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
15030b57cec5SDimitry Andric 
15040b57cec5SDimitry Andric     unsigned getGlobalWrapperKind(const GlobalValue *GV = nullptr,
15050b57cec5SDimitry Andric                                   const unsigned char OpFlags = 0) const;
15060b57cec5SDimitry Andric     SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
15070b57cec5SDimitry Andric     SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
15080b57cec5SDimitry Andric     SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
15090b57cec5SDimitry Andric     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
15100b57cec5SDimitry Andric     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
15110b57cec5SDimitry Andric 
15120b57cec5SDimitry Andric     /// Creates target global address or external symbol nodes for calls or
15130b57cec5SDimitry Andric     /// other uses.
15140b57cec5SDimitry Andric     SDValue LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
15150b57cec5SDimitry Andric                                   bool ForCall) const;
15160b57cec5SDimitry Andric 
15170b57cec5SDimitry Andric     SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
15180b57cec5SDimitry Andric     SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
15190b57cec5SDimitry Andric     SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
15200b57cec5SDimitry Andric     SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
1521e8d8bef9SDimitry Andric     SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
15225ffd83dbSDimitry Andric     SDValue LowerLRINT_LLRINT(SDValue Op, SelectionDAG &DAG) const;
15230b57cec5SDimitry Andric     SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
15240b57cec5SDimitry Andric     SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
15250b57cec5SDimitry Andric     SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
15260b57cec5SDimitry Andric     SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
15270b57cec5SDimitry Andric     SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
15280b57cec5SDimitry Andric     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
15290b57cec5SDimitry Andric     SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
15300b57cec5SDimitry Andric     SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
15310b57cec5SDimitry Andric     SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
15320b57cec5SDimitry Andric     SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
15330b57cec5SDimitry Andric     SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
15340b57cec5SDimitry Andric     SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
15350b57cec5SDimitry Andric     SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
15360b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
15370b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
15380b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
15390b57cec5SDimitry Andric     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
15400b57cec5SDimitry Andric     SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1541*fe6060f1SDimitry Andric     SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
15420b57cec5SDimitry Andric     SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
1543480093f4SDimitry Andric     SDValue LowerGC_TRANSITION(SDValue Op, SelectionDAG &DAG) const;
15440b57cec5SDimitry Andric     SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
15458bcb0991SDimitry Andric     SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const;
15468bcb0991SDimitry Andric     SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
15478bcb0991SDimitry Andric     SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
15488bcb0991SDimitry Andric 
15490b57cec5SDimitry Andric     SDValue
15500b57cec5SDimitry Andric     LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
15510b57cec5SDimitry Andric                          const SmallVectorImpl<ISD::InputArg> &Ins,
15520b57cec5SDimitry Andric                          const SDLoc &dl, SelectionDAG &DAG,
15530b57cec5SDimitry Andric                          SmallVectorImpl<SDValue> &InVals) const override;
15540b57cec5SDimitry Andric     SDValue LowerCall(CallLoweringInfo &CLI,
15550b57cec5SDimitry Andric                       SmallVectorImpl<SDValue> &InVals) const override;
15560b57cec5SDimitry Andric 
15570b57cec5SDimitry Andric     SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
15580b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
15590b57cec5SDimitry Andric                         const SmallVectorImpl<SDValue> &OutVals,
15600b57cec5SDimitry Andric                         const SDLoc &dl, SelectionDAG &DAG) const override;
15610b57cec5SDimitry Andric 
15620b57cec5SDimitry Andric     bool supportSplitCSR(MachineFunction *MF) const override {
15630b57cec5SDimitry Andric       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
15640b57cec5SDimitry Andric           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
15650b57cec5SDimitry Andric     }
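    // Hedged note on the split-CSR hooks here: CXX_FAST_TLS is the calling
    // convention Clang uses for TLS init/wrapper functions, so a function like
    //   define cxx_fast_tlscc i32* @_ZTW1x() nounwind { ... }
    // (hypothetical IR) opts into the split callee-saved-register path below.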
15660b57cec5SDimitry Andric     void initializeSplitCSR(MachineBasicBlock *Entry) const override;
15670b57cec5SDimitry Andric     void insertCopiesSplitCSR(
15680b57cec5SDimitry Andric       MachineBasicBlock *Entry,
15690b57cec5SDimitry Andric       const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
15700b57cec5SDimitry Andric 
15710b57cec5SDimitry Andric     bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
15720b57cec5SDimitry Andric 
15730b57cec5SDimitry Andric     bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
15740b57cec5SDimitry Andric 
15750b57cec5SDimitry Andric     EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
15760b57cec5SDimitry Andric                             ISD::NodeType ExtendKind) const override;
15770b57cec5SDimitry Andric 
15780b57cec5SDimitry Andric     bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
15790b57cec5SDimitry Andric                         bool isVarArg,
15800b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
15810b57cec5SDimitry Andric                         LLVMContext &Context) const override;
15820b57cec5SDimitry Andric 
15830b57cec5SDimitry Andric     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
15840b57cec5SDimitry Andric 
15850b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
15865ffd83dbSDimitry Andric     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
15870b57cec5SDimitry Andric     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
15880b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
15890b57cec5SDimitry Andric     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
15900b57cec5SDimitry Andric 
15910b57cec5SDimitry Andric     LoadInst *
15920b57cec5SDimitry Andric     lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
15930b57cec5SDimitry Andric 
15948bcb0991SDimitry Andric     bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const override;
15958bcb0991SDimitry Andric     bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const override;
15968bcb0991SDimitry Andric 
15970b57cec5SDimitry Andric     bool needsCmpXchgNb(Type *MemType) const;
15980b57cec5SDimitry Andric 
15990b57cec5SDimitry Andric     void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
16000b57cec5SDimitry Andric                                 MachineBasicBlock *DispatchBB, int FI) const;
16010b57cec5SDimitry Andric 
16020b57cec5SDimitry Andric     // Utility function to emit the low-level va_arg code for X86-64.
16030b57cec5SDimitry Andric     MachineBasicBlock *
1604e8d8bef9SDimitry Andric     EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
16050b57cec5SDimitry Andric 
16060b57cec5SDimitry Andric     /// Utility function to lower a cascaded pair of select (CMOV) pseudos
16060b57cec5SDimitry Andric     /// that use the same condition.
16070b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCascadedSelect(MachineInstr &MI1,
16080b57cec5SDimitry Andric                                                  MachineInstr &MI2,
16090b57cec5SDimitry Andric                                                  MachineBasicBlock *BB) const;
16100b57cec5SDimitry Andric 
16110b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
16120b57cec5SDimitry Andric                                          MachineBasicBlock *BB) const;
16130b57cec5SDimitry Andric 
16140b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
16150b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
16160b57cec5SDimitry Andric 
16175ffd83dbSDimitry Andric     MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
16180b57cec5SDimitry Andric                                             MachineBasicBlock *BB) const;
16190b57cec5SDimitry Andric 
16205ffd83dbSDimitry Andric     MachineBasicBlock *EmitLoweredProbedAlloca(MachineInstr &MI,
16210b57cec5SDimitry Andric                                                MachineBasicBlock *BB) const;
16220b57cec5SDimitry Andric 
16230b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
16240b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
16250b57cec5SDimitry Andric 
16260b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
16270b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
16280b57cec5SDimitry Andric 
16290946e70aSDimitry Andric     MachineBasicBlock *EmitLoweredIndirectThunk(MachineInstr &MI,
16300b57cec5SDimitry Andric                                                 MachineBasicBlock *BB) const;
16310b57cec5SDimitry Andric 
16320b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
16330b57cec5SDimitry Andric                                         MachineBasicBlock *MBB) const;
16340b57cec5SDimitry Andric 
16350b57cec5SDimitry Andric     void emitSetJmpShadowStackFix(MachineInstr &MI,
16360b57cec5SDimitry Andric                                   MachineBasicBlock *MBB) const;
16370b57cec5SDimitry Andric 
16380b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
16390b57cec5SDimitry Andric                                          MachineBasicBlock *MBB) const;
16400b57cec5SDimitry Andric 
16410b57cec5SDimitry Andric     MachineBasicBlock *emitLongJmpShadowStackFix(MachineInstr &MI,
16420b57cec5SDimitry Andric                                                  MachineBasicBlock *MBB) const;
16430b57cec5SDimitry Andric 
16440b57cec5SDimitry Andric     MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
16450b57cec5SDimitry Andric                                              MachineBasicBlock *MBB) const;
16460b57cec5SDimitry Andric 
16470b57cec5SDimitry Andric     /// Emit flags for the given setcc condition and operands. Also return the
16480b57cec5SDimitry Andric     /// corresponding X86 condition code constant in X86CC.
1649480093f4SDimitry Andric     SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1, ISD::CondCode CC,
1650480093f4SDimitry Andric                               const SDLoc &dl, SelectionDAG &DAG,
16515ffd83dbSDimitry Andric                               SDValue &X86CC) const;
16520b57cec5SDimitry Andric 
16530b57cec5SDimitry Andric     /// Check if replacement of SQRT with RSQRT should be disabled.
16545ffd83dbSDimitry Andric     bool isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const override;
16550b57cec5SDimitry Andric 
16560b57cec5SDimitry Andric     /// Use rsqrt* to speed up sqrt calculations.
16575ffd83dbSDimitry Andric     SDValue getSqrtEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
16580b57cec5SDimitry Andric                             int &RefinementSteps, bool &UseOneConstNR,
16590b57cec5SDimitry Andric                             bool Reciprocal) const override;
16600b57cec5SDimitry Andric 
16610b57cec5SDimitry Andric     /// Use rcp* to speed up fdiv calculations.
16625ffd83dbSDimitry Andric     SDValue getRecipEstimate(SDValue Op, SelectionDAG &DAG, int Enabled,
16630b57cec5SDimitry Andric                              int &RefinementSteps) const override;
16640b57cec5SDimitry Andric 
16650b57cec5SDimitry Andric     /// Reassociate floating point divisions into multiply by reciprocal.
16660b57cec5SDimitry Andric     unsigned combineRepeatedFPDivisors() const override;
16678bcb0991SDimitry Andric 
16688bcb0991SDimitry Andric     SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
16698bcb0991SDimitry Andric                           SmallVectorImpl<SDNode *> &Created) const override;
16700b57cec5SDimitry Andric   };
16710b57cec5SDimitry Andric 
16720b57cec5SDimitry Andric   namespace X86 {
16730b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
16740b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo);
16750b57cec5SDimitry Andric   } // end namespace X86
16760b57cec5SDimitry Andric 
16770b57cec5SDimitry Andric   // X86 specific Gather/Scatter nodes.
16780b57cec5SDimitry Andric   // The class has the same order of operands as MaskedGatherScatterSDNode for
16790b57cec5SDimitry Andric   // convenience.
16805ffd83dbSDimitry Andric   class X86MaskedGatherScatterSDNode : public MemIntrinsicSDNode {
16810b57cec5SDimitry Andric   public:
16825ffd83dbSDimitry Andric     // This is intended as a utility base class and should never be created directly.
16835ffd83dbSDimitry Andric     X86MaskedGatherScatterSDNode() = delete;
16845ffd83dbSDimitry Andric     ~X86MaskedGatherScatterSDNode() = delete;
16850b57cec5SDimitry Andric 
16860b57cec5SDimitry Andric     const SDValue &getBasePtr() const { return getOperand(3); }
16870b57cec5SDimitry Andric     const SDValue &getIndex()   const { return getOperand(4); }
16880b57cec5SDimitry Andric     const SDValue &getMask()    const { return getOperand(2); }
16890b57cec5SDimitry Andric     const SDValue &getScale()   const { return getOperand(5); }
16900b57cec5SDimitry Andric 
16910b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
16920b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER ||
16930b57cec5SDimitry Andric              N->getOpcode() == X86ISD::MSCATTER;
16940b57cec5SDimitry Andric     }
16950b57cec5SDimitry Andric   };
16960b57cec5SDimitry Andric 
16970b57cec5SDimitry Andric   class X86MaskedGatherSDNode : public X86MaskedGatherScatterSDNode {
16980b57cec5SDimitry Andric   public:
16990b57cec5SDimitry Andric     const SDValue &getPassThru() const { return getOperand(1); }
17000b57cec5SDimitry Andric 
17010b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
17020b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER;
17030b57cec5SDimitry Andric     }
17040b57cec5SDimitry Andric   };
17050b57cec5SDimitry Andric 
17060b57cec5SDimitry Andric   class X86MaskedScatterSDNode : public X86MaskedGatherScatterSDNode {
17070b57cec5SDimitry Andric   public:
17080b57cec5SDimitry Andric     const SDValue &getValue() const { return getOperand(1); }
17090b57cec5SDimitry Andric 
17100b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
17110b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MSCATTER;
17120b57cec5SDimitry Andric     }
17130b57cec5SDimitry Andric   };
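  // Hedged usage sketch (hypothetical combine code, not part of this header):
  // the classof() hooks above make the usual SelectionDAG RTTI helpers work,
  // e.g.
  //   if (auto *Gather = dyn_cast<X86MaskedGatherSDNode>(N)) {
  //     SDValue Index = Gather->getIndex();
  //     SDValue PassThru = Gather->getPassThru();
  //     ...
  //   }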
17140b57cec5SDimitry Andric 
17150b57cec5SDimitry Andric   /// Generate unpacklo/unpackhi shuffle mask.
1716e8d8bef9SDimitry Andric   void createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask, bool Lo,
17175ffd83dbSDimitry Andric                                bool Unary);
17180b57cec5SDimitry Andric 
17195ffd83dbSDimitry Andric   /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
17205ffd83dbSDimitry Andric   /// imposed by AVX and specific to the unary pattern. Example:
17215ffd83dbSDimitry Andric   /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
17225ffd83dbSDimitry Andric   /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
17235ffd83dbSDimitry Andric   void createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask, bool Lo);
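  // Hedged usage example based on the documented output above:
  //   SmallVector<int, 8> Mask;
  //   createSplat2ShuffleMask(MVT::v8i32, Mask, /*Lo=*/true);
  //   // Mask == <0, 0, 1, 1, 2, 2, 3, 3>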
17240b57cec5SDimitry Andric 
17250b57cec5SDimitry Andric } // end namespace llvm
17260b57cec5SDimitry Andric 
17270b57cec5SDimitry Andric #endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
1728