//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

namespace llvm {
  class X86Subtarget;
  class X86TargetMachine;

  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// Bit scan forward.
      BSF,
      /// Bit scan reverse.
      BSR,

      /// Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// Bitwise logical ANDNOT of floating point values. This
      /// corresponds to X86::ANDNPS or X86::ANDNPD.
      FANDN,

      /// These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
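      /// In the pseudo-signature notation used elsewhere in this file (an
      /// illustrative sketch of the layout above; operand names are made up):
      ///
      ///   OUTCHAIN, [Ret0], [Ret1] = CALL INCHAIN, Callee, BytesPushed,
      ///                                   BytesPopped, [ALVal], [DLVal]
      ///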
      CALL,

      /// Same as call except it adds the NoTrack prefix.
      NT_CALL,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
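      /// As a sketch in the same pseudo-signature notation (illustrative):
      ///   Res = SETCC CC, EFLAGS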
      SETCC,

      /// X86 Select
      SELECTS,

      // Same as SETCC except it's materialized with an SBB and the value is
      // all ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s.  Generally DTRT for C/C++ with NaNs.
      FSETCC,

      /// X86 FP SETCC, similar to above, but with the output as an i1 mask,
      /// and a version with SAE.
      FSETCCM, FSETCCM_SAE,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction.
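      /// As a sketch in the same pseudo-signature notation (illustrative; the
      /// ordering of the two selected values is not implied here):
      ///   Res = CMOV Val0, Val1, CC, EFLAGS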
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
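      /// As a sketch (illustrative):
      ///   OUTCHAIN = BRCOND INCHAIN, DestBB, CC, EFLAGS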
      BRCOND,

      /// BRIND node with NoTrack prefix. Operand 0 is the chain operand and
      /// operand 1 is the target address.
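      /// As a sketch (illustrative):
      ///   OUTCHAIN = NT_BRIND INCHAIN, TargetAddr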
      NT_BRIND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// Return from interrupt. Operand 0 is the number of bytes to pop.
      IRET,

      /// Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// A wrapper node for TargetConstantPool, TargetJumpTable,
      /// TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress,
      /// MCSymbol and TargetBlockAddress.
      Wrapper,

      /// Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector.
      MOVDQ2Q,

      /// Copies a 32-bit value from the low word of an MMX
      /// vector to a GPR.
      MMX_MOVD2W,

      /// Copies a GPR into the low 32-bit word of an MMX vector
      /// and zeroes out the high word.
      MMX_MOVW2D,

      /// Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW,

      /// Shuffle 16 8-bit values within a vector.
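      /// Roughly, per 16-byte lane (a sketch of the semantics; Src and Mask
      /// stand for the two inputs):
      ///   Res[i] = (Mask[i] & 0x80) ? 0 : Src[Mask[i] & 0xf]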
      PSHUFB,

      /// Compute Sum of Absolute Differences.
      PSADBW,
      /// Compute Double Block Packed Sum-Absolute-Differences.
      DBPSADBW,

      /// Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// Blend where the selector is an immediate.
      BLENDI,

      /// Dynamic (non-constant condition) vector blend where only the sign bits
      /// of the condition elements are used. This is used to enforce that the
      /// condition mask is not valid for generic VSELECT optimizations. This
      /// is also used to implement the intrinsics.
      /// Operands are in VSELECT order: MASK, TRUE, FALSE
      BLENDV,

      /// Combined add and sub on an FP vector.
      ADDSUB,

      // FP vector ops with rounding mode.
      FADD_RND, FADDS, FADDS_RND,
      FSUB_RND, FSUBS, FSUBS_RND,
      FMUL_RND, FMULS, FMULS_RND,
      FDIV_RND, FDIVS, FDIVS_RND,
      FMAX_SAE, FMAXS_SAE,
      FMIN_SAE, FMINS_SAE,
      FSQRT_RND, FSQRTS, FSQRTS_RND,

      // FP vector get exponent.
      FGETEXP, FGETEXP_SAE, FGETEXPS, FGETEXPS_SAE,
      // Extract Normalized Mantissas.
      VGETMANT, VGETMANT_SAE, VGETMANTS, VGETMANTS_SAE,
      // FP Scale.
      SCALEF, SCALEF_RND,
      SCALEFS, SCALEFS_RND,

      // Unsigned Integer average.
      AVG,

      /// Integer horizontal add/sub.
      HADD,
      HSUB,

      /// Floating point horizontal add/sub.
      FHADD,
      FHSUB,

      // Detect Conflicts Within a Vector.
      CONFLICT,

      /// Floating point max and min.
      FMAX, FMIN,

      /// Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// Scalar intrinsic floating point max and min.
      FMAXS, FMINS,

      /// Floating point reciprocal-sqrt and reciprocal approximation.
      /// Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // AVX-512 reciprocal approximations with a little more precision.
      RSQRT14, RSQRT14S, RCP14, RCP14S,

      // Thread Local Storage.
      TLSADDR,

      // Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // Thread Local Storage. A call to an OS-provided
      // thunk at the address from an earlier relocation.
      TLSCALL,

      // Exception Handling helpers.
      EH_RETURN,

      // SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      // SjLj exception handling dispatch.
      EH_SJLJ_SETUP_DISPATCH,

      /// Tail call return. See X86TargetLowering::LowerCall for
      /// the list of operands.
      TC_RETURN,

      // Vector move to low scalar and zero higher vector elements.
      VZEXT_MOVL,

      // Vector integer truncate.
      VTRUNC,
      // Vector integer truncate with unsigned/signed saturation.
      VTRUNCUS, VTRUNCS,

      // Masked version of the above. Used when less than a 128-bit result is
      // produced since the mask only applies to the lower elements and can't
      // be represented by a select.
      // SRC, PASSTHRU, MASK
      VMTRUNC, VMTRUNCUS, VMTRUNCS,

      // Vector FP extend.
      VFPEXT, VFPEXT_SAE, VFPEXTS, VFPEXTS_SAE,

      // Vector FP round.
      VFPROUND, VFPROUND_RND, VFPROUNDS, VFPROUNDS_RND,

      // Masked version of above. Used for v2f64->v4f32.
      // SRC, PASSTHRU, MASK
      VMFPROUND,

      // 128-bit vector logical left / right shift.
      VSHLDQ, VSRLDQ,

      // Vector shift elements.
      VSHL, VSRL, VSRA,

      // Vector variable shift.
      VSHLV, VSRLV, VSRAV,

      // Vector shift elements by immediate.
      VSHLI, VSRLI, VSRAI,

      // Shifts of mask registers.
      KSHIFTL, KSHIFTR,

      // Bit rotate by immediate.
      VROTLI, VROTRI,

      // Vector packed double/float comparison.
      CMPP,

      // Vector integer comparisons.
      PCMPEQ, PCMPGT,

      // v8i16 Horizontal minimum and position.
      PHMINPOS,

      MULTISHIFT,

      /// Vector comparison generating mask bits for fp and
      /// integer signed and unsigned data types.
      CMPM,
      // Vector comparison with SAE for FP values.
      CMPM_SAE,

      // Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL, UMUL,
      OR, XOR, AND,

      // Bit field extract.
      BEXTR,

      // Zero High Bits Starting with Specified Bit Position.
      BZHI,

      // X86-specific multiply by immediate.
      MUL_IMM,

      // Vector sign bit extraction.
      MOVMSK,

      // Vector bitwise comparisons.
      PTEST,

      // Vector packed fp sign bitwise comparisons.
      TESTP,

      // OR/AND test for masks.
      KORTEST,
      KTEST,

      // ADD for masks.
      KADD,

      // Several flavors of instructions with vector shuffle behaviors.
      // Saturated signed/unsigned packing.
      PACKSS,
      PACKUS,
      // Intra-lane alignr.
      PALIGNR,
      // AVX512 inter-lane alignr.
      VALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      SHUFP,
      // VBMI2 Concat & Shift.
      VSHLD,
      VSHRD,
      VSHLDV,
      VSHRDV,
      // Shuffle Packed Values at 128-bit granularity.
      SHUF128,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVLHPS,
      MOVHLPS,
      MOVSD,
      MOVSS,
      UNPCKL,
      UNPCKH,
      VPERMILPV,
      VPERMILPI,
      VPERMI,
      VPERM2X128,

      // Variable Permute (VPERM).
      // Res = VPERMV MaskV, V0
      VPERMV,

      // 3-op Variable Permute (VPERMT2).
      // Res = VPERMV3 V0, MaskV, V1
      VPERMV3,

      // Bitwise ternary logic.
      VPTERNLOG,
      // Fix Up Special Packed Float32/64 values.
      VFIXUPIMM, VFIXUPIMM_SAE,
      VFIXUPIMMS, VFIXUPIMMS_SAE,
      // Range Restriction Calculation For Packed Pairs of Float32/64 values.
      VRANGE, VRANGE_SAE, VRANGES, VRANGES_SAE,
      // Reduce - Perform Reduction Transformation on scalar/packed FP.
      VREDUCE, VREDUCE_SAE, VREDUCES, VREDUCES_SAE,
      // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
      // Also used by the legacy (V)ROUND intrinsics where we mask out the
      // scaling part of the immediate.
      VRNDSCALE, VRNDSCALE_SAE, VRNDSCALES, VRNDSCALES_SAE,
      // Tests types of packed FP values.
      VFPCLASS,
      // Tests types of scalar FP values.
      VFPCLASSS,

      // Broadcast (splat) scalar or element 0 of a vector. If the operand is
      // a vector, this node may change the vector length as part of the splat.
      VBROADCAST,
      // Broadcast mask to vector.
      VBROADCASTM,
      // Broadcast subvector to vector.
      SUBV_BROADCAST,

      /// SSE4A Extraction and Insertion.
      EXTRQI, INSERTQI,

      // XOP arithmetic/logical shifts.
      VPSHA, VPSHL,
      // XOP signed/unsigned integer comparisons.
      VPCOM, VPCOMU,
      // XOP packed permute bytes.
      VPPERM,
      // XOP two source permutation.
      VPERMIL2,

      // Vector multiply packed unsigned doubleword integers.
      PMULUDQ,
      // Vector multiply packed signed doubleword integers.
      PMULDQ,
      // Vector Multiply Packed Unsigned Integers with Round and Scale.
      MULHRS,

      // Multiply and Add Packed Integers.
      VPMADDUBSW, VPMADDWD,

      // AVX512IFMA multiply and add.
      // NOTE: These are different from the instruction and perform
      // op0 x op1 + op2.
      VPMADD52L, VPMADD52H,

      // VNNI
      VPDPBUSD,
      VPDPBUSDS,
      VPDPWSSD,
      VPDPWSSDS,

      // FMA nodes.
      // We use the target independent ISD::FMA for the non-inverted case.
      FNMADD,
      FMSUB,
      FNMSUB,
      FMADDSUB,
      FMSUBADD,

      // FMA with rounding mode.
      FMADD_RND,
      FNMADD_RND,
      FMSUB_RND,
      FNMSUB_RND,
      FMADDSUB_RND,
      FMSUBADD_RND,

      // Compress and expand.
      COMPRESS,
      EXPAND,

      // Bits shuffle.
      VPSHUFBITQMB,

      // Convert unsigned/signed integer to floating-point value with rounding mode.
      SINT_TO_FP_RND, UINT_TO_FP_RND,
      SCALAR_SINT_TO_FP, SCALAR_UINT_TO_FP,
      SCALAR_SINT_TO_FP_RND, SCALAR_UINT_TO_FP_RND,

      // Vector float/double to signed/unsigned integer.
      CVTP2SI, CVTP2UI, CVTP2SI_RND, CVTP2UI_RND,
      // Scalar float/double to signed/unsigned integer.
      CVTS2SI, CVTS2UI, CVTS2SI_RND, CVTS2UI_RND,

      // Vector float/double to signed/unsigned integer with truncation.
      CVTTP2SI, CVTTP2UI, CVTTP2SI_SAE, CVTTP2UI_SAE,
      // Scalar float/double to signed/unsigned integer with truncation.
      CVTTS2SI, CVTTS2UI, CVTTS2SI_SAE, CVTTS2UI_SAE,

      // Vector signed/unsigned integer to float/double.
      CVTSI2P, CVTUI2P,

      // Masked versions of above. Used for v2f64->v4f32.
      // SRC, PASSTHRU, MASK
      MCVTP2SI, MCVTP2UI, MCVTTP2SI, MCVTTP2UI,
      MCVTSI2P, MCVTUI2P,

      // Vector float to bfloat16.
      // Convert TWO packed single-precision vectors to one packed BF16 vector.
      CVTNE2PS2BF16,
      // Convert packed single-precision data to packed BF16 data.
      CVTNEPS2BF16,
      // Masked version of above.
      // SRC, PASSTHRU, MASK
      MCVTNEPS2BF16,

      // Dot product of BF16 pairs, accumulated into
      // packed single precision.
      DPBF16PS,

      // Save xmm argument registers to the stack, according to %al. An operator
      // is needed so that this can be expanded with control flow.
      VASTART_SAVE_XMM_REGS,

      // Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // For allocating variable amounts of stack space when using
      // segmented stacks. Checks whether the current stacklet has enough
      // space, and falls back to heap allocation if not.
      SEG_ALLOCA,

      // Memory barriers.
      MEMBARRIER,
      MFENCE,

      // Store FP status word into i16 register.
      FNSTSW16r,

      // Store contents of %ah into %eflags.
      SAHF,

      // Get a random integer and indicate whether it is valid in CF.
      RDRAND,

      // Get a NIST SP800-90B & C compliant random integer and
      // indicate whether it is valid in CF.
      RDSEED,

      // Protection keys
      // RDPKRU - Operand 0 is chain. Operand 1 is value for ECX.
      // WRPKRU - Operand 0 is chain. Operand 1 is value for EDX. Operand 2 is
      // value for ECX.
      RDPKRU, WRPKRU,

      // SSE42 string comparisons.
      // These nodes produce 3 results: index, mask, and flags. X86ISelDAGToDAG
      // will emit one or two instructions based on which results are used. If
      // flags and index/mask are both used, this allows us to use a single
      // instruction since we won't have to pick an opcode for flags. Instead
      // we can rely on the DAG to CSE everything and decide at isel.
      PCMPISTR,
      PCMPESTR,

      // Test if in transactional execution.
      XTEST,

      // ERI instructions.
      RSQRT28, RSQRT28_SAE, RSQRT28S, RSQRT28S_SAE,
      RCP28, RCP28_SAE, RCP28S, RCP28S_SAE, EXP2, EXP2_SAE,

      // Conversions between float and half-float.
      CVTPS2PH, CVTPH2PS, CVTPH2PS_SAE,

      // Masked version of above.
      // SRC, RND, PASSTHRU, MASK
      MCVTPS2PH,

      // Galois Field Arithmetic Instructions.
      GF2P8AFFINEINVQB, GF2P8AFFINEQB, GF2P8MULB,

      // LWP insert record.
      LWPINS,

      // User level wait.
      UMWAIT, TPAUSE,

      // Enqueue Stores Instructions.
      ENQCMD, ENQCMDS,

      // For avx512-vp2intersect.
      VP2INTERSECT,

      // Compare and swap.
      LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,
      LCMPXCHG8_SAVE_EBX_DAG,
      LCMPXCHG16_SAVE_RBX_DAG,

      /// LOCK-prefixed arithmetic read-modify-write instructions.
      /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
      LADD, LSUB, LOR, LXOR, LAND,

      // Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // extract_vector_elt, store.
      VEXTRACT_STORE,

      // Store FP control word into i16 memory.
      FNSTCW16m,

      /// This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source.  This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain). Memory VT specifies the type to store to.
      FP_TO_INT_IN_MEM,

      /// This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result.  This corresponds to the
      /// X86::FILD*m instructions. It has two inputs (token chain and address)
      /// and two outputs (FP value and token chain). FILD_FLAG also produces a
      /// flag. The integer source type is specified by the memory VT.
      FILD,
      FILD_FLAG,

      /// This instruction implements a fp->int store from FP stack
      /// slots. This corresponds to the fist instruction. It takes a
      /// chain operand, value to store, address, and glue. The memory VT
      /// specifies the type to store as.
      FIST,

      /// This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, and ptr to load from. The memory VT specifies the type to
      /// load from.
      FLD,

      /// This instruction implements a truncating store from FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and glue. The memory VT
      /// specifies the type to store as.
      FST,

      /// This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64,

      // Vector truncating store with unsigned/signed saturation.
      VTRUNCSTOREUS, VTRUNCSTORES,
      // Vector truncating masked store with unsigned/signed saturation.
      VMTRUNCSTOREUS, VMTRUNCSTORES,

      // X86 specific gather and scatter.
      MGATHER, MSCATTER,

      // WARNING: Do not add anything at the end unless you want the node to
      // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
      // opcodes will be treated as target memory ops!
    };
  } // end namespace X86ISD

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// Returns true if Elt is a constant zero or floating point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);

    /// Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
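    /// For example (illustrative): a 32-bit stdcall callee pops its own
    /// arguments, so isCalleePop(CallingConv::X86_StdCall, /*is64Bit=*/false,
    /// /*IsVarArg=*/false, /*GuaranteeTCO=*/false) would be expected to
    /// return true.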
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool GuaranteeTCO);

  } // end namespace X86

  //===--------------------------------------------------------------------===//
  //  X86 Implementation of the TargetLowering interface
  class X86TargetLowering final : public TargetLowering {
  public:
    explicit X86TargetLowering(const X86TargetMachine &TM,
                               const X86Subtarget &STI);

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                               ArgListTy &Args) const override;

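    // Shift instructions on x86 take their count in the 8-bit CL register
    // (the hardware masks the count), so i8 suffices here.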
    MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override {
      return MVT::i8;
    }

    const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const override;

    /// Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    /// Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, that means the destination
    /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
    /// means there isn't a need to check it against an alignment requirement,
    /// probably because the source does not need to be loaded. If 'IsMemset' is
    /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
    /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                            const AttributeList &FuncAttributes) const override;

    /// Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2 f64 load / store are done with fldl / fstpl which
    /// also does type conversion. Note the specified type doesn't have to be
    /// legal as the hook is used before type legalization.
    bool isSafeMemOpType(MVT VT) const override;

    /// Returns true if the target allows unaligned memory accesses of the
    /// specified type. Returns whether it is "fast" in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
                                        MachineMemOperand::Flags Flags,
                                        bool *Fast) const override;

    /// Provide custom lowering hooks for some operations.
    ///
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// Places new result values for the node in Results (their number
    /// and types must exactly match those of the original return values of
    /// the node), or leaves Results empty, which indicates that the node is not
    /// to be custom lowered after all.
    void LowerOperationWrapper(SDNode *N,
                               SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) const override;

    /// Replace the results of a node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    // Return true if it is profitable to combine a BUILD_VECTOR with a
    // stride-pattern to a shuffle and a truncate.
    // Example of such a combine:
    // v4i32 build_vector((extract_elt V, 1),
    //                    (extract_elt V, 3),
    //                    (extract_elt V, 5),
    //                    (extract_elt V, 7))
    //  -->
    // v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to
    // v4i64)
    bool isDesirableToCombineBuildVectorToShuffleTruncate(
        ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const override;

    /// Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

    /// Return true if the target has native support for the
    /// specified value type and it is 'desirable' to use the type. e.g. On x86
    /// i16 is legal, but undesirable since i16 instruction encodings are longer
    /// and some i16 instructions are slow.
    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    /// This method returns the name of a target specific DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// Do not merge vector stores after legalization because that may conflict
    /// with x86-specific store splitting optimizations.
    bool mergeStoresAfterLegalization(EVT MemVT) const override {
      return !MemVT.isVector();
    }

    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                          const SelectionDAG &DAG) const override;

    bool isCheapToSpeculateCttz() const override;

    bool isCheapToSpeculateCtlz() const override;

    bool isCtlzFast() const override;

    bool hasBitPreservingFPLogic(EVT VT) const override {
      return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
    }

    bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
      // If the pair to store is a mixture of float and int values, we will
      // save two bitwise instructions and one float-to-int instruction and
      // add one store instruction. There is potentially a more
      // significant benefit because it avoids the float->int domain switch
      // for the input value, so it is more likely a win.
      if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
          (LTy.isInteger() && HTy.isFloatingPoint()))
        return true;
      // If the pair only contains int values, we will save two bitwise
      // instructions and add one store instruction (costing one more
      // store buffer). Since the benefit is less clear, we leave
      // such pairs out until we have a testcase proving it is a win.
      return false;
    }

    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    bool hasAndNotCompare(SDValue Y) const override;

    bool hasAndNot(SDValue Y) const override;

    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                           CombineLevel Level) const override;

    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;

    bool
    shouldTransformSignedTruncationCheck(EVT XVT,
                                         unsigned KeptBits) const override {
      // For vectors, we don't have a preference.
      if (XVT.isVector())
        return false;

      auto VTIsOk = [](EVT VT) -> bool {
        return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
               VT == MVT::i64;
      };

      // We are ok with KeptBitsVT being byte/word/dword, which is what MOVS
      // supports.
      // XVT will be larger than KeptBitsVT.
      MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
      return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
    }

    bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

    bool shouldSplatInsEltVarIndex(EVT VT) const override;

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    /// Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
    MVT hasFastEqualityCompare(unsigned NumBits) const override;

    /// Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const override;

    /// Determine which of the bits specified in Mask are known to be either
    /// zero or one and return them in the KnownZero/KnownOne bitsets.
    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    /// Determine the number of bits in the operation that are sign bits.
    unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth) const override;

    bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op,
                                                 const APInt &DemandedElts,
                                                 APInt &KnownUndef,
                                                 APInt &KnownZero,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth) const override;

    bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedBits,
                                           const APInt &DemandedElts,
                                           KnownBits &Known,
                                           TargetLoweringOpt &TLO,
                                           unsigned Depth) const override;

    const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;

    SDValue unwrapAddress(SDValue N) const override;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight
      getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                     const char *constraint) const override;

    const char *LowerXConstraint(EVT ConstraintVT) const override;

931*0b57cec5SDimitry Andric     /// Lower the specified operand into the Ops vector. If it is invalid, don't
932*0b57cec5SDimitry Andric     /// add anything to Ops. If hasMemory is true, it means one of the asm
933*0b57cec5SDimitry Andric     /// constraints of the inline asm instruction being processed is 'm'.
934*0b57cec5SDimitry Andric     void LowerAsmOperandForConstraint(SDValue Op,
935*0b57cec5SDimitry Andric                                       std::string &Constraint,
936*0b57cec5SDimitry Andric                                       std::vector<SDValue> &Ops,
937*0b57cec5SDimitry Andric                                       SelectionDAG &DAG) const override;
938*0b57cec5SDimitry Andric 
939*0b57cec5SDimitry Andric     unsigned
940*0b57cec5SDimitry Andric     getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
941*0b57cec5SDimitry Andric       if (ConstraintCode == "i")
942*0b57cec5SDimitry Andric         return InlineAsm::Constraint_i;
943*0b57cec5SDimitry Andric       else if (ConstraintCode == "o")
944*0b57cec5SDimitry Andric         return InlineAsm::Constraint_o;
945*0b57cec5SDimitry Andric       else if (ConstraintCode == "v")
946*0b57cec5SDimitry Andric         return InlineAsm::Constraint_v;
947*0b57cec5SDimitry Andric       else if (ConstraintCode == "X")
948*0b57cec5SDimitry Andric         return InlineAsm::Constraint_X;
949*0b57cec5SDimitry Andric       return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
950*0b57cec5SDimitry Andric     }
951*0b57cec5SDimitry Andric 
952*0b57cec5SDimitry Andric     /// Handle lowering of flag assembly outputs.
953*0b57cec5SDimitry Andric     SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, SDLoc DL,
954*0b57cec5SDimitry Andric                                         const AsmOperandInfo &Constraint,
955*0b57cec5SDimitry Andric                                         SelectionDAG &DAG) const override;
956*0b57cec5SDimitry Andric 
957*0b57cec5SDimitry Andric     /// Given a physical register constraint
958*0b57cec5SDimitry Andric     /// (e.g. {edx}), return the register number and the register class for the
959*0b57cec5SDimitry Andric     /// register.  This should only be used for C_Register constraints.  On
960*0b57cec5SDimitry Andric     /// error, this returns a register number of 0.
961*0b57cec5SDimitry Andric     std::pair<unsigned, const TargetRegisterClass *>
962*0b57cec5SDimitry Andric     getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
963*0b57cec5SDimitry Andric                                  StringRef Constraint, MVT VT) const override;
964*0b57cec5SDimitry Andric 
965*0b57cec5SDimitry Andric     /// Return true if the addressing mode represented
966*0b57cec5SDimitry Andric     /// by AM is legal for this target, for a load/store of the specified type.
967*0b57cec5SDimitry Andric     bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
968*0b57cec5SDimitry Andric                                Type *Ty, unsigned AS,
969*0b57cec5SDimitry Andric                                Instruction *I = nullptr) const override;
970*0b57cec5SDimitry Andric 
971*0b57cec5SDimitry Andric     /// Return true if the specified immediate is a legal
972*0b57cec5SDimitry Andric     /// icmp immediate, that is, the target has icmp instructions which can
973*0b57cec5SDimitry Andric     /// compare a register against the immediate without having to materialize
974*0b57cec5SDimitry Andric     /// the immediate into a register.
975*0b57cec5SDimitry Andric     bool isLegalICmpImmediate(int64_t Imm) const override;
976*0b57cec5SDimitry Andric 
977*0b57cec5SDimitry Andric     /// Return true if the specified immediate is a legal
978*0b57cec5SDimitry Andric     /// add immediate, that is, the target has add instructions which can
979*0b57cec5SDimitry Andric     /// add a register and the immediate without having to materialize
980*0b57cec5SDimitry Andric     /// the immediate into a register.
981*0b57cec5SDimitry Andric     bool isLegalAddImmediate(int64_t Imm) const override;
982*0b57cec5SDimitry Andric 
983*0b57cec5SDimitry Andric     bool isLegalStoreImmediate(int64_t Imm) const override;
984*0b57cec5SDimitry Andric 
985*0b57cec5SDimitry Andric     /// Return the cost of the scaling factor used in the addressing
986*0b57cec5SDimitry Andric     /// mode represented by AM for this target, for a load/store
987*0b57cec5SDimitry Andric     /// of the specified type.
988*0b57cec5SDimitry Andric     /// If the AM is supported, the return value must be >= 0.
989*0b57cec5SDimitry Andric     /// If the AM is not supported, it returns a negative value.
990*0b57cec5SDimitry Andric     int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
991*0b57cec5SDimitry Andric                              unsigned AS) const override;
992*0b57cec5SDimitry Andric 
993*0b57cec5SDimitry Andric     bool isVectorShiftByScalarCheap(Type *Ty) const override;
994*0b57cec5SDimitry Andric 
995*0b57cec5SDimitry Andric     /// Add x86-specific opcodes to the default list.
996*0b57cec5SDimitry Andric     bool isBinOp(unsigned Opcode) const override;
997*0b57cec5SDimitry Andric 
998*0b57cec5SDimitry Andric     /// Returns true if the opcode is a commutative binary operation.
999*0b57cec5SDimitry Andric     bool isCommutativeBinOp(unsigned Opcode) const override;
1000*0b57cec5SDimitry Andric 
1001*0b57cec5SDimitry Andric     /// Return true if it's free to truncate a value of
1002*0b57cec5SDimitry Andric     /// type Ty1 to type Ty2. For example, on x86 it's free to truncate an i32
1003*0b57cec5SDimitry Andric     /// value in register EAX to i16 by referencing its sub-register AX.
1004*0b57cec5SDimitry Andric     bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
1005*0b57cec5SDimitry Andric     bool isTruncateFree(EVT VT1, EVT VT2) const override;
1006*0b57cec5SDimitry Andric 
1007*0b57cec5SDimitry Andric     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
1008*0b57cec5SDimitry Andric 
1009*0b57cec5SDimitry Andric     /// Return true if any actual instruction that defines a
1010*0b57cec5SDimitry Andric     /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
1011*0b57cec5SDimitry Andric     /// result register. This does not necessarily include registers defined
1012*0b57cec5SDimitry Andric     /// in unknown ways, such as incoming arguments, or copies from unknown
1013*0b57cec5SDimitry Andric     /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
1014*0b57cec5SDimitry Andric     /// does not necessarily apply to truncate instructions. For example, on
1015*0b57cec5SDimitry Andric     /// x86-64 all instructions that define 32-bit values implicitly
1016*0b57cec5SDimitry Andric     /// zero-extend the result out to 64 bits.
1017*0b57cec5SDimitry Andric     bool isZExtFree(Type *Ty1, Type *Ty2) const override;
1018*0b57cec5SDimitry Andric     bool isZExtFree(EVT VT1, EVT VT2) const override;
1019*0b57cec5SDimitry Andric     bool isZExtFree(SDValue Val, EVT VT2) const override;
1020*0b57cec5SDimitry Andric 
1021*0b57cec5SDimitry Andric     /// Return true if folding a vector load into ExtVal (a sign, zero, or any
1022*0b57cec5SDimitry Andric     /// extend node) is profitable.
1023*0b57cec5SDimitry Andric     bool isVectorLoadExtDesirable(SDValue) const override;
1024*0b57cec5SDimitry Andric 
1025*0b57cec5SDimitry Andric     /// Return true if an FMA operation is faster than a pair of fmul and fadd
1026*0b57cec5SDimitry Andric     /// instructions. fmuladd intrinsics will be expanded to FMAs when this
1027*0b57cec5SDimitry Andric     /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
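    /// Illustratively, when this returns true for <4 x float>, a call such as
    ///   %r = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %a,
    ///                                             <4 x float> %b,
    ///                                             <4 x float> %c)
    /// becomes a single FMA node (e.g. vfmadd213ps) rather than fmul + fadd.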
1028*0b57cec5SDimitry Andric     bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
1029*0b57cec5SDimitry Andric 
1030*0b57cec5SDimitry Andric     /// Return true if it's profitable to narrow
1031*0b57cec5SDimitry Andric     /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
1032*0b57cec5SDimitry Andric     /// from i32 to i8 but not from i32 to i16.
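    /// (Narrowing to i16 is avoided because 16-bit operations need the 0x66
    /// operand-size prefix, which can incur length-changing-prefix stalls.)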
1033*0b57cec5SDimitry Andric     bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
1034*0b57cec5SDimitry Andric 
1035*0b57cec5SDimitry Andric     /// Given an intrinsic, check whether on this target the intrinsic needs
1036*0b57cec5SDimitry Andric     /// to map to a MemIntrinsicNode (i.e. it touches memory). If so, return
1037*0b57cec5SDimitry Andric     /// true and store the intrinsic information in the IntrinsicInfo that
1038*0b57cec5SDimitry Andric     /// was passed to the function.
1039*0b57cec5SDimitry Andric     bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
1040*0b57cec5SDimitry Andric                             MachineFunction &MF,
1041*0b57cec5SDimitry Andric                             unsigned Intrinsic) const override;
1042*0b57cec5SDimitry Andric 
1043*0b57cec5SDimitry Andric     /// Returns true if the target can instruction select the
1044*0b57cec5SDimitry Andric     /// specified FP immediate natively. If false, the legalizer will
1045*0b57cec5SDimitry Andric     /// materialize the FP immediate as a load from a constant pool.
1046*0b57cec5SDimitry Andric     bool isFPImmLegal(const APFloat &Imm, EVT VT,
1047*0b57cec5SDimitry Andric                       bool ForCodeSize) const override;
1048*0b57cec5SDimitry Andric 
1049*0b57cec5SDimitry Andric     /// Targets can use this to indicate that they only support *some*
1050*0b57cec5SDimitry Andric     /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
1051*0b57cec5SDimitry Andric     /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
1052*0b57cec5SDimitry Andric     /// be legal.
1053*0b57cec5SDimitry Andric     bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
1054*0b57cec5SDimitry Andric 
1055*0b57cec5SDimitry Andric     /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
1056*0b57cec5SDimitry Andric     /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
1057*0b57cec5SDimitry Andric     /// constant pool entry.
1058*0b57cec5SDimitry Andric     bool isVectorClearMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
1059*0b57cec5SDimitry Andric 
1060*0b57cec5SDimitry Andric     /// Returns true if lowering to a jump table is allowed.
1061*0b57cec5SDimitry Andric     bool areJTsAllowed(const Function *Fn) const override;
1062*0b57cec5SDimitry Andric 
1063*0b57cec5SDimitry Andric     /// If true, then instruction selection should
1064*0b57cec5SDimitry Andric     /// seek to shrink the FP constant of the specified type to a smaller type
1065*0b57cec5SDimitry Andric     /// in order to save space and/or reduce runtime.
1066*0b57cec5SDimitry Andric     bool ShouldShrinkFPConstant(EVT VT) const override {
1067*0b57cec5SDimitry Andric       // Don't shrink the FP constant pool if SSE2 is available, since
1068*0b57cec5SDimitry Andric       // cvtss2sd is more expensive than a straight movsd. On the other hand,
1069*0b57cec5SDimitry Andric       // it's important to shrink long double FP constants since fldt is very slow.
1070*0b57cec5SDimitry Andric       return !X86ScalarSSEf64 || VT == MVT::f80;
1071*0b57cec5SDimitry Andric     }
1072*0b57cec5SDimitry Andric 
1073*0b57cec5SDimitry Andric     /// Return true if we believe it is correct and profitable to reduce the
1074*0b57cec5SDimitry Andric     /// load node to a smaller type.
1075*0b57cec5SDimitry Andric     bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1076*0b57cec5SDimitry Andric                                EVT NewVT) const override;
1077*0b57cec5SDimitry Andric 
1078*0b57cec5SDimitry Andric     /// Return true if the specified scalar FP type is computed in an SSE
1079*0b57cec5SDimitry Andric     /// register, not on the X87 floating point stack.
1080*0b57cec5SDimitry Andric     bool isScalarFPTypeInSSEReg(EVT VT) const {
1081*0b57cec5SDimitry Andric       return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 requires SSE2
1082*0b57cec5SDimitry Andric              (VT == MVT::f32 && X86ScalarSSEf32);   // f32 requires SSE1
1083*0b57cec5SDimitry Andric     }
1084*0b57cec5SDimitry Andric 
1085*0b57cec5SDimitry Andric     /// Returns true if it is beneficial to convert a load of a constant
1086*0b57cec5SDimitry Andric     /// to just the constant itself.
1087*0b57cec5SDimitry Andric     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1088*0b57cec5SDimitry Andric                                            Type *Ty) const override;
1089*0b57cec5SDimitry Andric 
1090*0b57cec5SDimitry Andric     bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const override;
1091*0b57cec5SDimitry Andric 
1092*0b57cec5SDimitry Andric     bool convertSelectOfConstantsToMath(EVT VT) const override;
1093*0b57cec5SDimitry Andric 
1094*0b57cec5SDimitry Andric     bool decomposeMulByConstant(EVT VT, SDValue C) const override;
1095*0b57cec5SDimitry Andric 
1096*0b57cec5SDimitry Andric     bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
1097*0b57cec5SDimitry Andric                                   bool IsSigned) const override;
1098*0b57cec5SDimitry Andric 
1099*0b57cec5SDimitry Andric     /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
1100*0b57cec5SDimitry Andric     /// with this index.
1101*0b57cec5SDimitry Andric     bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
1102*0b57cec5SDimitry Andric                                  unsigned Index) const override;
1103*0b57cec5SDimitry Andric 
1104*0b57cec5SDimitry Andric     /// Scalar ops always have equal or better analysis/performance/power than
1105*0b57cec5SDimitry Andric     /// the vector equivalent, so this always makes sense if the scalar op is
1106*0b57cec5SDimitry Andric     /// supported.
1107*0b57cec5SDimitry Andric     bool shouldScalarizeBinop(SDValue) const override;
1108*0b57cec5SDimitry Andric 
1109*0b57cec5SDimitry Andric     /// Extract of a scalar FP value from index 0 of a vector is free.
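    /// Illustratively, extractelement <4 x float> %v, i64 0 needs no shuffle
    /// or move: the scalar already occupies the low lane of the source XMM
    /// register.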
1110*0b57cec5SDimitry Andric     bool isExtractVecEltCheap(EVT VT, unsigned Index) const override {
1111*0b57cec5SDimitry Andric       EVT EltVT = VT.getScalarType();
1112*0b57cec5SDimitry Andric       return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0;
1113*0b57cec5SDimitry Andric     }
1114*0b57cec5SDimitry Andric 
1115*0b57cec5SDimitry Andric     /// Overflow nodes should get combined/lowered to optimal instructions
1116*0b57cec5SDimitry Andric     /// (they should allow eliminating explicit compares by getting flags from
1117*0b57cec5SDimitry Andric     /// math ops).
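    /// Illustratively, lowering llvm.sadd.with.overflow.i32 this way lets the
    /// overflow branch consume OF straight from the add (addl + jo) instead
    /// of materializing the condition with a separate compare.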
1118*0b57cec5SDimitry Andric     bool shouldFormOverflowOp(unsigned Opcode, EVT VT) const override;
1119*0b57cec5SDimitry Andric 
1120*0b57cec5SDimitry Andric     bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem,
1121*0b57cec5SDimitry Andric                                       unsigned AddrSpace) const override {
1122*0b57cec5SDimitry Andric       // If we can replace more than 2 scalar stores, there will be a reduction
1123*0b57cec5SDimitry Andric       // in instructions even after we add a vector constant load.
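      // Illustrative sketch: four adjacent i32 constant stores can become a
      // single constant-pool load plus one vector store (e.g. movaps +
      // movups), which is fewer instructions than four scalar movs.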
1124*0b57cec5SDimitry Andric       return NumElem > 2;
1125*0b57cec5SDimitry Andric     }
1126*0b57cec5SDimitry Andric 
1127*0b57cec5SDimitry Andric     bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
1128*0b57cec5SDimitry Andric                                  const SelectionDAG &DAG,
1129*0b57cec5SDimitry Andric                                  const MachineMemOperand &MMO) const override;
1130*0b57cec5SDimitry Andric 
1131*0b57cec5SDimitry Andric     /// Intel processors have a unified instruction and data cache.
1132*0b57cec5SDimitry Andric     const char *getClearCacheBuiltinName() const override {
1133*0b57cec5SDimitry Andric       return nullptr; // nothing to do, move along.
1134*0b57cec5SDimitry Andric     }
1135*0b57cec5SDimitry Andric 
1136*0b57cec5SDimitry Andric     unsigned getRegisterByName(const char *RegName, EVT VT,
1137*0b57cec5SDimitry Andric                                SelectionDAG &DAG) const override;
1138*0b57cec5SDimitry Andric 
1139*0b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
1140*0b57cec5SDimitry Andric     /// exception address on entry to an EH pad.
1141*0b57cec5SDimitry Andric     unsigned
1142*0b57cec5SDimitry Andric     getExceptionPointerRegister(const Constant *PersonalityFn) const override;
1143*0b57cec5SDimitry Andric 
1144*0b57cec5SDimitry Andric     /// If a physical register, this returns the register that receives the
1145*0b57cec5SDimitry Andric     /// exception typeid on entry to a landing pad.
1146*0b57cec5SDimitry Andric     unsigned
1147*0b57cec5SDimitry Andric     getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
1148*0b57cec5SDimitry Andric 
1149*0b57cec5SDimitry Andric     bool needsFixedCatchObjects() const override;
1150*0b57cec5SDimitry Andric 
1151*0b57cec5SDimitry Andric     /// This method returns a target specific FastISel object,
1152*0b57cec5SDimitry Andric     /// or null if the target does not support "fast" ISel.
1153*0b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
1154*0b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo) const override;
1155*0b57cec5SDimitry Andric 
1156*0b57cec5SDimitry Andric     /// If the target has a standard location for the stack protector cookie,
1157*0b57cec5SDimitry Andric     /// returns the address of that location. Otherwise, returns nullptr.
1158*0b57cec5SDimitry Andric     Value *getIRStackGuard(IRBuilder<> &IRB) const override;
1159*0b57cec5SDimitry Andric 
1160*0b57cec5SDimitry Andric     bool useLoadStackGuardNode() const override;
1161*0b57cec5SDimitry Andric     bool useStackGuardXorFP() const override;
1162*0b57cec5SDimitry Andric     void insertSSPDeclarations(Module &M) const override;
1163*0b57cec5SDimitry Andric     Value *getSDagStackGuard(const Module &M) const override;
1164*0b57cec5SDimitry Andric     Function *getSSPStackGuardCheck(const Module &M) const override;
1165*0b57cec5SDimitry Andric     SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
1166*0b57cec5SDimitry Andric                                 const SDLoc &DL) const override;
1167*0b57cec5SDimitry Andric 
1169*0b57cec5SDimitry Andric     /// Return the location at which this target stores the SafeStack
1170*0b57cec5SDimitry Andric     /// pointer, which may live at a fixed offset in a non-standard
1171*0b57cec5SDimitry Andric     /// address space.
1172*0b57cec5SDimitry Andric     Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
1173*0b57cec5SDimitry Andric 
1174*0b57cec5SDimitry Andric     SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
1175*0b57cec5SDimitry Andric                       SelectionDAG &DAG) const;
1176*0b57cec5SDimitry Andric 
1177*0b57cec5SDimitry Andric     bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
1178*0b57cec5SDimitry Andric 
1179*0b57cec5SDimitry Andric     /// Customize the preferred legalization strategy for certain types.
1180*0b57cec5SDimitry Andric     LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
1181*0b57cec5SDimitry Andric 
1182*0b57cec5SDimitry Andric     MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
1183*0b57cec5SDimitry Andric                                       EVT VT) const override;
1184*0b57cec5SDimitry Andric 
1185*0b57cec5SDimitry Andric     unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1186*0b57cec5SDimitry Andric                                            CallingConv::ID CC,
1187*0b57cec5SDimitry Andric                                            EVT VT) const override;
1188*0b57cec5SDimitry Andric 
1189*0b57cec5SDimitry Andric     bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
1190*0b57cec5SDimitry Andric 
1191*0b57cec5SDimitry Andric     bool supportSwiftError() const override;
1192*0b57cec5SDimitry Andric 
1193*0b57cec5SDimitry Andric     StringRef getStackProbeSymbolName(MachineFunction &MF) const override;
1194*0b57cec5SDimitry Andric 
1195*0b57cec5SDimitry Andric     bool hasVectorBlend() const override { return true; }
1196*0b57cec5SDimitry Andric 
1197*0b57cec5SDimitry Andric     unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
1198*0b57cec5SDimitry Andric 
1199*0b57cec5SDimitry Andric     /// Lower interleaved load(s) into target specific
1200*0b57cec5SDimitry Andric     /// instructions/intrinsics.
1201*0b57cec5SDimitry Andric     bool lowerInterleavedLoad(LoadInst *LI,
1202*0b57cec5SDimitry Andric                               ArrayRef<ShuffleVectorInst *> Shuffles,
1203*0b57cec5SDimitry Andric                               ArrayRef<unsigned> Indices,
1204*0b57cec5SDimitry Andric                               unsigned Factor) const override;
1205*0b57cec5SDimitry Andric 
1206*0b57cec5SDimitry Andric     /// Lower interleaved store(s) into target specific
1207*0b57cec5SDimitry Andric     /// instructions/intrinsics.
1208*0b57cec5SDimitry Andric     bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
1209*0b57cec5SDimitry Andric                                unsigned Factor) const override;
1210*0b57cec5SDimitry Andric 
1211*0b57cec5SDimitry Andric     SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
1212*0b57cec5SDimitry Andric                                    SDValue Addr,
1213*0b57cec5SDimitry Andric                                    SelectionDAG &DAG) const override;
1214*0b57cec5SDimitry Andric 
1215*0b57cec5SDimitry Andric   protected:
1216*0b57cec5SDimitry Andric     std::pair<const TargetRegisterClass *, uint8_t>
1217*0b57cec5SDimitry Andric     findRepresentativeClass(const TargetRegisterInfo *TRI,
1218*0b57cec5SDimitry Andric                             MVT VT) const override;
1219*0b57cec5SDimitry Andric 
1220*0b57cec5SDimitry Andric   private:
1221*0b57cec5SDimitry Andric     /// Keep a reference to the X86Subtarget around so that we can
1222*0b57cec5SDimitry Andric     /// make the right decision when generating code for different targets.
1223*0b57cec5SDimitry Andric     const X86Subtarget &Subtarget;
1224*0b57cec5SDimitry Andric 
1225*0b57cec5SDimitry Andric     /// Select between SSE or x87 floating point ops.
1226*0b57cec5SDimitry Andric     /// When SSE is available, use it for f32 operations.
1227*0b57cec5SDimitry Andric     /// When SSE2 is available, use it for f64 operations.
1228*0b57cec5SDimitry Andric     bool X86ScalarSSEf32;
1229*0b57cec5SDimitry Andric     bool X86ScalarSSEf64;
1230*0b57cec5SDimitry Andric 
1231*0b57cec5SDimitry Andric     /// A list of legal FP immediates.
1232*0b57cec5SDimitry Andric     std::vector<APFloat> LegalFPImmediates;
1233*0b57cec5SDimitry Andric 
1234*0b57cec5SDimitry Andric     /// Indicate that this x86 target can instruction
1235*0b57cec5SDimitry Andric     /// select the specified FP immediate natively.
1236*0b57cec5SDimitry Andric     void addLegalFPImmediate(const APFloat& Imm) {
1237*0b57cec5SDimitry Andric       LegalFPImmediates.push_back(Imm);
1238*0b57cec5SDimitry Andric     }
1239*0b57cec5SDimitry Andric 
1240*0b57cec5SDimitry Andric     SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1241*0b57cec5SDimitry Andric                             CallingConv::ID CallConv, bool isVarArg,
1242*0b57cec5SDimitry Andric                             const SmallVectorImpl<ISD::InputArg> &Ins,
1243*0b57cec5SDimitry Andric                             const SDLoc &dl, SelectionDAG &DAG,
1244*0b57cec5SDimitry Andric                             SmallVectorImpl<SDValue> &InVals,
1245*0b57cec5SDimitry Andric                             uint32_t *RegMask) const;
1246*0b57cec5SDimitry Andric     SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
1247*0b57cec5SDimitry Andric                              const SmallVectorImpl<ISD::InputArg> &ArgInfo,
1248*0b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
1249*0b57cec5SDimitry Andric                              const CCValAssign &VA, MachineFrameInfo &MFI,
1250*0b57cec5SDimitry Andric                              unsigned i) const;
1251*0b57cec5SDimitry Andric     SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
1252*0b57cec5SDimitry Andric                              const SDLoc &dl, SelectionDAG &DAG,
1253*0b57cec5SDimitry Andric                              const CCValAssign &VA,
1254*0b57cec5SDimitry Andric                              ISD::ArgFlagsTy Flags) const;
1255*0b57cec5SDimitry Andric 
1256*0b57cec5SDimitry Andric     // Call lowering helpers.
1257*0b57cec5SDimitry Andric 
1258*0b57cec5SDimitry Andric     /// Check whether the call is eligible for tail call optimization. Targets
1259*0b57cec5SDimitry Andric     /// that want to do tail call optimization should implement this function.
1260*0b57cec5SDimitry Andric     bool IsEligibleForTailCallOptimization(SDValue Callee,
1261*0b57cec5SDimitry Andric                                            CallingConv::ID CalleeCC,
1262*0b57cec5SDimitry Andric                                            bool isVarArg,
1263*0b57cec5SDimitry Andric                                            bool isCalleeStructRet,
1264*0b57cec5SDimitry Andric                                            bool isCallerStructRet,
1265*0b57cec5SDimitry Andric                                            Type *RetTy,
1266*0b57cec5SDimitry Andric                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
1267*0b57cec5SDimitry Andric                                     const SmallVectorImpl<SDValue> &OutVals,
1268*0b57cec5SDimitry Andric                                     const SmallVectorImpl<ISD::InputArg> &Ins,
1269*0b57cec5SDimitry Andric                                            SelectionDAG& DAG) const;
1270*0b57cec5SDimitry Andric     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
1271*0b57cec5SDimitry Andric                                     SDValue Chain, bool IsTailCall,
1272*0b57cec5SDimitry Andric                                     bool Is64Bit, int FPDiff,
1273*0b57cec5SDimitry Andric                                     const SDLoc &dl) const;
1274*0b57cec5SDimitry Andric 
1275*0b57cec5SDimitry Andric     unsigned GetAlignedArgumentStackSize(unsigned StackSize,
1276*0b57cec5SDimitry Andric                                          SelectionDAG &DAG) const;
1277*0b57cec5SDimitry Andric 
1278*0b57cec5SDimitry Andric     unsigned getAddressSpace() const;
1279*0b57cec5SDimitry Andric 
1280*0b57cec5SDimitry Andric     SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool isSigned) const;
1281*0b57cec5SDimitry Andric 
1282*0b57cec5SDimitry Andric     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1283*0b57cec5SDimitry Andric     SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
1284*0b57cec5SDimitry Andric     SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
1285*0b57cec5SDimitry Andric     SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
1286*0b57cec5SDimitry Andric 
1287*0b57cec5SDimitry Andric     unsigned getGlobalWrapperKind(const GlobalValue *GV = nullptr,
1288*0b57cec5SDimitry Andric                                   const unsigned char OpFlags = 0) const;
1289*0b57cec5SDimitry Andric     SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
1290*0b57cec5SDimitry Andric     SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
1291*0b57cec5SDimitry Andric     SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
1292*0b57cec5SDimitry Andric     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1293*0b57cec5SDimitry Andric     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
1294*0b57cec5SDimitry Andric 
1295*0b57cec5SDimitry Andric     /// Creates target global address or external symbol nodes for calls or
1296*0b57cec5SDimitry Andric     /// other uses.
1297*0b57cec5SDimitry Andric     SDValue LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
1298*0b57cec5SDimitry Andric                                   bool ForCall) const;
1299*0b57cec5SDimitry Andric 
1300*0b57cec5SDimitry Andric     SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1301*0b57cec5SDimitry Andric     SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1302*0b57cec5SDimitry Andric     SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
1303*0b57cec5SDimitry Andric     SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
1304*0b57cec5SDimitry Andric     SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
1305*0b57cec5SDimitry Andric     SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
1306*0b57cec5SDimitry Andric     SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
1307*0b57cec5SDimitry Andric     SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
1308*0b57cec5SDimitry Andric     SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
1309*0b57cec5SDimitry Andric     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
1310*0b57cec5SDimitry Andric     SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
1311*0b57cec5SDimitry Andric     SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
1312*0b57cec5SDimitry Andric     SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
1313*0b57cec5SDimitry Andric     SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
1314*0b57cec5SDimitry Andric     SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
1315*0b57cec5SDimitry Andric     SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
1316*0b57cec5SDimitry Andric     SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
1317*0b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
1318*0b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
1319*0b57cec5SDimitry Andric     SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
1320*0b57cec5SDimitry Andric     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1321*0b57cec5SDimitry Andric     SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1322*0b57cec5SDimitry Andric     SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
1323*0b57cec5SDimitry Andric     SDValue LowerGC_TRANSITION_START(SDValue Op, SelectionDAG &DAG) const;
1324*0b57cec5SDimitry Andric     SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
1325*0b57cec5SDimitry Andric     SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1326*0b57cec5SDimitry Andric 
1327*0b57cec5SDimitry Andric     SDValue
1328*0b57cec5SDimitry Andric     LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1329*0b57cec5SDimitry Andric                          const SmallVectorImpl<ISD::InputArg> &Ins,
1330*0b57cec5SDimitry Andric                          const SDLoc &dl, SelectionDAG &DAG,
1331*0b57cec5SDimitry Andric                          SmallVectorImpl<SDValue> &InVals) const override;
1332*0b57cec5SDimitry Andric     SDValue LowerCall(CallLoweringInfo &CLI,
1333*0b57cec5SDimitry Andric                       SmallVectorImpl<SDValue> &InVals) const override;
1334*0b57cec5SDimitry Andric 
1335*0b57cec5SDimitry Andric     SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1336*0b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
1337*0b57cec5SDimitry Andric                         const SmallVectorImpl<SDValue> &OutVals,
1338*0b57cec5SDimitry Andric                         const SDLoc &dl, SelectionDAG &DAG) const override;
1339*0b57cec5SDimitry Andric 
1340*0b57cec5SDimitry Andric     bool supportSplitCSR(MachineFunction *MF) const override {
1341*0b57cec5SDimitry Andric       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
1342*0b57cec5SDimitry Andric           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
1343*0b57cec5SDimitry Andric     }
1344*0b57cec5SDimitry Andric     void initializeSplitCSR(MachineBasicBlock *Entry) const override;
1345*0b57cec5SDimitry Andric     void insertCopiesSplitCSR(
1346*0b57cec5SDimitry Andric       MachineBasicBlock *Entry,
1347*0b57cec5SDimitry Andric       const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
1348*0b57cec5SDimitry Andric 
1349*0b57cec5SDimitry Andric     bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
1350*0b57cec5SDimitry Andric 
1351*0b57cec5SDimitry Andric     bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1352*0b57cec5SDimitry Andric 
1353*0b57cec5SDimitry Andric     EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
1354*0b57cec5SDimitry Andric                             ISD::NodeType ExtendKind) const override;
1355*0b57cec5SDimitry Andric 
1356*0b57cec5SDimitry Andric     bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1357*0b57cec5SDimitry Andric                         bool isVarArg,
1358*0b57cec5SDimitry Andric                         const SmallVectorImpl<ISD::OutputArg> &Outs,
1359*0b57cec5SDimitry Andric                         LLVMContext &Context) const override;
1360*0b57cec5SDimitry Andric 
1361*0b57cec5SDimitry Andric     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
1362*0b57cec5SDimitry Andric 
1363*0b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
1364*0b57cec5SDimitry Andric     shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
1365*0b57cec5SDimitry Andric     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
1366*0b57cec5SDimitry Andric     TargetLoweringBase::AtomicExpansionKind
1367*0b57cec5SDimitry Andric     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
1368*0b57cec5SDimitry Andric 
1369*0b57cec5SDimitry Andric     LoadInst *
1370*0b57cec5SDimitry Andric     lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
1371*0b57cec5SDimitry Andric 
1372*0b57cec5SDimitry Andric     bool needsCmpXchgNb(Type *MemType) const;
1373*0b57cec5SDimitry Andric 
1374*0b57cec5SDimitry Andric     void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
1375*0b57cec5SDimitry Andric                                 MachineBasicBlock *DispatchBB, int FI) const;
1376*0b57cec5SDimitry Andric 
1377*0b57cec5SDimitry Andric     /// Utility function to emit the low-level va_arg code for x86-64.
1378*0b57cec5SDimitry Andric     MachineBasicBlock *
1379*0b57cec5SDimitry Andric     EmitVAARG64WithCustomInserter(MachineInstr &MI,
1380*0b57cec5SDimitry Andric                                   MachineBasicBlock *MBB) const;
1381*0b57cec5SDimitry Andric 
1382*0b57cec5SDimitry Andric     /// Utility function to emit the xmm reg save portion of va_start.
1383*0b57cec5SDimitry Andric     MachineBasicBlock *
1384*0b57cec5SDimitry Andric     EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
1385*0b57cec5SDimitry Andric                                              MachineBasicBlock *BB) const;
1386*0b57cec5SDimitry Andric 
1387*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCascadedSelect(MachineInstr &MI1,
1388*0b57cec5SDimitry Andric                                                  MachineInstr &MI2,
1389*0b57cec5SDimitry Andric                                                  MachineBasicBlock *BB) const;
1390*0b57cec5SDimitry Andric 
1391*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
1392*0b57cec5SDimitry Andric                                          MachineBasicBlock *BB) const;
1393*0b57cec5SDimitry Andric 
1394*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr &I,
1395*0b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
1396*0b57cec5SDimitry Andric 
1397*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
1398*0b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
1399*0b57cec5SDimitry Andric 
1400*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
1401*0b57cec5SDimitry Andric                                            MachineBasicBlock *BB) const;
1402*0b57cec5SDimitry Andric 
1403*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
1404*0b57cec5SDimitry Andric                                             MachineBasicBlock *BB) const;
1405*0b57cec5SDimitry Andric 
1406*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
1407*0b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
1408*0b57cec5SDimitry Andric 
1409*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
1410*0b57cec5SDimitry Andric                                           MachineBasicBlock *BB) const;
1411*0b57cec5SDimitry Andric 
1412*0b57cec5SDimitry Andric     MachineBasicBlock *EmitLoweredRetpoline(MachineInstr &MI,
1413*0b57cec5SDimitry Andric                                             MachineBasicBlock *BB) const;
1414*0b57cec5SDimitry Andric 
1415*0b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
1416*0b57cec5SDimitry Andric                                         MachineBasicBlock *MBB) const;
1417*0b57cec5SDimitry Andric 
1418*0b57cec5SDimitry Andric     void emitSetJmpShadowStackFix(MachineInstr &MI,
1419*0b57cec5SDimitry Andric                                   MachineBasicBlock *MBB) const;
1420*0b57cec5SDimitry Andric 
1421*0b57cec5SDimitry Andric     MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
1422*0b57cec5SDimitry Andric                                          MachineBasicBlock *MBB) const;
1423*0b57cec5SDimitry Andric 
1424*0b57cec5SDimitry Andric     MachineBasicBlock *emitLongJmpShadowStackFix(MachineInstr &MI,
1425*0b57cec5SDimitry Andric                                                  MachineBasicBlock *MBB) const;
1426*0b57cec5SDimitry Andric 
1427*0b57cec5SDimitry Andric     MachineBasicBlock *emitFMA3Instr(MachineInstr &MI,
1428*0b57cec5SDimitry Andric                                      MachineBasicBlock *MBB) const;
1429*0b57cec5SDimitry Andric 
1430*0b57cec5SDimitry Andric     MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
1431*0b57cec5SDimitry Andric                                              MachineBasicBlock *MBB) const;
1432*0b57cec5SDimitry Andric 
1433*0b57cec5SDimitry Andric     /// Emit nodes that will be selected as "cmp Op0,Op1", or something
1434*0b57cec5SDimitry Andric     /// equivalent, for use with the given x86 condition code.
1435*0b57cec5SDimitry Andric     SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, const SDLoc &dl,
1436*0b57cec5SDimitry Andric                     SelectionDAG &DAG) const;
1437*0b57cec5SDimitry Andric 
1438*0b57cec5SDimitry Andric     /// Convert a comparison if required by the subtarget.
1439*0b57cec5SDimitry Andric     SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
1440*0b57cec5SDimitry Andric 
1441*0b57cec5SDimitry Andric     /// Emit flags for the given setcc condition and operands. Also returns the
1442*0b57cec5SDimitry Andric     /// corresponding X86 condition code constant in X86CC.
1443*0b57cec5SDimitry Andric     SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1,
1444*0b57cec5SDimitry Andric                               ISD::CondCode CC, const SDLoc &dl,
1445*0b57cec5SDimitry Andric                               SelectionDAG &DAG,
1446*0b57cec5SDimitry Andric                               SDValue &X86CC) const;
1447*0b57cec5SDimitry Andric 
1448*0b57cec5SDimitry Andric     /// Check if replacement of SQRT with RSQRT should be disabled.
1449*0b57cec5SDimitry Andric     bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override;
1450*0b57cec5SDimitry Andric 
1451*0b57cec5SDimitry Andric     /// Use rsqrt* to speed up sqrt calculations.
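    /// Illustratively, starting from the hardware estimate x0 = rsqrtps(a),
    /// one Newton-Raphson step refines it as
    ///   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
    /// roughly doubling the number of correct bits per step.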
1452*0b57cec5SDimitry Andric     SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1453*0b57cec5SDimitry Andric                             int &RefinementSteps, bool &UseOneConstNR,
1454*0b57cec5SDimitry Andric                             bool Reciprocal) const override;
1455*0b57cec5SDimitry Andric 
1456*0b57cec5SDimitry Andric     /// Use rcp* to speed up fdiv calculations.
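    /// Illustratively, starting from x0 = rcpps(a), one Newton-Raphson step
    /// refines it as x1 = x0 * (2.0 - a * x0) before the multiply replaces
    /// the division.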
1457*0b57cec5SDimitry Andric     SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1458*0b57cec5SDimitry Andric                              int &RefinementSteps) const override;
1459*0b57cec5SDimitry Andric 
1460*0b57cec5SDimitry Andric     /// Reassociate floating point divisions into multiply by reciprocal.
1461*0b57cec5SDimitry Andric     unsigned combineRepeatedFPDivisors() const override;
1462*0b57cec5SDimitry Andric   };
1463*0b57cec5SDimitry Andric 
1464*0b57cec5SDimitry Andric   namespace X86 {
1465*0b57cec5SDimitry Andric     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
1466*0b57cec5SDimitry Andric                              const TargetLibraryInfo *libInfo);
1467*0b57cec5SDimitry Andric   } // end namespace X86
1468*0b57cec5SDimitry Andric 
1469*0b57cec5SDimitry Andric   // Base class for all X86 non-masked store operations.
1470*0b57cec5SDimitry Andric   class X86StoreSDNode : public MemSDNode {
1471*0b57cec5SDimitry Andric   public:
1472*0b57cec5SDimitry Andric     X86StoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1473*0b57cec5SDimitry Andric                    SDVTList VTs, EVT MemVT,
1474*0b57cec5SDimitry Andric                    MachineMemOperand *MMO)
1475*0b57cec5SDimitry Andric       : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}
1476*0b57cec5SDimitry Andric     const SDValue &getValue() const { return getOperand(1); }
1477*0b57cec5SDimitry Andric     const SDValue &getBasePtr() const { return getOperand(2); }
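    // Operand 0 is the chain; the stored value and base pointer follow,
    // mirroring the operand order of a regular StoreSDNode.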
1478*0b57cec5SDimitry Andric 
1479*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1480*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VTRUNCSTORES ||
1481*0b57cec5SDimitry Andric         N->getOpcode() == X86ISD::VTRUNCSTOREUS;
1482*0b57cec5SDimitry Andric     }
1483*0b57cec5SDimitry Andric   };
1484*0b57cec5SDimitry Andric 
1485*0b57cec5SDimitry Andric   // Base class for all X86 masked store operations.
1486*0b57cec5SDimitry Andric   // The class has the same order of operands as MaskedStoreSDNode for
1487*0b57cec5SDimitry Andric   // convenience.
1488*0b57cec5SDimitry Andric   class X86MaskedStoreSDNode : public MemSDNode {
1489*0b57cec5SDimitry Andric   public:
1490*0b57cec5SDimitry Andric     X86MaskedStoreSDNode(unsigned Opcode, unsigned Order,
1491*0b57cec5SDimitry Andric                          const DebugLoc &dl, SDVTList VTs, EVT MemVT,
1492*0b57cec5SDimitry Andric                          MachineMemOperand *MMO)
1493*0b57cec5SDimitry Andric       : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}
1494*0b57cec5SDimitry Andric 
1495*0b57cec5SDimitry Andric     const SDValue &getValue()   const { return getOperand(1); }
1496*0b57cec5SDimitry Andric     const SDValue &getBasePtr() const { return getOperand(2); }
1497*0b57cec5SDimitry Andric     const SDValue &getMask()    const { return getOperand(3); }
1498*0b57cec5SDimitry Andric 
1499*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1500*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VMTRUNCSTORES ||
1501*0b57cec5SDimitry Andric         N->getOpcode() == X86ISD::VMTRUNCSTOREUS;
1502*0b57cec5SDimitry Andric     }
1503*0b57cec5SDimitry Andric   };
1504*0b57cec5SDimitry Andric 
1505*0b57cec5SDimitry Andric   // X86 Truncating Store with Signed saturation.
1506*0b57cec5SDimitry Andric   class TruncSStoreSDNode : public X86StoreSDNode {
1507*0b57cec5SDimitry Andric   public:
1508*0b57cec5SDimitry Andric     TruncSStoreSDNode(unsigned Order, const DebugLoc &dl,
1509*0b57cec5SDimitry Andric                         SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
1510*0b57cec5SDimitry Andric       : X86StoreSDNode(X86ISD::VTRUNCSTORES, Order, dl, VTs, MemVT, MMO) {}
1511*0b57cec5SDimitry Andric 
1512*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1513*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VTRUNCSTORES;
1514*0b57cec5SDimitry Andric     }
1515*0b57cec5SDimitry Andric   };
1516*0b57cec5SDimitry Andric 
1517*0b57cec5SDimitry Andric   // X86 Truncating Store with Unsigned saturation.
1518*0b57cec5SDimitry Andric   class TruncUSStoreSDNode : public X86StoreSDNode {
1519*0b57cec5SDimitry Andric   public:
1520*0b57cec5SDimitry Andric     TruncUSStoreSDNode(unsigned Order, const DebugLoc &dl,
1521*0b57cec5SDimitry Andric                       SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
1522*0b57cec5SDimitry Andric       : X86StoreSDNode(X86ISD::VTRUNCSTOREUS, Order, dl, VTs, MemVT, MMO) {}
1523*0b57cec5SDimitry Andric 
1524*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1525*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VTRUNCSTOREUS;
1526*0b57cec5SDimitry Andric     }
1527*0b57cec5SDimitry Andric   };
1528*0b57cec5SDimitry Andric 
1529*0b57cec5SDimitry Andric   // X86 Truncating Masked Store with Signed saturation.
1530*0b57cec5SDimitry Andric   class MaskedTruncSStoreSDNode : public X86MaskedStoreSDNode {
1531*0b57cec5SDimitry Andric   public:
1532*0b57cec5SDimitry Andric     MaskedTruncSStoreSDNode(unsigned Order, const DebugLoc &dl,
1533*0b57cec5SDimitry Andric                             SDVTList VTs, EVT MemVT,
1534*0b57cec5SDimitry Andric                             MachineMemOperand *MMO)
1535*0b57cec5SDimitry Andric       : X86MaskedStoreSDNode(X86ISD::VMTRUNCSTORES, Order, dl, VTs, MemVT, MMO) {}
1536*0b57cec5SDimitry Andric 
1537*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1538*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VMTRUNCSTORES;
1539*0b57cec5SDimitry Andric     }
1540*0b57cec5SDimitry Andric   };
1541*0b57cec5SDimitry Andric 
1542*0b57cec5SDimitry Andric   // X86 Truncating Masked Store with Unsigned saturation.
1543*0b57cec5SDimitry Andric   class MaskedTruncUSStoreSDNode : public X86MaskedStoreSDNode {
1544*0b57cec5SDimitry Andric   public:
1545*0b57cec5SDimitry Andric     MaskedTruncUSStoreSDNode(unsigned Order, const DebugLoc &dl,
1546*0b57cec5SDimitry Andric                              SDVTList VTs, EVT MemVT,
1547*0b57cec5SDimitry Andric                              MachineMemOperand *MMO)
1548*0b57cec5SDimitry Andric       : X86MaskedStoreSDNode(X86ISD::VMTRUNCSTOREUS, Order, dl, VTs, MemVT, MMO) {}
1549*0b57cec5SDimitry Andric 
1550*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1551*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::VMTRUNCSTOREUS;
1552*0b57cec5SDimitry Andric     }
1553*0b57cec5SDimitry Andric   };
1554*0b57cec5SDimitry Andric 
1555*0b57cec5SDimitry Andric   // X86 specific Gather/Scatter nodes.
1556*0b57cec5SDimitry Andric   // The class has the same order of operands as MaskedGatherScatterSDNode for
1557*0b57cec5SDimitry Andric   // convenience.
1558*0b57cec5SDimitry Andric   class X86MaskedGatherScatterSDNode : public MemSDNode {
1559*0b57cec5SDimitry Andric   public:
1560*0b57cec5SDimitry Andric     X86MaskedGatherScatterSDNode(unsigned Opc, unsigned Order,
1561*0b57cec5SDimitry Andric                                  const DebugLoc &dl, SDVTList VTs, EVT MemVT,
1562*0b57cec5SDimitry Andric                                  MachineMemOperand *MMO)
1563*0b57cec5SDimitry Andric         : MemSDNode(Opc, Order, dl, VTs, MemVT, MMO) {}
1564*0b57cec5SDimitry Andric 
1565*0b57cec5SDimitry Andric     const SDValue &getBasePtr() const { return getOperand(3); }
1566*0b57cec5SDimitry Andric     const SDValue &getIndex()   const { return getOperand(4); }
1567*0b57cec5SDimitry Andric     const SDValue &getMask()    const { return getOperand(2); }
1568*0b57cec5SDimitry Andric     const SDValue &getScale()   const { return getOperand(5); }
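    // Operand 0 is the chain and operand 1 is the per-flavor data operand
    // (the pass-through value for gathers, the stored value for scatters);
    // see the subclasses below.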
1569*0b57cec5SDimitry Andric 
1570*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1571*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER ||
1572*0b57cec5SDimitry Andric              N->getOpcode() == X86ISD::MSCATTER;
1573*0b57cec5SDimitry Andric     }
1574*0b57cec5SDimitry Andric   };
1575*0b57cec5SDimitry Andric 
1576*0b57cec5SDimitry Andric   class X86MaskedGatherSDNode : public X86MaskedGatherScatterSDNode {
1577*0b57cec5SDimitry Andric   public:
1578*0b57cec5SDimitry Andric     X86MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
1579*0b57cec5SDimitry Andric                           EVT MemVT, MachineMemOperand *MMO)
1580*0b57cec5SDimitry Andric         : X86MaskedGatherScatterSDNode(X86ISD::MGATHER, Order, dl, VTs, MemVT,
1581*0b57cec5SDimitry Andric                                        MMO) {}
1582*0b57cec5SDimitry Andric 
1583*0b57cec5SDimitry Andric     const SDValue &getPassThru() const { return getOperand(1); }
1584*0b57cec5SDimitry Andric 
1585*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1586*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MGATHER;
1587*0b57cec5SDimitry Andric     }
1588*0b57cec5SDimitry Andric   };
1589*0b57cec5SDimitry Andric 
1590*0b57cec5SDimitry Andric   class X86MaskedScatterSDNode : public X86MaskedGatherScatterSDNode {
1591*0b57cec5SDimitry Andric   public:
1592*0b57cec5SDimitry Andric     X86MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
1593*0b57cec5SDimitry Andric                            EVT MemVT, MachineMemOperand *MMO)
1594*0b57cec5SDimitry Andric         : X86MaskedGatherScatterSDNode(X86ISD::MSCATTER, Order, dl, VTs, MemVT,
1595*0b57cec5SDimitry Andric                                        MMO) {}
1596*0b57cec5SDimitry Andric 
1597*0b57cec5SDimitry Andric     const SDValue &getValue() const { return getOperand(1); }
1598*0b57cec5SDimitry Andric 
1599*0b57cec5SDimitry Andric     static bool classof(const SDNode *N) {
1600*0b57cec5SDimitry Andric       return N->getOpcode() == X86ISD::MSCATTER;
1601*0b57cec5SDimitry Andric     }
1602*0b57cec5SDimitry Andric   };
1603*0b57cec5SDimitry Andric 
1604*0b57cec5SDimitry Andric   /// Generate unpacklo/unpackhi shuffle mask.
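  /// For example, for VT = v4i32 with Lo = true and Unary = false this
  /// produces the unpcklo interleave mask <0, 4, 1, 5> (punpckldq/unpcklps);
  /// with Lo = false it produces the unpckhi mask <2, 6, 3, 7>.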
1605*0b57cec5SDimitry Andric   template <typename T = int>
1606*0b57cec5SDimitry Andric   void createUnpackShuffleMask(MVT VT, SmallVectorImpl<T> &Mask, bool Lo,
1607*0b57cec5SDimitry Andric                                bool Unary) {
1608*0b57cec5SDimitry Andric     assert(Mask.empty() && "Expected an empty shuffle mask vector");
1609*0b57cec5SDimitry Andric     int NumElts = VT.getVectorNumElements();
1610*0b57cec5SDimitry Andric     int NumEltsInLane = 128 / VT.getScalarSizeInBits();
1611*0b57cec5SDimitry Andric     for (int i = 0; i < NumElts; ++i) {
1612*0b57cec5SDimitry Andric       unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
1613*0b57cec5SDimitry Andric       int Pos = (i % NumEltsInLane) / 2 + LaneStart;
1614*0b57cec5SDimitry Andric       Pos += (Unary ? 0 : NumElts * (i % 2));
1615*0b57cec5SDimitry Andric       Pos += (Lo ? 0 : NumEltsInLane / 2);
1616*0b57cec5SDimitry Andric       Mask.push_back(Pos);
1617*0b57cec5SDimitry Andric     }
1618*0b57cec5SDimitry Andric   }
1619*0b57cec5SDimitry Andric 
1620*0b57cec5SDimitry Andric   /// Helper function to scale a shuffle or target shuffle mask, replacing each
1621*0b57cec5SDimitry Andric   /// mask index with the scaled sequential indices for an equivalent narrowed
1622*0b57cec5SDimitry Andric   /// mask. This is the reverse process to canWidenShuffleElements, but can
1623*0b57cec5SDimitry Andric   /// always succeed.
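  /// For example, scaling the mask <0, 2, -1> by 2 yields
  /// <0, 1, 4, 5, -1, -1>: each index M becomes Scale consecutive indices
  /// starting at Scale * M, and sentinel values are simply repeated.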
1624*0b57cec5SDimitry Andric   template <typename T>
1625*0b57cec5SDimitry Andric   void scaleShuffleMask(int Scale, ArrayRef<T> Mask,
1626*0b57cec5SDimitry Andric                         SmallVectorImpl<T> &ScaledMask) {
1627*0b57cec5SDimitry Andric     assert(0 < Scale && "Unexpected scaling factor");
1628*0b57cec5SDimitry Andric     size_t NumElts = Mask.size();
1629*0b57cec5SDimitry Andric     ScaledMask.assign(NumElts * Scale, -1);
1630*0b57cec5SDimitry Andric 
1631*0b57cec5SDimitry Andric     for (int i = 0; i != (int)NumElts; ++i) {
1632*0b57cec5SDimitry Andric       int M = Mask[i];
1633*0b57cec5SDimitry Andric 
1634*0b57cec5SDimitry Andric       // Repeat sentinel values in every mask element.
1635*0b57cec5SDimitry Andric       if (M < 0) {
1636*0b57cec5SDimitry Andric         for (int s = 0; s != Scale; ++s)
1637*0b57cec5SDimitry Andric           ScaledMask[(Scale * i) + s] = M;
1638*0b57cec5SDimitry Andric         continue;
1639*0b57cec5SDimitry Andric       }
1640*0b57cec5SDimitry Andric 
1641*0b57cec5SDimitry Andric       // Scale mask element and increment across each mask element.
1642*0b57cec5SDimitry Andric       for (int s = 0; s != Scale; ++s)
1643*0b57cec5SDimitry Andric         ScaledMask[(Scale * i) + s] = (Scale * M) + s;
1644*0b57cec5SDimitry Andric     }
1645*0b57cec5SDimitry Andric   }
1646*0b57cec5SDimitry Andric } // end namespace llvm
1647*0b57cec5SDimitry Andric 
1648*0b57cec5SDimitry Andric #endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H