xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/X86.h (revision dc318a4ffabcbfa23bb56a33403aad36e6de30af)
1 //===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the entry points for global functions defined in the x86
10 // target library, as used by the LLVM JIT.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_X86_X86_H
15 #define LLVM_LIB_TARGET_X86_X86_H
16 
17 #include "llvm/Support/CodeGen.h"
18 
19 namespace llvm {
20 
21 class FunctionPass;
22 class InstructionSelector;
23 class PassRegistry;
24 class X86RegisterBankInfo;
25 class X86Subtarget;
26 class X86TargetMachine;
27 
28 /// This pass converts a legalized DAG into a X86-specific DAG, ready for
29 /// instruction scheduling.
30 FunctionPass *createX86ISelDag(X86TargetMachine &TM,
31                                CodeGenOpt::Level OptLevel);
32 
33 /// This pass initializes a global base register for PIC on x86-32.
34 FunctionPass *createX86GlobalBaseRegPass();
35 
36 /// This pass combines multiple accesses to local-dynamic TLS variables so that
37 /// the TLS base address for the module is only fetched once per execution path
38 /// through the function.
39 FunctionPass *createCleanupLocalDynamicTLSPass();
40 
41 /// This function returns a pass which converts floating-point register
42 /// references and pseudo instructions into floating-point stack references and
43 /// physical instructions.
44 FunctionPass *createX86FloatingPointStackifierPass();
45 
46 /// This pass inserts AVX vzeroupper instructions before each call to avoid
47 /// transition penalty between functions encoded with AVX and SSE.
48 FunctionPass *createX86IssueVZeroUpperPass();
49 
50 /// This pass inserts ENDBR instructions before indirect jump/call
51 /// destinations as part of CET IBT mechanism.
52 FunctionPass *createX86IndirectBranchTrackingPass();
53 
54 /// Return a pass that pads short functions with NOOPs.
55 /// This will prevent a stall when returning on the Atom.
56 FunctionPass *createX86PadShortFunctions();
57 
58 /// Return a pass that selectively replaces certain instructions (like add,
59 /// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
60 /// instructions, in order to eliminate execution delays in some processors.
61 FunctionPass *createX86FixupLEAs();
62 
63 /// Return a pass that removes redundant LEA instructions and redundant address
64 /// recalculations.
65 FunctionPass *createX86OptimizeLEAs();
66 
67 /// Return a pass that transforms setcc + movzx pairs into xor + setcc.
68 FunctionPass *createX86FixupSetCC();
69 
70 /// Return a pass that folds conditional branch jumps.
71 FunctionPass *createX86CondBrFolding();
72 
73 /// Return a pass that avoids creating store forward block issues in the hardware.
74 FunctionPass *createX86AvoidStoreForwardingBlocks();
75 
76 /// Return a pass that lowers EFLAGS copy pseudo instructions.
77 FunctionPass *createX86FlagsCopyLoweringPass();
78 
79 /// Return a pass that expands WinAlloca pseudo-instructions.
80 FunctionPass *createX86WinAllocaExpander();
81 
82 /// Return a pass that inserts int3 at the end of the function if it ends with a
83 /// CALL instruction. The pass does the same for each funclet as well. This
84 /// ensures that the open interval of function start and end PCs contains all
85 /// return addresses for the benefit of the Windows x64 unwinder.
86 FunctionPass *createX86AvoidTrailingCallPass();
87 
88 /// Return a pass that optimizes the code-size of x86 call sequences. This is
89 /// done by replacing esp-relative movs with pushes.
90 FunctionPass *createX86CallFrameOptimization();
91 
92 /// Return an IR pass that inserts EH registration stack objects and explicit
93 /// EH state updates. This pass must run after EH preparation, which does
94 /// Windows-specific but architecture-neutral preparation.
95 FunctionPass *createX86WinEHStatePass();
96 
97 /// Return a Machine IR pass that expands X86-specific pseudo
98 /// instructions into a sequence of actual instructions. This pass
99 /// must run after prologue/epilogue insertion and before lowering
100 /// the MachineInstr to MC.
101 FunctionPass *createX86ExpandPseudoPass();
102 
103 /// This pass converts X86 cmov instructions into branch when profitable.
104 FunctionPass *createX86CmovConverterPass();
105 
106 /// Return a Machine IR pass that selectively replaces
107 /// certain byte and word instructions by equivalent 32 bit instructions,
108 /// in order to eliminate partial register usage, false dependences on
109 /// the upper portions of registers, and to save code size.
110 FunctionPass *createX86FixupBWInsts();
111 
112 /// Return a Machine IR pass that reassigns instruction chains from one domain
113 /// to another, when profitable.
114 FunctionPass *createX86DomainReassignmentPass();
115 
116 /// This pass replaces EVEX-encoded AVX-512 instructions with VEX
117 /// encoding when possible in order to reduce code size.
118 FunctionPass *createX86EvexToVexInsts();
119 
120 /// This pass creates the thunks for the retpoline feature.
121 FunctionPass *createX86IndirectThunksPass();
122 
123 /// This pass ensures instructions featuring a memory operand
124 /// have distinctive <LineNumber, Discriminator> pairs (with respect to each other).
125 FunctionPass *createX86DiscriminateMemOpsPass();
126 
127 /// This pass applies profiling information to insert cache prefetches.
128 FunctionPass *createX86InsertPrefetchPass();
129 
130 /// This pass inserts a wait instruction after X87 instructions which could
131 /// raise fp exceptions when strict-fp is enabled.
132 FunctionPass *createX86InsertX87waitPass();
133 
134 /// This pass optimizes arithmetic based on knowledge that is only used by
135 /// a reduction sequence and is therefore safe to reassociate in interesting
136 /// ways.
137 FunctionPass *createX86PartialReductionPass();
138 
139 InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
140                                                   X86Subtarget &,
141                                                   X86RegisterBankInfo &);
142 
143 FunctionPass *createX86LoadValueInjectionLoadHardeningPass();
144 FunctionPass *createX86LoadValueInjectionRetHardeningPass();
145 FunctionPass *createX86SpeculativeLoadHardeningPass();
146 FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();
147 
148 void initializeEvexToVexInstPassPass(PassRegistry &);
149 void initializeFixupBWInstPassPass(PassRegistry &);
150 void initializeFixupLEAPassPass(PassRegistry &);
151 void initializeFPSPass(PassRegistry &);
152 void initializeWinEHStatePassPass(PassRegistry &);
153 void initializeX86AvoidSFBPassPass(PassRegistry &);
154 void initializeX86AvoidTrailingCallPassPass(PassRegistry &);
155 void initializeX86CallFrameOptimizationPass(PassRegistry &);
156 void initializeX86CmovConverterPassPass(PassRegistry &);
157 void initializeX86CondBrFoldingPassPass(PassRegistry &);
158 void initializeX86DomainReassignmentPass(PassRegistry &);
159 void initializeX86ExecutionDomainFixPass(PassRegistry &);
160 void initializeX86ExpandPseudoPass(PassRegistry &);
161 void initializeX86FixupSetCCPassPass(PassRegistry &);
162 void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
163 void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
164 void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
165 void initializeX86OptimizeLEAPassPass(PassRegistry &);
166 void initializeX86PartialReductionPass(PassRegistry &);
167 void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
168 void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
169 
170 namespace X86AS {
// Address-space numbers to which the X86 backend assigns special meaning.
// 256-258 select segment-register-relative addressing (the enumerator
// names identify the segment register); 270-272 tag pointers with an
// explicit width for mixed-pointer-size code.  NOTE(review): these
// values are a contract with front ends and IR producers — do not
// renumber.
171 enum : unsigned {
172   GS = 256,          // Address relative to the GS segment base.
173   FS = 257,          // Address relative to the FS segment base.
174   SS = 258,          // Address relative to the SS segment base.
175   PTR32_SPTR = 270,  // 32-bit pointer; presumably sign-extended to 64 bits — confirm.
176   PTR32_UPTR = 271,  // 32-bit pointer; presumably zero-extended to 64 bits — confirm.
177   PTR64 = 272        // Full 64-bit pointer.
178 };
179 } // End X86AS namespace
180 
181 } // End llvm namespace
182 
183 #endif
184