//===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the x86
// target library, as used by the LLVM JIT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86_H
#define LLVM_LIB_TARGET_X86_X86_H

#include "llvm/Support/CodeGen.h"

namespace llvm {

class FunctionPass;
class InstructionSelector;
class PassRegistry;
class X86RegisterBankInfo;
class X86Subtarget;
class X86TargetMachine;

/// This pass converts a legalized DAG into an X86-specific DAG, ready for
/// instruction scheduling.
FunctionPass *createX86ISelDag(X86TargetMachine &TM, CodeGenOptLevel OptLevel);

/// This pass initializes a global base register for PIC on x86-32.
FunctionPass *createX86GlobalBaseRegPass();

/// This pass combines multiple accesses to local-dynamic TLS variables so that
/// the TLS base address for the module is only fetched once per execution path
/// through the function.
FunctionPass *createCleanupLocalDynamicTLSPass();

/// This function returns a pass which converts floating-point register
/// references and pseudo instructions into floating-point stack references and
/// physical instructions.
FunctionPass *createX86FloatingPointStackifierPass();

/// This pass inserts AVX vzeroupper instructions before each call to avoid
/// transition penalty between functions encoded with AVX and SSE.
FunctionPass *createX86IssueVZeroUpperPass();

/// This pass inserts ENDBR instructions before indirect jump/call
/// destinations as part of the CET IBT mechanism.
FunctionPass *createX86IndirectBranchTrackingPass();

/// Return a pass that pads short functions with NOOPs.
/// This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();

/// Return a pass that selectively replaces certain instructions (like add,
/// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
/// instructions, in order to eliminate execution delays in some processors.
FunctionPass *createX86FixupLEAs();

/// Return a pass that replaces equivalent slower instructions with faster
/// ones.
FunctionPass *createX86FixupInstTuning();

/// Return a pass that reduces the size of vector constant pool loads.
FunctionPass *createX86FixupVectorConstants();

/// Return a pass that removes redundant LEA instructions and redundant address
/// recalculations.
FunctionPass *createX86OptimizeLEAs();

/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
FunctionPass *createX86FixupSetCC();

/// Return a pass that avoids creating store-forwarding block issues in the
/// hardware.
FunctionPass *createX86AvoidStoreForwardingBlocks();

/// Return a pass that lowers EFLAGS copy pseudo instructions.
FunctionPass *createX86FlagsCopyLoweringPass();

/// Return a pass that expands DynAlloca pseudo-instructions.
FunctionPass *createX86DynAllocaExpander();

/// Return a pass that configures the tile registers.
FunctionPass *createX86TileConfigPass();

/// Return a pass that preconfigures the tile registers before fast register
/// allocation.
FunctionPass *createX86FastPreTileConfigPass();

/// Return a pass that configures the tile registers after fast register
/// allocation.
FunctionPass *createX86FastTileConfigPass();

/// Return a pass that inserts a pseudo tile config instruction.
FunctionPass *createX86PreTileConfigPass();

/// Return a pass that lowers the tile copy instruction.
FunctionPass *createX86LowerTileCopyPass();

/// Return a pass that inserts int3 at the end of the function if it ends with
/// a CALL instruction. The pass does the same for each funclet as well. This
/// ensures that the open interval of function start and end PCs contains all
/// return addresses for the benefit of the Windows x64 unwinder.
FunctionPass *createX86AvoidTrailingCallPass();

/// Return a pass that optimizes the code size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();

/// Return an IR pass that inserts EH registration stack objects and explicit
/// EH state updates. This pass must run after EH preparation, which does
/// Windows-specific but architecture-neutral preparation.
FunctionPass *createX86WinEHStatePass();

/// Return a Machine IR pass that expands X86-specific pseudo
/// instructions into a sequence of actual instructions. This pass
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();

/// This pass converts X86 cmov instructions into branches when profitable.
FunctionPass *createX86CmovConverterPass();

/// Return a Machine IR pass that selectively replaces
/// certain byte and word instructions by equivalent 32-bit instructions,
/// in order to eliminate partial register usage, false dependences on
/// the upper portions of registers, and to save code size.
FunctionPass *createX86FixupBWInsts();

/// Return a Machine IR pass that reassigns instruction chains from one domain
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();

/// This pass compresses instructions from EVEX space to legacy/VEX/EVEX space
/// when possible in order to reduce code size or facilitate HW decoding.
FunctionPass *createX86CompressEVEXPass();

/// This pass creates the thunks for the retpoline feature.
FunctionPass *createX86IndirectThunksPass();

/// This pass replaces ret instructions with jmps to the __x86_return_thunk.
FunctionPass *createX86ReturnThunksPass();

/// This pass ensures instructions featuring a memory operand have a
/// distinctive <LineNumber, Discriminator> pair (with respect to each other).
FunctionPass *createX86DiscriminateMemOpsPass();

/// This pass applies profiling information to insert cache prefetches.
FunctionPass *createX86InsertPrefetchPass();

/// This pass inserts a wait instruction after X87 instructions that could
/// raise FP exceptions when strict FP is enabled.
FunctionPass *createX86InsertX87waitPass();

/// This pass optimizes arithmetic based on knowledge that it is only used by
/// a reduction sequence and is therefore safe to reassociate in interesting
/// ways.
FunctionPass *createX86PartialReductionPass();

/// This returns the GlobalISel instruction selector for the X86 target.
InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
                                                  X86Subtarget &,
                                                  X86RegisterBankInfo &);

/// This pass hardens loads against Load Value Injection (LVI) attacks.
FunctionPass *createX86LoadValueInjectionLoadHardeningPass();

/// This pass hardens ret instructions against Load Value Injection (LVI)
/// attacks.
FunctionPass *createX86LoadValueInjectionRetHardeningPass();

/// This pass implements Speculative Load Hardening to mitigate speculative
/// execution (Spectre v1 style) attacks.
FunctionPass *createX86SpeculativeLoadHardeningPass();

/// This pass suppresses the side effects of speculative execution by inserting
/// LFENCE instructions.
FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();

FunctionPass *createX86ArgumentStackSlotPass();

void initializeCompressEVEXPassPass(PassRegistry &);
void initializeFPSPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeX86ArgumentStackSlotPassPass(PassRegistry &);
void initializeX86FixupInstTuningPassPass(PassRegistry &);
void initializeX86FixupVectorConstantsPassPass(PassRegistry &);
void initializeWinEHStatePassPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86AvoidTrailingCallPassPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86DAGToDAGISelPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FastPreTileConfigPass(PassRegistry &);
void initializeX86FastTileConfigPass(PassRegistry &);
void initializeX86FixupSetCCPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86LowerAMXIntrinsicsLegacyPassPass(PassRegistry &);
void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
void initializeX86LowerTileCopyPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86PartialReductionPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
void initializeX86ReturnThunksPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86TileConfigPass(PassRegistry &);

namespace X86AS {
enum : unsigned {
  GS = 256,
  FS = 257,
  SS = 258,
  PTR32_SPTR = 270,
  PTR32_UPTR = 271,
  PTR64 = 272
};
} // End X86AS namespace

} // End llvm namespace

#endif