//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

extern cl::opt<bool> X86EnableAPXForRelocation;

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
    PrintFailedFusing("print-failed-fuse-candidates",
                      cl::desc("Print instructions that the allocator wants to"
                               " fuse, but the X86 backend currently can't"),
                      cl::Hidden);
static cl::opt<bool>
    ReMatPICStubLoad("remat-pic-stub-load",
                     cl::desc("Re-materialize load from stub in PIC mode"),
                     cl::init(false), cl::Hidden);
static cl::opt<unsigned>
    PartialRegUpdateClearance("partial-reg-update-clearance",
                              cl::desc("Clearance between two register writes "
                                       "for inserting XOR to avoid partial "
                                       "register update"),
                              cl::init(64), cl::Hidden);
static cl::opt<unsigned> UndefRegClearance(
    "undef-reg-clearance",
    cl::desc("How many idle instructions we would like before "
             "certain undef register reads"),
    cl::init(128), cl::Hidden);

// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

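// Note: the first two opcodes passed to the X86GenInstrInfo base class below
// are the call-frame setup/destroy pseudos; getSPAdjust() later relies on the
// destroy opcode (ADJCALLSTACKUP*) to recover the stack adjustment of a call.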
X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
      Subtarget(STI), RI(STI.getTargetTriple()) {}

const TargetRegisterClass *
X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                          const TargetRegisterInfo *TRI,
                          const MachineFunction &MF) const {
  auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum, TRI, MF);
  // If the target does not have EGPR, then r16-r31 will be reserved for all
  // instructions.
  if (!RC || !Subtarget.hasEGPR())
    return RC;

  if (X86II::canUseApxExtendedReg(MCID))
    return RC;

  const X86RegisterInfo *RI = Subtarget.getRegisterInfo();
  return RI->constrainRegClassToNonRex2(RC);
}

bool X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         Register &SrcReg, Register &DstReg,
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
    [[fallthrough]];
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}

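// Note: the two "data invariant" predicates below conservatively describe
// which instructions are believed not to leak their inputs through timing;
// they are used by mitigations such as speculative load hardening.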
bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {
  if (MI.mayLoad() || MI.mayStore())
    return false;

  // Some target-independent operations that trivially lower to data-invariant
  // instructions.
  if (MI.isCopyLike() || MI.isInsertSubreg())
    return true;

  unsigned Opcode = MI.getOpcode();
  using namespace X86;
  // On x86 the imul family is believed to be constant time w.r.t. its input
  // operands. However, these instructions set flags and are perhaps the most
  // surprisingly constant-time operations, so we call them out here
  // separately.
  if (isIMUL(Opcode))
    return true;
  // Bit scanning and counting instructions are somewhat surprisingly constant
  // time: they scan across bits and do other fairly complex operations like
  // popcnt, yet are believed to be constant time on x86. However, these set
  // flags.
  if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) ||
      isTZCNT(Opcode))
    return true;
  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) ||
      isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) ||
      isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) ||
      isTZMSK(Opcode))
    return true;
  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  if (isBEXTR(Opcode) || isBZHI(Opcode))
    return true;
  // Shift and rotate.
  if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) ||
      isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode))
    return true;
  // Basic arithmetic is constant time on the input but does set flags.
  if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) ||
      isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode))
    return true;
  // Arithmetic with just 32-bit and 64-bit variants and no immediates.
  if (isANDN(Opcode))
    return true;
  // Unary arithmetic operations.
  if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode))
    return true;
  // Unlike other arithmetic, NOT doesn't set EFLAGS.
  if (isNOT(Opcode))
    return true;
  // Various move instructions used to zero or sign extend things. Note that we
  // intentionally don't support the _NOREX variants as we can't handle that
  // register constraint anyways.
  if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode))
    return true;
  // Arithmetic instructions that are both constant time and don't set flags.
  if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode))
    return true;
  // LEA doesn't actually access memory, and its arithmetic is constant time.
  if (isLEA(Opcode))
    return true;
  // By default, assume that the instruction is not data invariant.
  return false;
}

bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    // By default, assume that the load will immediately leak.
    return false;

  // On x86 it is believed that imul is constant time w.r.t. the loaded data.
  // However, these instructions set flags and are perhaps the most
  // surprisingly constant-time operations, so we call them out here
  // separately.
  case X86::IMUL16rm:
  case X86::IMUL16rmi:
  case X86::IMUL32rm:
  case X86::IMUL32rmi:
  case X86::IMUL64rm:
  case X86::IMUL64rmi32:

  // Bit scanning and counting instructions are somewhat surprisingly constant
  // time: they scan across bits and do other fairly complex operations like
  // popcnt, yet are believed to be constant time on x86. However, these set
  // flags.
  case X86::BSF16rm:
  case X86::BSF32rm:
  case X86::BSF64rm:
  case X86::BSR16rm:
  case X86::BSR32rm:
  case X86::BSR64rm:
  case X86::LZCNT16rm:
  case X86::LZCNT32rm:
  case X86::LZCNT64rm:
  case X86::POPCNT16rm:
  case X86::POPCNT32rm:
  case X86::POPCNT64rm:
  case X86::TZCNT16rm:
  case X86::TZCNT32rm:
  case X86::TZCNT64rm:

  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  case X86::BLCFILL32rm:
  case X86::BLCFILL64rm:
  case X86::BLCI32rm:
  case X86::BLCI64rm:
  case X86::BLCIC32rm:
  case X86::BLCIC64rm:
  case X86::BLCMSK32rm:
  case X86::BLCMSK64rm:
  case X86::BLCS32rm:
  case X86::BLCS64rm:
  case X86::BLSFILL32rm:
  case X86::BLSFILL64rm:
  case X86::BLSI32rm:
  case X86::BLSI64rm:
  case X86::BLSIC32rm:
  case X86::BLSIC64rm:
  case X86::BLSMSK32rm:
  case X86::BLSMSK64rm:
  case X86::BLSR32rm:
  case X86::BLSR64rm:
  case X86::TZMSK32rm:
  case X86::TZMSK64rm:

  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  case X86::BEXTR32rm:
  case X86::BEXTR64rm:
  case X86::BEXTRI32mi:
  case X86::BEXTRI64mi:
  case X86::BZHI32rm:
  case X86::BZHI64rm:

  // Basic arithmetic is constant time on the input but does set flags.
  case X86::ADC8rm:
  case X86::ADC16rm:
  case X86::ADC32rm:
  case X86::ADC64rm:
  case X86::ADD8rm:
  case X86::ADD16rm:
  case X86::ADD32rm:
  case X86::ADD64rm:
  case X86::AND8rm:
  case X86::AND16rm:
  case X86::AND32rm:
  case X86::AND64rm:
  case X86::ANDN32rm:
  case X86::ANDN64rm:
  case X86::OR8rm:
  case X86::OR16rm:
  case X86::OR32rm:
  case X86::OR64rm:
  case X86::SBB8rm:
  case X86::SBB16rm:
  case X86::SBB32rm:
  case X86::SBB64rm:
  case X86::SUB8rm:
  case X86::SUB16rm:
  case X86::SUB32rm:
  case X86::SUB64rm:
  case X86::XOR8rm:
  case X86::XOR16rm:
  case X86::XOR32rm:
  case X86::XOR64rm:

  // Integer multiply w/o affecting flags is still believed to be constant
  // time on x86. Called out separately as this is among the most surprising
  // instructions to exhibit that behavior.
  case X86::MULX32rm:
  case X86::MULX64rm:

  // Arithmetic instructions that are both constant time and don't set flags.
  case X86::RORX32mi:
  case X86::RORX64mi:
  case X86::SARX32rm:
  case X86::SARX64rm:
  case X86::SHLX32rm:
  case X86::SHLX64rm:
  case X86::SHRX32rm:
  case X86::SHRX64rm:

  // Conversions are believed to be constant time and don't set flags.
  case X86::CVTTSD2SI64rm:
  case X86::VCVTTSD2SI64rm:
  case X86::VCVTTSD2SI64Zrm:
  case X86::CVTTSD2SIrm:
  case X86::VCVTTSD2SIrm:
  case X86::VCVTTSD2SIZrm:
  case X86::CVTTSS2SI64rm:
  case X86::VCVTTSS2SI64rm:
  case X86::VCVTTSS2SI64Zrm:
  case X86::CVTTSS2SIrm:
  case X86::VCVTTSS2SIrm:
  case X86::VCVTTSS2SIZrm:
  case X86::CVTSI2SDrm:
  case X86::VCVTSI2SDrm:
  case X86::VCVTSI2SDZrm:
  case X86::CVTSI2SSrm:
  case X86::VCVTSI2SSrm:
  case X86::VCVTSI2SSZrm:
  case X86::CVTSI642SDrm:
  case X86::VCVTSI642SDrm:
  case X86::VCVTSI642SDZrm:
  case X86::CVTSI642SSrm:
  case X86::VCVTSI642SSrm:
  case X86::VCVTSI642SSZrm:
  case X86::CVTSS2SDrm:
  case X86::VCVTSS2SDrm:
  case X86::VCVTSS2SDZrm:
  case X86::CVTSD2SSrm:
  case X86::VCVTSD2SSrm:
  case X86::VCVTSD2SSZrm:
  // AVX512 added unsigned integer conversions.
  case X86::VCVTTSD2USI64Zrm:
  case X86::VCVTTSD2USIZrm:
  case X86::VCVTTSS2USI64Zrm:
  case X86::VCVTTSS2USIZrm:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI642SSZrm:

  // Loads to register don't set flags.
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSX16rm8:
  case X86::MOVSX32rm16:
  case X86::MOVSX32rm8:
  case X86::MOVSX32rm8_NOREX:
  case X86::MOVSX64rm16:
  case X86::MOVSX64rm32:
  case X86::MOVSX64rm8:
  case X86::MOVZX16rm8:
  case X86::MOVZX32rm16:
  case X86::MOVZX32rm8:
  case X86::MOVZX32rm8_NOREX:
  case X86::MOVZX64rm16:
  case X86::MOVZX64rm8:
    return true;
  }
}

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() || I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently we handle only the PUSHes we can reasonably expect to see in
  // call sequences.
  switch (MI.getOpcode()) {
  default:
    return 0;
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSH32i:
    return 4;
  case X86::PUSH64r:
  case X86::PUSH64rmm:
  case X86::PUSH64rmr:
  case X86::PUSH64i32:
    return 8;
  }
}

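// (For reference: an X86 memory reference is five consecutive operands, in
// order: base register, scale amount, index register, displacement, and
// segment register. The X86::Addr* enumerators below index into that layout.)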
/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
      MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
      MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
      MI.getOperand(Op + X86::AddrDisp).isImm() &&
      MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
      MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
      MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode, TypeSize &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::KMOVBkm:
  case X86::KMOVBkm_EVEX:
    MemBytes = TypeSize::getFixed(1);
    return true;
  case X86::MOV16rm:
  case X86::KMOVWkm:
  case X86::KMOVWkm_EVEX:
  case X86::VMOVSHZrm:
  case X86::VMOVSHZrm_alt:
    MemBytes = TypeSize::getFixed(2);
    return true;
  case X86::MOV32rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::KMOVDkm:
  case X86::KMOVDkm_EVEX:
    MemBytes = TypeSize::getFixed(4);
    return true;
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm:
  case X86::KMOVQkm_EVEX:
    MemBytes = TypeSize::getFixed(8);
    return true;
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
    MemBytes = TypeSize::getFixed(16);
    return true;
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
    MemBytes = TypeSize::getFixed(32);
    return true;
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
    MemBytes = TypeSize::getFixed(64);
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode, TypeSize &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8mr:
  case X86::KMOVBmk:
  case X86::KMOVBmk_EVEX:
    MemBytes = TypeSize::getFixed(1);
    return true;
  case X86::MOV16mr:
  case X86::KMOVWmk:
  case X86::KMOVWmk_EVEX:
  case X86::VMOVSHZmr:
    MemBytes = TypeSize::getFixed(2);
    return true;
  case X86::MOV32mr:
  case X86::MOVSSmr:
  case X86::VMOVSSmr:
  case X86::VMOVSSZmr:
  case X86::KMOVDmk:
  case X86::KMOVDmk_EVEX:
    MemBytes = TypeSize::getFixed(4);
    return true;
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSDmr:
  case X86::VMOVSDmr:
  case X86::VMOVSDZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk:
  case X86::KMOVQmk_EVEX:
    MemBytes = TypeSize::getFixed(8);
    return true;
  case X86::MOVAPSmr:
  case X86::MOVUPSmr:
  case X86::MOVAPDmr:
  case X86::MOVUPDmr:
  case X86::MOVDQAmr:
  case X86::MOVDQUmr:
  case X86::VMOVAPSmr:
  case X86::VMOVUPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVUPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVDQUmr:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
    MemBytes = TypeSize::getFixed(16);
    return true;
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
    MemBytes = TypeSize::getFixed(32);
    return true;
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
    MemBytes = TypeSize::getFixed(64);
    return true;
  }
  return false;
}

Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           TypeSize &MemBytes) const {
  if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI.getOperand(0).getReg();
  return Register();
}

Register X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
    if (Register Reg = isLoadFromStackSlot(MI, FrameIndex))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasLoadFromStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return Register();
}

Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}

Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          TypeSize &MemBytes) const {
  if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI.getOperand(X86::AddrNumOperands).getReg();
  return Register();
}

Register X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
    if (Register Reg = isStoreToStackSlot(MI, FrameIndex))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasStoreToStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return MI.getOperand(X86::AddrNumOperands).getReg();
    }
  }
  return Register();
}

/// Return true if the register is a PIC base, e.g. defined by X86::MOVPC32r.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!BaseReg.isVirtual())
    return false;
  bool isPICBase = false;
  for (const MachineInstr &DefMI : MRI.def_instructions(BaseReg)) {
    if (DefMI.getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    // This function should only be called for opcodes with the ReMaterializable
    // flag set.
    llvm_unreachable("Unknown rematerializable operation!");
    break;
  case X86::IMPLICIT_DEF:
    // Defer to generic logic.
    break;
  case X86::LOAD_STACK_GUARD:
  case X86::LD_Fp032:
  case X86::LD_Fp064:
  case X86::LD_Fp080:
  case X86::LD_Fp132:
  case X86::LD_Fp164:
  case X86::LD_Fp180:
  case X86::AVX1_SETALLONES:
  case X86::AVX2_SETALLONES:
  case X86::AVX512_128_SET0:
  case X86::AVX512_256_SET0:
  case X86::AVX512_512_SET0:
  case X86::AVX512_512_SETALLONES:
  case X86::AVX512_FsFLD0SD:
  case X86::AVX512_FsFLD0SH:
  case X86::AVX512_FsFLD0SS:
  case X86::AVX512_FsFLD0F128:
  case X86::AVX_SET0:
  case X86::FsFLD0SD:
  case X86::FsFLD0SS:
  case X86::FsFLD0SH:
  case X86::FsFLD0F128:
  case X86::KSET0D:
  case X86::KSET0Q:
  case X86::KSET0W:
  case X86::KSET1D:
  case X86::KSET1Q:
  case X86::KSET1W:
  case X86::MMX_SET0:
  case X86::MOV32ImmSExti8:
  case X86::MOV32r0:
  case X86::MOV32r1:
  case X86::MOV32r_1:
  case X86::MOV32ri64:
  case X86::MOV64ImmSExti8:
  case X86::V_SET0:
  case X86::V_SETALLONES:
  case X86::MOV16ri:
  case X86::MOV32ri:
  case X86::MOV64ri:
  case X86::MOV64ri32:
  case X86::MOV8ri:
  case X86::PTILEZEROV:
    return true;

  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::VBROADCASTSSrm:
  case X86::VBROADCASTSSYrm:
  case X86::VBROADCASTSDYrm:
  // AVX-512
  case X86::VPBROADCASTBZ128rm:
  case X86::VPBROADCASTBZ256rm:
  case X86::VPBROADCASTBZrm:
  case X86::VBROADCASTF32X2Z256rm:
  case X86::VBROADCASTF32X2Zrm:
  case X86::VBROADCASTI32X2Z128rm:
  case X86::VBROADCASTI32X2Z256rm:
  case X86::VBROADCASTI32X2Zrm:
  case X86::VPBROADCASTWZ128rm:
  case X86::VPBROADCASTWZ256rm:
  case X86::VPBROADCASTWZrm:
  case X86::VPBROADCASTDZ128rm:
  case X86::VPBROADCASTDZ256rm:
  case X86::VPBROADCASTDZrm:
  case X86::VBROADCASTSSZ128rm:
  case X86::VBROADCASTSSZ256rm:
  case X86::VBROADCASTSSZrm:
  case X86::VPBROADCASTQZ128rm:
  case X86::VPBROADCASTQZ256rm:
  case X86::VPBROADCASTQZrm:
  case X86::VBROADCASTSDZ256rm:
  case X86::VBROADCASTSDZrm:
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::VMOVSHZrm:
  case X86::VMOVSHZrm_alt:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad()) {
      Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!(!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())) {
        const MachineFunction &MF = *MI.getParent()->getParent();
        const MachineRegisterInfo &MRI = MF.getRegInfo();
        if (regIsPICBase(BaseReg, MRI))
          return true;
      }
    }
    break;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        !MI.getOperand(1 + X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
        return true;
      Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      if (regIsPICBase(BaseReg, MRI))
        return true;
    }
    break;
  }
  }
  return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 Register DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
  if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
                            MachineBasicBlock::LQR_Dead) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig.getOpcode()) {
    case X86::MOV32r0:
      Value = 0;
      break;
    case X86::MOV32r1:
      Value = 1;
      break;
    case X86::MOV32r_1:
      Value = -1;
      break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    const DebugLoc &DL = Orig.getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri))
        .add(Orig.getOperand(0))
        .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MBB.insert(I, MI);
  }

  MachineInstr &NewMI = *std::prev(I);
  NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
        !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Return the shift count of a machine operand, truncated to the number of
/// bits the hardware actually uses.
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}
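// For example, an immediate count of 35 on an operation without REX.W
// truncates to 35 & 31 = 3, matching the hardware's own masking of shift
// counts.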

/// Check whether the given shift count can be represented by a LEA
/// instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}
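// For example, a left shift by 3 maps to an LEA scale of 8 (1 << 3), while a
// shift by 4 would need scale 16, which the two-bit SIB.scale field cannot
// encode.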

static bool
findRedundantFlagInstr(MachineInstr &CmpInstr, MachineInstr &CmpValDefInstr,
                       const MachineRegisterInfo *MRI, MachineInstr **AndInstr,
                       const TargetRegisterInfo *TRI, const X86Subtarget &ST,
                       bool &NoSignFlag, bool &ClearsOverflowFlag) {
  if (!(CmpValDefInstr.getOpcode() == X86::SUBREG_TO_REG &&
        CmpInstr.getOpcode() == X86::TEST64rr) &&
      !(CmpValDefInstr.getOpcode() == X86::COPY &&
        CmpInstr.getOpcode() == X86::TEST16rr))
    return false;

  // CmpInstr is a TEST16rr/TEST64rr instruction, and
  // `X86InstrInfo::analyzeCompare` guarantees that it's analyzable only if two
  // registers are identical.
  assert((CmpInstr.getOperand(0).getReg() == CmpInstr.getOperand(1).getReg()) &&
         "CmpInstr is an analyzable TEST16rr/TEST64rr, and "
         "`X86InstrInfo::analyzeCompare` requires that the two reg operands "
         "are the same.");

  // Caller (`X86InstrInfo::optimizeCompareInstr`) guarantees that
  // `CmpValDefInstr` defines the value that's used by `CmpInstr`; in this case
  // if `CmpValDefInstr` sets the EFLAGS, it is likely that `CmpInstr` is
  // redundant.
  assert(
      (MRI->getVRegDef(CmpInstr.getOperand(0).getReg()) == &CmpValDefInstr) &&
      "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG or TEST16rr "
      "is a user of COPY sub16bit.");
  MachineInstr *VregDefInstr = nullptr;
  if (CmpInstr.getOpcode() == X86::TEST16rr) {
    if (!CmpValDefInstr.getOperand(1).getReg().isVirtual())
      return false;
    VregDefInstr = MRI->getVRegDef(CmpValDefInstr.getOperand(1).getReg());
    if (!VregDefInstr)
      return false;
    // We can only remove the TEST when the AND is an AND32ri or AND64ri32
    // whose immediate fits in 16 bits; other 32/64-bit ops would test higher
    // bits that TEST16rr does not look at.
    if (!((VregDefInstr->getOpcode() == X86::AND32ri ||
           VregDefInstr->getOpcode() == X86::AND64ri32) &&
          isUInt<16>(VregDefInstr->getOperand(2).getImm())))
      return false;
  }

  if (CmpInstr.getOpcode() == X86::TEST64rr) {
    // As seen in X86 td files, CmpValDefInstr.getOperand(1).getImm() is
    // typically 0.
    if (CmpValDefInstr.getOperand(1).getImm() != 0)
      return false;

    // As seen in X86 td files, CmpValDefInstr.getOperand(3) is typically
    // sub_32bit or sub_xmm.
    if (CmpValDefInstr.getOperand(3).getImm() != X86::sub_32bit)
      return false;

    VregDefInstr = MRI->getVRegDef(CmpValDefInstr.getOperand(2).getReg());
  }

  assert(VregDefInstr && "Must have a definition (SSA)");

  // Require that `CmpValDefInstr` and `VregDefInstr` are from the same MBB
  // to simplify the subsequent analysis.
  //
  // FIXME: If `VregDefInstr->getParent()` is the only predecessor of
  // `CmpValDefInstr.getParent()`, this could be handled.
  if (VregDefInstr->getParent() != CmpValDefInstr.getParent())
    return false;

  if (X86::isAND(VregDefInstr->getOpcode()) &&
      (!ST.hasNF() || VregDefInstr->modifiesRegister(X86::EFLAGS, TRI))) {
    // Get a sequence of instructions like
    //   %reg = and* ...                    // Set EFLAGS
    //   ...                                // EFLAGS not changed
    //   %extended_reg = subreg_to_reg 0, %reg, %subreg.sub_32bit
    //   test64rr %extended_reg, %extended_reg, implicit-def $eflags
    // or
    //   %reg = and32* ...
    //   ...                         // EFLAGS not changed.
    //   %src_reg = copy %reg.sub_16bit:gr32
    //   test16rr %src_reg, %src_reg, implicit-def $eflags
    //
    // If subsequent readers use a subset of bits that don't change
    // after `and*` instructions, it's likely that the test64rr could
    // be optimized away.
    for (const MachineInstr &Instr :
         make_range(std::next(MachineBasicBlock::iterator(VregDefInstr)),
                    MachineBasicBlock::iterator(CmpValDefInstr))) {
      // There are instructions between 'VregDefInstr' and
      // 'CmpValDefInstr' that modify EFLAGS.
      if (Instr.modifiesRegister(X86::EFLAGS, TRI))
        return false;
    }

    *AndInstr = VregDefInstr;

    // AND instruction will essentially update SF and clear OF, so
    // NoSignFlag should be false in the sense that SF is modified by `AND`.
    //
    // However, the implementation artificially sets `NoSignFlag` to true
    // to poison the SF bit; that is to say, if SF is looked at later, the
    // optimization (to erase TEST64rr) will be disabled.
    //
    // The reason to poison the SF bit is that its value could differ between
    // the `AND` and `TEST` operations; the sign bit is not known for `AND`,
    // and is known to be 0 as a result of `TEST64rr`.
    //
    // FIXME: As opposed to poisoning the SF bit directly, consider peeking into
    // the AND instruction and using the static information to guide peephole
    // optimization if possible. For example, it's possible to fold a
    // conditional move into a copy if the relevant EFLAGS bits could be deduced
    // from an immediate operand of the AND operation.
    //
    NoSignFlag = true;
    // ClearsOverflowFlag is true for the AND operation (no surprise).
    ClearsOverflowFlag = true;
    return true;
  }
  return false;
}

bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, Register &NewSrc,
                                  unsigned &NewSrcSubReg, bool &isKill,
                                  MachineOperand &ImplicitOp, LiveVariables *LV,
                                  LiveIntervals *LIS) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  Register SrcReg = Src.getReg();
  unsigned SubReg = Src.getSubReg();
  isKill = MI.killsRegister(SrcReg, /*TRI=*/nullptr);

  NewSrcSubReg = X86::NoSubRegister;

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    NewSrcSubReg = SubReg;
    assert(!Src.isUndef() && "Undef op doesn't need optimization");

    if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
  // another we need to add 64-bit registers to the final MI.
  if (SrcReg.isPhysical()) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(SrcReg, 64);
    assert(!SubReg && "no superregister for source");
    assert(NewSrc.isValid() && "Invalid Operand");
    assert(!Src.isUndef() && "Undef op doesn't need optimization");
  } else {
    // The virtual register is of the wrong class; we have to create a
    // temporary 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    NewSrcSubReg = X86::NoSubRegister;
    MachineInstr *Copy =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
            .addReg(SrcReg, getKillRegState(isKill), SubReg);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;

    if (LV)
      LV->replaceKillInstruction(SrcReg, MI, *Copy);

    if (LIS) {
      SlotIndex CopyIdx = LIS->InsertMachineInstrInMaps(*Copy);
      SlotIndex Idx = LIS->getInstructionIndex(MI);
      LiveInterval &LI = LIS->getInterval(SrcReg);
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end.getBaseIndex() == Idx)
        S->end = CopyIdx.getRegSlot();
    }
  }

  // We've set all the parameters without issue.
  return true;
}

MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                                         MachineInstr &MI,
                                                         LiveVariables *LV,
                                                         LiveIntervals *LIS,
                                                         bool Is8BitOp) const {
  // We handle 8-bit adds and various 16-bit opcodes in the switch below.
  MachineBasicBlock &MBB = *MI.getParent();
  MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
  assert((Is8BitOp ||
          RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
              *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
         "Unexpected type for LEA transform");

  // TODO: For a 32-bit target, we need to adjust the LEA variables with
  // something like this:
  //   Opcode = X86::LEA32r;
  //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  //   OutRegLEA =
  //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
  //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
  if (!Subtarget.is64Bit())
    return nullptr;

  unsigned Opcode = X86::LEA64_32r;
  Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
  Register InRegLEA2;

  // Build and insert into an implicit UNDEF value. This is OK because
  // we will be shifting and then extracting the lower 8/16 bits.
  // This has the potential to cause a partial register stall, e.g.
  //   movw    (%rbp,%rcx,2), %dx
  //   leal    -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  unsigned SrcSubReg = MI.getOperand(1).getSubReg();
  Register Src2;
  unsigned Src2SubReg;
  bool IsDead = MI.getOperand(0).isDead();
  bool IsKill = MI.getOperand(1).isKill();
  unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
  MachineInstr *ImpDef =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
  MachineInstr *InsMI =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(InRegLEA, RegState::Define, SubReg)
          .addReg(Src, getKillRegState(IsKill), SrcSubReg);
  MachineInstr *ImpDef2 = nullptr;
  MachineInstr *InsMI2 = nullptr;

  MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
#define CASE_NF(OP)                                                            \
  case X86::OP:                                                                \
  case X86::OP##_NF:
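  // The *_NF opcodes handled alongside their base forms here are the APX
  // "no flags" variants of the same arithmetic operations.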
  switch (MIOpc) {
  default:
    llvm_unreachable("Unreachable!");
  CASE_NF(SHL8ri)
  CASE_NF(SHL16ri) {
    unsigned ShAmt = MI.getOperand(2).getImm();
    MIB.addReg(0)
        .addImm(1LL << ShAmt)
        .addReg(InRegLEA, RegState::Kill)
        .addImm(0)
        .addReg(0);
    break;
  }
  CASE_NF(INC8r)
  CASE_NF(INC16r)
    addRegOffset(MIB, InRegLEA, true, 1);
    break;
  CASE_NF(DEC8r)
  CASE_NF(DEC16r)
    addRegOffset(MIB, InRegLEA, true, -1);
    break;
  CASE_NF(ADD8ri)
  CASE_NF(ADD16ri)
  case X86::ADD8ri_DB:
  case X86::ADD16ri_DB:
    addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
    break;
  CASE_NF(ADD8rr)
  CASE_NF(ADD16rr)
  case X86::ADD8rr_DB:
  case X86::ADD16rr_DB: {
    Src2 = MI.getOperand(2).getReg();
    Src2SubReg = MI.getOperand(2).getSubReg();
    bool IsKill2 = MI.getOperand(2).isKill();
    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
    if (Src == Src2) {
      // ADD8rr/ADD16rr killed %reg1028, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA, false,
                X86::NoSubRegister);
    } else {
      if (Subtarget.is64Bit())
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
      else
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we will be shifting and then extracting the lower 8/16-bits.
      ImpDef2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF),
                        InRegLEA2);
      InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
                   .addReg(InRegLEA2, RegState::Define, SubReg)
                   .addReg(Src2, getKillRegState(IsKill2), Src2SubReg);
      addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA2, true,
                X86::NoSubRegister);
    }
    if (LV && IsKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, *InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
          .addReg(OutRegLEA, RegState::Kill, SubReg);

  if (LV) {
    // Update live variables.
    LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
    if (InRegLEA2)
      LV->getVarInfo(InRegLEA2).Kills.push_back(NewMI);
    LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
    if (IsKill)
      LV->replaceKillInstruction(Src, MI, *InsMI);
    if (IsDead)
      LV->replaceKillInstruction(Dest, MI, *ExtMI);
  }

  if (LIS) {
    LIS->InsertMachineInstrInMaps(*ImpDef);
    SlotIndex InsIdx = LIS->InsertMachineInstrInMaps(*InsMI);
    if (ImpDef2)
      LIS->InsertMachineInstrInMaps(*ImpDef2);
    SlotIndex Ins2Idx;
    if (InsMI2)
      Ins2Idx = LIS->InsertMachineInstrInMaps(*InsMI2);
    SlotIndex NewIdx = LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
    SlotIndex ExtIdx = LIS->InsertMachineInstrInMaps(*ExtMI);
    LIS->getInterval(InRegLEA);
    LIS->getInterval(OutRegLEA);
    if (InRegLEA2)
      LIS->getInterval(InRegLEA2);

    // Move the use of Src up to InsMI.
    LiveInterval &SrcLI = LIS->getInterval(Src);
    LiveRange::Segment *SrcSeg = SrcLI.getSegmentContaining(NewIdx);
    if (SrcSeg->end == NewIdx.getRegSlot())
      SrcSeg->end = InsIdx.getRegSlot();

    if (InsMI2) {
      // Move the use of Src2 up to InsMI2.
      LiveInterval &Src2LI = LIS->getInterval(Src2);
      LiveRange::Segment *Src2Seg = Src2LI.getSegmentContaining(NewIdx);
      if (Src2Seg->end == NewIdx.getRegSlot())
        Src2Seg->end = Ins2Idx.getRegSlot();
    }

    // Move the definition of Dest down to ExtMI.
    LiveInterval &DestLI = LIS->getInterval(Dest);
    LiveRange::Segment *DestSeg =
        DestLI.getSegmentContaining(NewIdx.getRegSlot());
    assert(DestSeg->start == NewIdx.getRegSlot() &&
           DestSeg->valno->def == NewIdx.getRegSlot());
    DestSeg->start = ExtIdx.getRegSlot();
    DestSeg->valno->def = ExtIdx.getRegSlot();
  }

  return ExtMI;
}

/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                  LiveVariables *LV,
                                                  LiveIntervals *LIS) const {
  // The following opcodes also set the condition code register(s). Only
  // convert them to an equivalent lea if the condition code register defs
  // are dead!
  if (hasLiveCondCodeDef(MI))
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  // All input instructions are two-addr instructions. Get the known operands.
1417   const MachineOperand &Dest = MI.getOperand(0);
1418   const MachineOperand &Src = MI.getOperand(1);
1419 
1420   // Ideally, operations with undef should be folded before we get here, but we
1421   // can't guarantee it. Bail out because optimizing undefs is a waste of time.
1422   // Without this, we have to forward undef state to new register operands to
1423   // avoid machine verifier errors.
1424   if (Src.isUndef())
1425     return nullptr;
1426   if (MI.getNumOperands() > 2)
1427     if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
1428       return nullptr;
1429 
1430   MachineInstr *NewMI = nullptr;
1431   Register SrcReg, SrcReg2;
1432   unsigned SrcSubReg, SrcSubReg2;
1433   bool Is64Bit = Subtarget.is64Bit();
1434 
1435   bool Is8BitOp = false;
1436   unsigned NumRegOperands = 2;
1437   unsigned MIOpc = MI.getOpcode();
1438   switch (MIOpc) {
1439   default:
1440     llvm_unreachable("Unreachable!");
1441   CASE_NF(SHL64ri) {
1442     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1443     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1444     if (!isTruncatedShiftCountForLEA(ShAmt))
1445       return nullptr;
1446 
1447     // LEA can't handle RSP.
1448     if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
1449                                         Src.getReg(), &X86::GR64_NOSPRegClass))
1450       return nullptr;
1451 
1452     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
1453                 .add(Dest)
1454                 .addReg(0)
1455                 .addImm(1LL << ShAmt)
1456                 .add(Src)
1457                 .addImm(0)
1458                 .addReg(0);
1459     break;
1460   }
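  // Illustrative sketch (hypothetical vregs): "%d = SHL64ri %s, 3" becomes
  // "%d = LEA64r base=noreg, scale=8, index=%s, disp=0, seg=noreg", i.e.
  // %d = 8 * %s, with no tied operands left.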
1461   CASE_NF(SHL32ri) {
1462     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1463     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1464     if (!isTruncatedShiftCountForLEA(ShAmt))
1465       return nullptr;
1466 
1467     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1468 
1469     // LEA can't handle ESP.
1470     bool isKill;
1471     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1472     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1473                         isKill, ImplicitOp, LV, LIS))
1474       return nullptr;
1475 
1476     MachineInstrBuilder MIB =
1477         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1478             .add(Dest)
1479             .addReg(0)
1480             .addImm(1LL << ShAmt)
1481             .addReg(SrcReg, getKillRegState(isKill), SrcSubReg)
1482             .addImm(0)
1483             .addReg(0);
1484     if (ImplicitOp.getReg() != 0)
1485       MIB.add(ImplicitOp);
1486     NewMI = MIB;
1487 
1488     // Add kills if classifyLEAReg created a new register.
1489     if (LV && SrcReg != Src.getReg())
1490       LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1491     break;
1492   }
1493   CASE_NF(SHL8ri)
1494     Is8BitOp = true;
1495     [[fallthrough]];
1496   CASE_NF(SHL16ri) {
1497     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1498     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1499     if (!isTruncatedShiftCountForLEA(ShAmt))
1500       return nullptr;
1501     return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1502   }
1503   CASE_NF(INC64r)
1504   CASE_NF(INC32r) {
1505     assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1506     unsigned Opc = (MIOpc == X86::INC64r || MIOpc == X86::INC64r_NF)
1507                        ? X86::LEA64r
1508                        : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1509     bool isKill;
1510     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1511     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1512                         isKill, ImplicitOp, LV, LIS))
1513       return nullptr;
1514 
1515     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1516                                   .add(Dest)
1517                                   .addReg(SrcReg, getKillRegState(isKill));
1518     if (ImplicitOp.getReg() != 0)
1519       MIB.add(ImplicitOp);
1520 
1521     NewMI = addOffset(MIB, 1);
1522 
1523     // Add kills if classifyLEAReg created a new register.
1524     if (LV && SrcReg != Src.getReg())
1525       LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1526     break;
1527   }
1528   CASE_NF(DEC64r)
1529   CASE_NF(DEC32r) {
1530     assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1531     unsigned Opc = (MIOpc == X86::DEC64r || MIOpc == X86::DEC64r_NF)
1532                        ? X86::LEA64r
1533                        : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1534 
1535     bool isKill;
1536     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1537     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1538                         isKill, ImplicitOp, LV, LIS))
1539       return nullptr;
1540 
1541     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1542                                   .add(Dest)
1543                                   .addReg(SrcReg, getKillRegState(isKill));
1544     if (ImplicitOp.getReg() != 0)
1545       MIB.add(ImplicitOp);
1546 
1547     NewMI = addOffset(MIB, -1);
1548 
1549     // Add kills if classifyLEAReg created a new register.
1550     if (LV && SrcReg != Src.getReg())
1551       LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1552     break;
1553   }
1554   CASE_NF(DEC8r)
1555   CASE_NF(INC8r)
1556     Is8BitOp = true;
1557     [[fallthrough]];
1558   CASE_NF(DEC16r)
1559   CASE_NF(INC16r)
1560     return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1561   CASE_NF(ADD64rr)
1562   CASE_NF(ADD32rr)
1563   case X86::ADD64rr_DB:
1564   case X86::ADD32rr_DB: {
1565     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1566     unsigned Opc;
1567     if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_NF ||
1568         MIOpc == X86::ADD64rr_DB)
1569       Opc = X86::LEA64r;
1570     else
1571       Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1572 
1573     const MachineOperand &Src2 = MI.getOperand(2);
1574     bool isKill2;
1575     MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1576     if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, SrcSubReg2,
1577                         isKill2, ImplicitOp2, LV, LIS))
1578       return nullptr;
1579 
1580     bool isKill;
1581     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1582     if (Src.getReg() == Src2.getReg()) {
1583       // Don't call classifyLEAReg a second time on the same register, in case
1584       // the first call inserted a COPY from Src2 and marked it as killed.
1585       isKill = isKill2;
1586       SrcReg = SrcReg2;
1587       SrcSubReg = SrcSubReg2;
1588     } else {
1589       if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1590                           isKill, ImplicitOp, LV, LIS))
1591         return nullptr;
1592     }
1593 
1594     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1595     if (ImplicitOp.getReg() != 0)
1596       MIB.add(ImplicitOp);
1597     if (ImplicitOp2.getReg() != 0)
1598       MIB.add(ImplicitOp2);
1599 
1600     NewMI =
1601         addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2);
1602 
1603     // Add kills if classifyLEAReg created a new register.
1604     if (LV) {
1605       if (SrcReg2 != Src2.getReg())
1606         LV->getVarInfo(SrcReg2).Kills.push_back(NewMI);
1607       if (SrcReg != SrcReg2 && SrcReg != Src.getReg())
1608         LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1609     }
1610     NumRegOperands = 3;
1611     break;
1612   }
1613   CASE_NF(ADD8rr)
1614   case X86::ADD8rr_DB:
1615     Is8BitOp = true;
1616     [[fallthrough]];
1617   CASE_NF(ADD16rr)
1618   case X86::ADD16rr_DB:
1619     return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1620   CASE_NF(ADD64ri32)
1621   case X86::ADD64ri32_DB:
1622     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1623     NewMI = addOffset(
1624         BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1625         MI.getOperand(2));
1626     break;
1627   CASE_NF(ADD32ri)
1628   case X86::ADD32ri_DB: {
1629     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1630     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1631 
1632     bool isKill;
1633     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1634     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1635                         isKill, ImplicitOp, LV, LIS))
1636       return nullptr;
1637 
1638     MachineInstrBuilder MIB =
1639         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1640             .add(Dest)
1641             .addReg(SrcReg, getKillRegState(isKill), SrcSubReg);
1642     if (ImplicitOp.getReg() != 0)
1643       MIB.add(ImplicitOp);
1644 
1645     NewMI = addOffset(MIB, MI.getOperand(2));
1646 
1647     // Add kills if classifyLEAReg created a new register.
1648     if (LV && SrcReg != Src.getReg())
1649       LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1650     break;
1651   }
1652   CASE_NF(ADD8ri)
1653   case X86::ADD8ri_DB:
1654     Is8BitOp = true;
1655     [[fallthrough]];
1656   CASE_NF(ADD16ri)
1657   case X86::ADD16ri_DB:
1658     return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1659   CASE_NF(SUB8ri)
1660   CASE_NF(SUB16ri)
1661     // FIXME: Support these similarly to ADD8ri/ADD16ri*.
1662     return nullptr;
1663   CASE_NF(SUB32ri) {
1664     if (!MI.getOperand(2).isImm())
1665       return nullptr;
1666     int64_t Imm = MI.getOperand(2).getImm();
1667     if (!isInt<32>(-Imm))
1668       return nullptr;
1669 
1670     assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1671     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1672 
1673     bool isKill;
1674     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1675     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1676                         isKill, ImplicitOp, LV, LIS))
1677       return nullptr;
1678 
1679     MachineInstrBuilder MIB =
1680         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1681             .add(Dest)
1682             .addReg(SrcReg, getKillRegState(isKill), SrcSubReg);
1683     if (ImplicitOp.getReg() != 0)
1684       MIB.add(ImplicitOp);
1685 
1686     NewMI = addOffset(MIB, -Imm);
1687 
1688     // Add kills if classifyLEAReg created a new register.
1689     if (LV && SrcReg != Src.getReg())
1690       LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1691     break;
1692   }
1693 
1694   CASE_NF(SUB64ri32) {
1695     if (!MI.getOperand(2).isImm())
1696       return nullptr;
1697     int64_t Imm = MI.getOperand(2).getImm();
1698     if (!isInt<32>(-Imm))
1699       return nullptr;
1700 
1701     assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1702 
1703     MachineInstrBuilder MIB =
1704         BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src);
1705     NewMI = addOffset(MIB, -Imm);
1706     break;
1707   }
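  // E.g. a hypothetical "SUB64ri32 %d, %s, 42" becomes an LEA computing
  // %s + (-42). The isInt<32>(-Imm) checks above reject the one immediate
  // whose negation does not fit in 32 bits (INT32_MIN).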
1708 
1709   case X86::VMOVDQU8Z128rmk:
1710   case X86::VMOVDQU8Z256rmk:
1711   case X86::VMOVDQU8Zrmk:
1712   case X86::VMOVDQU16Z128rmk:
1713   case X86::VMOVDQU16Z256rmk:
1714   case X86::VMOVDQU16Zrmk:
1715   case X86::VMOVDQU32Z128rmk:
1716   case X86::VMOVDQA32Z128rmk:
1717   case X86::VMOVDQU32Z256rmk:
1718   case X86::VMOVDQA32Z256rmk:
1719   case X86::VMOVDQU32Zrmk:
1720   case X86::VMOVDQA32Zrmk:
1721   case X86::VMOVDQU64Z128rmk:
1722   case X86::VMOVDQA64Z128rmk:
1723   case X86::VMOVDQU64Z256rmk:
1724   case X86::VMOVDQA64Z256rmk:
1725   case X86::VMOVDQU64Zrmk:
1726   case X86::VMOVDQA64Zrmk:
1727   case X86::VMOVUPDZ128rmk:
1728   case X86::VMOVAPDZ128rmk:
1729   case X86::VMOVUPDZ256rmk:
1730   case X86::VMOVAPDZ256rmk:
1731   case X86::VMOVUPDZrmk:
1732   case X86::VMOVAPDZrmk:
1733   case X86::VMOVUPSZ128rmk:
1734   case X86::VMOVAPSZ128rmk:
1735   case X86::VMOVUPSZ256rmk:
1736   case X86::VMOVAPSZ256rmk:
1737   case X86::VMOVUPSZrmk:
1738   case X86::VMOVAPSZrmk:
1739   case X86::VBROADCASTSDZ256rmk:
1740   case X86::VBROADCASTSDZrmk:
1741   case X86::VBROADCASTSSZ128rmk:
1742   case X86::VBROADCASTSSZ256rmk:
1743   case X86::VBROADCASTSSZrmk:
1744   case X86::VPBROADCASTDZ128rmk:
1745   case X86::VPBROADCASTDZ256rmk:
1746   case X86::VPBROADCASTDZrmk:
1747   case X86::VPBROADCASTQZ128rmk:
1748   case X86::VPBROADCASTQZ256rmk:
1749   case X86::VPBROADCASTQZrmk: {
1750     unsigned Opc;
1751     switch (MIOpc) {
1752     default:
1753       llvm_unreachable("Unreachable!");
1754     case X86::VMOVDQU8Z128rmk:
1755       Opc = X86::VPBLENDMBZ128rmk;
1756       break;
1757     case X86::VMOVDQU8Z256rmk:
1758       Opc = X86::VPBLENDMBZ256rmk;
1759       break;
1760     case X86::VMOVDQU8Zrmk:
1761       Opc = X86::VPBLENDMBZrmk;
1762       break;
1763     case X86::VMOVDQU16Z128rmk:
1764       Opc = X86::VPBLENDMWZ128rmk;
1765       break;
1766     case X86::VMOVDQU16Z256rmk:
1767       Opc = X86::VPBLENDMWZ256rmk;
1768       break;
1769     case X86::VMOVDQU16Zrmk:
1770       Opc = X86::VPBLENDMWZrmk;
1771       break;
1772     case X86::VMOVDQU32Z128rmk:
1773       Opc = X86::VPBLENDMDZ128rmk;
1774       break;
1775     case X86::VMOVDQU32Z256rmk:
1776       Opc = X86::VPBLENDMDZ256rmk;
1777       break;
1778     case X86::VMOVDQU32Zrmk:
1779       Opc = X86::VPBLENDMDZrmk;
1780       break;
1781     case X86::VMOVDQU64Z128rmk:
1782       Opc = X86::VPBLENDMQZ128rmk;
1783       break;
1784     case X86::VMOVDQU64Z256rmk:
1785       Opc = X86::VPBLENDMQZ256rmk;
1786       break;
1787     case X86::VMOVDQU64Zrmk:
1788       Opc = X86::VPBLENDMQZrmk;
1789       break;
1790     case X86::VMOVUPDZ128rmk:
1791       Opc = X86::VBLENDMPDZ128rmk;
1792       break;
1793     case X86::VMOVUPDZ256rmk:
1794       Opc = X86::VBLENDMPDZ256rmk;
1795       break;
1796     case X86::VMOVUPDZrmk:
1797       Opc = X86::VBLENDMPDZrmk;
1798       break;
1799     case X86::VMOVUPSZ128rmk:
1800       Opc = X86::VBLENDMPSZ128rmk;
1801       break;
1802     case X86::VMOVUPSZ256rmk:
1803       Opc = X86::VBLENDMPSZ256rmk;
1804       break;
1805     case X86::VMOVUPSZrmk:
1806       Opc = X86::VBLENDMPSZrmk;
1807       break;
1808     case X86::VMOVDQA32Z128rmk:
1809       Opc = X86::VPBLENDMDZ128rmk;
1810       break;
1811     case X86::VMOVDQA32Z256rmk:
1812       Opc = X86::VPBLENDMDZ256rmk;
1813       break;
1814     case X86::VMOVDQA32Zrmk:
1815       Opc = X86::VPBLENDMDZrmk;
1816       break;
1817     case X86::VMOVDQA64Z128rmk:
1818       Opc = X86::VPBLENDMQZ128rmk;
1819       break;
1820     case X86::VMOVDQA64Z256rmk:
1821       Opc = X86::VPBLENDMQZ256rmk;
1822       break;
1823     case X86::VMOVDQA64Zrmk:
1824       Opc = X86::VPBLENDMQZrmk;
1825       break;
1826     case X86::VMOVAPDZ128rmk:
1827       Opc = X86::VBLENDMPDZ128rmk;
1828       break;
1829     case X86::VMOVAPDZ256rmk:
1830       Opc = X86::VBLENDMPDZ256rmk;
1831       break;
1832     case X86::VMOVAPDZrmk:
1833       Opc = X86::VBLENDMPDZrmk;
1834       break;
1835     case X86::VMOVAPSZ128rmk:
1836       Opc = X86::VBLENDMPSZ128rmk;
1837       break;
1838     case X86::VMOVAPSZ256rmk:
1839       Opc = X86::VBLENDMPSZ256rmk;
1840       break;
1841     case X86::VMOVAPSZrmk:
1842       Opc = X86::VBLENDMPSZrmk;
1843       break;
1844     case X86::VBROADCASTSDZ256rmk:
1845       Opc = X86::VBLENDMPDZ256rmbk;
1846       break;
1847     case X86::VBROADCASTSDZrmk:
1848       Opc = X86::VBLENDMPDZrmbk;
1849       break;
1850     case X86::VBROADCASTSSZ128rmk:
1851       Opc = X86::VBLENDMPSZ128rmbk;
1852       break;
1853     case X86::VBROADCASTSSZ256rmk:
1854       Opc = X86::VBLENDMPSZ256rmbk;
1855       break;
1856     case X86::VBROADCASTSSZrmk:
1857       Opc = X86::VBLENDMPSZrmbk;
1858       break;
1859     case X86::VPBROADCASTDZ128rmk:
1860       Opc = X86::VPBLENDMDZ128rmbk;
1861       break;
1862     case X86::VPBROADCASTDZ256rmk:
1863       Opc = X86::VPBLENDMDZ256rmbk;
1864       break;
1865     case X86::VPBROADCASTDZrmk:
1866       Opc = X86::VPBLENDMDZrmbk;
1867       break;
1868     case X86::VPBROADCASTQZ128rmk:
1869       Opc = X86::VPBLENDMQZ128rmbk;
1870       break;
1871     case X86::VPBROADCASTQZ256rmk:
1872       Opc = X86::VPBLENDMQZ256rmbk;
1873       break;
1874     case X86::VPBROADCASTQZrmk:
1875       Opc = X86::VPBLENDMQZrmbk;
1876       break;
1877     }
1878 
1879     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1880                 .add(Dest)
1881                 .add(MI.getOperand(2))
1882                 .add(Src)
1883                 .add(MI.getOperand(3))
1884                 .add(MI.getOperand(4))
1885                 .add(MI.getOperand(5))
1886                 .add(MI.getOperand(6))
1887                 .add(MI.getOperand(7));
1888     NumRegOperands = 4;
1889     break;
1890   }
1891 
1892   case X86::VMOVDQU8Z128rrk:
1893   case X86::VMOVDQU8Z256rrk:
1894   case X86::VMOVDQU8Zrrk:
1895   case X86::VMOVDQU16Z128rrk:
1896   case X86::VMOVDQU16Z256rrk:
1897   case X86::VMOVDQU16Zrrk:
1898   case X86::VMOVDQU32Z128rrk:
1899   case X86::VMOVDQA32Z128rrk:
1900   case X86::VMOVDQU32Z256rrk:
1901   case X86::VMOVDQA32Z256rrk:
1902   case X86::VMOVDQU32Zrrk:
1903   case X86::VMOVDQA32Zrrk:
1904   case X86::VMOVDQU64Z128rrk:
1905   case X86::VMOVDQA64Z128rrk:
1906   case X86::VMOVDQU64Z256rrk:
1907   case X86::VMOVDQA64Z256rrk:
1908   case X86::VMOVDQU64Zrrk:
1909   case X86::VMOVDQA64Zrrk:
1910   case X86::VMOVUPDZ128rrk:
1911   case X86::VMOVAPDZ128rrk:
1912   case X86::VMOVUPDZ256rrk:
1913   case X86::VMOVAPDZ256rrk:
1914   case X86::VMOVUPDZrrk:
1915   case X86::VMOVAPDZrrk:
1916   case X86::VMOVUPSZ128rrk:
1917   case X86::VMOVAPSZ128rrk:
1918   case X86::VMOVUPSZ256rrk:
1919   case X86::VMOVAPSZ256rrk:
1920   case X86::VMOVUPSZrrk:
1921   case X86::VMOVAPSZrrk: {
1922     unsigned Opc;
1923     switch (MIOpc) {
1924     default:
1925       llvm_unreachable("Unreachable!");
1926     case X86::VMOVDQU8Z128rrk:
1927       Opc = X86::VPBLENDMBZ128rrk;
1928       break;
1929     case X86::VMOVDQU8Z256rrk:
1930       Opc = X86::VPBLENDMBZ256rrk;
1931       break;
1932     case X86::VMOVDQU8Zrrk:
1933       Opc = X86::VPBLENDMBZrrk;
1934       break;
1935     case X86::VMOVDQU16Z128rrk:
1936       Opc = X86::VPBLENDMWZ128rrk;
1937       break;
1938     case X86::VMOVDQU16Z256rrk:
1939       Opc = X86::VPBLENDMWZ256rrk;
1940       break;
1941     case X86::VMOVDQU16Zrrk:
1942       Opc = X86::VPBLENDMWZrrk;
1943       break;
1944     case X86::VMOVDQU32Z128rrk:
1945       Opc = X86::VPBLENDMDZ128rrk;
1946       break;
1947     case X86::VMOVDQU32Z256rrk:
1948       Opc = X86::VPBLENDMDZ256rrk;
1949       break;
1950     case X86::VMOVDQU32Zrrk:
1951       Opc = X86::VPBLENDMDZrrk;
1952       break;
1953     case X86::VMOVDQU64Z128rrk:
1954       Opc = X86::VPBLENDMQZ128rrk;
1955       break;
1956     case X86::VMOVDQU64Z256rrk:
1957       Opc = X86::VPBLENDMQZ256rrk;
1958       break;
1959     case X86::VMOVDQU64Zrrk:
1960       Opc = X86::VPBLENDMQZrrk;
1961       break;
1962     case X86::VMOVUPDZ128rrk:
1963       Opc = X86::VBLENDMPDZ128rrk;
1964       break;
1965     case X86::VMOVUPDZ256rrk:
1966       Opc = X86::VBLENDMPDZ256rrk;
1967       break;
1968     case X86::VMOVUPDZrrk:
1969       Opc = X86::VBLENDMPDZrrk;
1970       break;
1971     case X86::VMOVUPSZ128rrk:
1972       Opc = X86::VBLENDMPSZ128rrk;
1973       break;
1974     case X86::VMOVUPSZ256rrk:
1975       Opc = X86::VBLENDMPSZ256rrk;
1976       break;
1977     case X86::VMOVUPSZrrk:
1978       Opc = X86::VBLENDMPSZrrk;
1979       break;
1980     case X86::VMOVDQA32Z128rrk:
1981       Opc = X86::VPBLENDMDZ128rrk;
1982       break;
1983     case X86::VMOVDQA32Z256rrk:
1984       Opc = X86::VPBLENDMDZ256rrk;
1985       break;
1986     case X86::VMOVDQA32Zrrk:
1987       Opc = X86::VPBLENDMDZrrk;
1988       break;
1989     case X86::VMOVDQA64Z128rrk:
1990       Opc = X86::VPBLENDMQZ128rrk;
1991       break;
1992     case X86::VMOVDQA64Z256rrk:
1993       Opc = X86::VPBLENDMQZ256rrk;
1994       break;
1995     case X86::VMOVDQA64Zrrk:
1996       Opc = X86::VPBLENDMQZrrk;
1997       break;
1998     case X86::VMOVAPDZ128rrk:
1999       Opc = X86::VBLENDMPDZ128rrk;
2000       break;
2001     case X86::VMOVAPDZ256rrk:
2002       Opc = X86::VBLENDMPDZ256rrk;
2003       break;
2004     case X86::VMOVAPDZrrk:
2005       Opc = X86::VBLENDMPDZrrk;
2006       break;
2007     case X86::VMOVAPSZ128rrk:
2008       Opc = X86::VBLENDMPSZ128rrk;
2009       break;
2010     case X86::VMOVAPSZ256rrk:
2011       Opc = X86::VBLENDMPSZ256rrk;
2012       break;
2013     case X86::VMOVAPSZrrk:
2014       Opc = X86::VBLENDMPSZrrk;
2015       break;
2016     }
2017 
2018     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
2019                 .add(Dest)
2020                 .add(MI.getOperand(2))
2021                 .add(Src)
2022                 .add(MI.getOperand(3));
2023     NumRegOperands = 4;
2024     break;
2025   }
2026   }
2027 #undef CASE_NF
2028 
2029   if (!NewMI)
2030     return nullptr;
2031 
2032   if (LV) { // Update live variables
2033     for (unsigned I = 0; I < NumRegOperands; ++I) {
2034       MachineOperand &Op = MI.getOperand(I);
2035       if (Op.isReg() && (Op.isDead() || Op.isKill()))
2036         LV->replaceKillInstruction(Op.getReg(), MI, *NewMI);
2037     }
2038   }
2039 
2040   MachineBasicBlock &MBB = *MI.getParent();
2041   MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
2042 
2043   if (LIS) {
2044     LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
2045     if (SrcReg)
2046       LIS->getInterval(SrcReg);
2047     if (SrcReg2)
2048       LIS->getInterval(SrcReg2);
2049   }
2050 
2051   return NewMI;
2052 }
2053 
2054 /// This determines which of the three possible cases of a three-source
2055 /// commute the given source indexes correspond to, taking any mask operands
2056 /// into account. Commuting a pass-through operand is never allowed; an
2057 /// unsupported index pair hits llvm_unreachable.
2058 /// Case 0 - Possible to commute the first and second operands.
2059 /// Case 1 - Possible to commute the first and third operands.
2060 /// Case 2 - Possible to commute the second and third operands.
2061 static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
2062                                        unsigned SrcOpIdx2) {
2063   // Put the lowest index into SrcOpIdx1 to simplify the checks below.
2064   if (SrcOpIdx1 > SrcOpIdx2)
2065     std::swap(SrcOpIdx1, SrcOpIdx2);
2066 
2067   unsigned Op1 = 1, Op2 = 2, Op3 = 3;
2068   if (X86II::isKMasked(TSFlags)) {
2069     Op2++;
2070     Op3++;
2071   }
2072 
2073   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
2074     return 0;
2075   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
2076     return 1;
2077   if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
2078     return 2;
2079   llvm_unreachable("Unknown three src commute case.");
2080 }
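// Worked example: for a merge-masked operation such as VFMADD213PSZrk the
// operand layout is (dst, src1, kmask, src2, src3), so the three source
// slots are operand indexes 1, 3 and 4; commuting indexes 1 and 3 is Case 0
// (first and second sources).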
2081 
2082 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
2083     const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
2084     const X86InstrFMA3Group &FMA3Group) const {
2085 
2086   unsigned Opc = MI.getOpcode();
2087 
2088   // TODO: Commuting the 1st operand of FMA*_Int requires some additional
2089   // analysis. The commute optimization is legal only if all users of FMA*_Int
2090   // use only the lowest element of the FMA*_Int instruction. Such an analysis
2091   // is not implemented yet, so commuting operand 1 is disallowed (see the
2092   // assert below). When such an analysis becomes available, this will be the
2093   // right place to call it.
2094   assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
2095          "Intrinsic instructions can't commute operand 1");
2096 
2097   // Determine which case this commute is or if it can't be done.
2098   unsigned Case =
2099       getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
2100   assert(Case < 3 && "Unexpected case number!");
2101 
2102   // Define the FMA forms mapping array that helps to map input FMA form
2103   // to output FMA form to preserve the operation semantics after
2104   // commuting the operands.
2105   const unsigned Form132Index = 0;
2106   const unsigned Form213Index = 1;
2107   const unsigned Form231Index = 2;
2108   static const unsigned FormMapping[][3] = {
2109       // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
2110       // FMA132 A, C, b; ==> FMA231 C, A, b;
2111       // FMA213 B, A, c; ==> FMA213 A, B, c;
2112       // FMA231 C, A, b; ==> FMA132 A, C, b;
2113       {Form231Index, Form213Index, Form132Index},
2114       // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
2115       // FMA132 A, c, B; ==> FMA132 B, c, A;
2116       // FMA213 B, a, C; ==> FMA231 C, a, B;
2117       // FMA231 C, a, B; ==> FMA213 B, a, C;
2118       {Form132Index, Form231Index, Form213Index},
2119       // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
2120       // FMA132 a, C, B; ==> FMA213 a, B, C;
2121       // FMA213 b, A, C; ==> FMA132 b, C, A;
2122       // FMA231 c, A, B; ==> FMA231 c, B, A;
2123       {Form213Index, Form132Index, Form231Index}};
2124 
2125   unsigned FMAForms[3];
2126   FMAForms[0] = FMA3Group.get132Opcode();
2127   FMAForms[1] = FMA3Group.get213Opcode();
2128   FMAForms[2] = FMA3Group.get231Opcode();
2129 
2130   // Everything is ready, just adjust the FMA opcode and return it.
2131   for (unsigned FormIndex = 0; FormIndex < 3; FormIndex++)
2132     if (Opc == FMAForms[FormIndex])
2133       return FMAForms[FormMapping[Case][FormIndex]];
2134 
2135   llvm_unreachable("Illegal FMA3 format");
2136 }
2137 
2138 static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
2139                              unsigned SrcOpIdx2) {
2140   // Determine which case this commute is or if it can't be done.
2141   unsigned Case =
2142       getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
2143   assert(Case < 3 && "Unexpected case value!");
2144 
2145   // For each case we need to swap two pairs of bits in the final immediate.
2146   static const uint8_t SwapMasks[3][4] = {
2147       {0x04, 0x10, 0x08, 0x20}, // Swap bits 2/4 and 3/5.
2148       {0x02, 0x10, 0x08, 0x40}, // Swap bits 1/4 and 3/6.
2149       {0x02, 0x04, 0x20, 0x40}, // Swap bits 1/2 and 5/6.
2150   };
2151 
2152   uint8_t Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2153   // Clear out the bits we are swapping.
2154   uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
2155                            SwapMasks[Case][2] | SwapMasks[Case][3]);
2156   // If the immediate had a bit of the pair set, then set the opposite bit.
2157   if (Imm & SwapMasks[Case][0])
2158     NewImm |= SwapMasks[Case][1];
2159   if (Imm & SwapMasks[Case][1])
2160     NewImm |= SwapMasks[Case][0];
2161   if (Imm & SwapMasks[Case][2])
2162     NewImm |= SwapMasks[Case][3];
2163   if (Imm & SwapMasks[Case][3])
2164     NewImm |= SwapMasks[Case][2];
2165   MI.getOperand(MI.getNumOperands() - 1).setImm(NewImm);
2166 }
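// Worked example for Case 0 (swapping sources 1 and 2): the truth table
// 0xCA encodes "a ? b : c"; exchanging bits 2<->4 and 3<->5 produces 0xE2,
// which encodes "b ? a : c" -- the same function with its first two inputs
// swapped.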
2167 
2168 // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
2169 // commuted.
2170 static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
2171 #define VPERM_CASES(Suffix)                                                    \
2172   case X86::VPERMI2##Suffix##Z128rr:                                           \
2173   case X86::VPERMT2##Suffix##Z128rr:                                           \
2174   case X86::VPERMI2##Suffix##Z256rr:                                           \
2175   case X86::VPERMT2##Suffix##Z256rr:                                           \
2176   case X86::VPERMI2##Suffix##Zrr:                                              \
2177   case X86::VPERMT2##Suffix##Zrr:                                              \
2178   case X86::VPERMI2##Suffix##Z128rm:                                           \
2179   case X86::VPERMT2##Suffix##Z128rm:                                           \
2180   case X86::VPERMI2##Suffix##Z256rm:                                           \
2181   case X86::VPERMT2##Suffix##Z256rm:                                           \
2182   case X86::VPERMI2##Suffix##Zrm:                                              \
2183   case X86::VPERMT2##Suffix##Zrm:                                              \
2184   case X86::VPERMI2##Suffix##Z128rrkz:                                         \
2185   case X86::VPERMT2##Suffix##Z128rrkz:                                         \
2186   case X86::VPERMI2##Suffix##Z256rrkz:                                         \
2187   case X86::VPERMT2##Suffix##Z256rrkz:                                         \
2188   case X86::VPERMI2##Suffix##Zrrkz:                                            \
2189   case X86::VPERMT2##Suffix##Zrrkz:                                            \
2190   case X86::VPERMI2##Suffix##Z128rmkz:                                         \
2191   case X86::VPERMT2##Suffix##Z128rmkz:                                         \
2192   case X86::VPERMI2##Suffix##Z256rmkz:                                         \
2193   case X86::VPERMT2##Suffix##Z256rmkz:                                         \
2194   case X86::VPERMI2##Suffix##Zrmkz:                                            \
2195   case X86::VPERMT2##Suffix##Zrmkz:
2196 
2197 #define VPERM_CASES_BROADCAST(Suffix)                                          \
2198   VPERM_CASES(Suffix)                                                          \
2199   case X86::VPERMI2##Suffix##Z128rmb:                                          \
2200   case X86::VPERMT2##Suffix##Z128rmb:                                          \
2201   case X86::VPERMI2##Suffix##Z256rmb:                                          \
2202   case X86::VPERMT2##Suffix##Z256rmb:                                          \
2203   case X86::VPERMI2##Suffix##Zrmb:                                             \
2204   case X86::VPERMT2##Suffix##Zrmb:                                             \
2205   case X86::VPERMI2##Suffix##Z128rmbkz:                                        \
2206   case X86::VPERMT2##Suffix##Z128rmbkz:                                        \
2207   case X86::VPERMI2##Suffix##Z256rmbkz:                                        \
2208   case X86::VPERMT2##Suffix##Z256rmbkz:                                        \
2209   case X86::VPERMI2##Suffix##Zrmbkz:                                           \
2210   case X86::VPERMT2##Suffix##Zrmbkz:
2211 
2212   switch (Opcode) {
2213   default:
2214     return false;
2215     VPERM_CASES(B)
2216     VPERM_CASES_BROADCAST(D)
2217     VPERM_CASES_BROADCAST(PD)
2218     VPERM_CASES_BROADCAST(PS)
2219     VPERM_CASES_BROADCAST(Q)
2220     VPERM_CASES(W)
2221     return true;
2222   }
2223 #undef VPERM_CASES_BROADCAST
2224 #undef VPERM_CASES
2225 }
2226 
2227 // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
2228 // from the I opcode to the T opcode and vice versa.
2229 static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
2230 #define VPERM_CASES(Orig, New)                                                 \
2231   case X86::Orig##Z128rr:                                                      \
2232     return X86::New##Z128rr;                                                   \
2233   case X86::Orig##Z128rrkz:                                                    \
2234     return X86::New##Z128rrkz;                                                 \
2235   case X86::Orig##Z128rm:                                                      \
2236     return X86::New##Z128rm;                                                   \
2237   case X86::Orig##Z128rmkz:                                                    \
2238     return X86::New##Z128rmkz;                                                 \
2239   case X86::Orig##Z256rr:                                                      \
2240     return X86::New##Z256rr;                                                   \
2241   case X86::Orig##Z256rrkz:                                                    \
2242     return X86::New##Z256rrkz;                                                 \
2243   case X86::Orig##Z256rm:                                                      \
2244     return X86::New##Z256rm;                                                   \
2245   case X86::Orig##Z256rmkz:                                                    \
2246     return X86::New##Z256rmkz;                                                 \
2247   case X86::Orig##Zrr:                                                         \
2248     return X86::New##Zrr;                                                      \
2249   case X86::Orig##Zrrkz:                                                       \
2250     return X86::New##Zrrkz;                                                    \
2251   case X86::Orig##Zrm:                                                         \
2252     return X86::New##Zrm;                                                      \
2253   case X86::Orig##Zrmkz:                                                       \
2254     return X86::New##Zrmkz;
2255 
2256 #define VPERM_CASES_BROADCAST(Orig, New)                                       \
2257   VPERM_CASES(Orig, New)                                                       \
2258   case X86::Orig##Z128rmb:                                                     \
2259     return X86::New##Z128rmb;                                                  \
2260   case X86::Orig##Z128rmbkz:                                                   \
2261     return X86::New##Z128rmbkz;                                                \
2262   case X86::Orig##Z256rmb:                                                     \
2263     return X86::New##Z256rmb;                                                  \
2264   case X86::Orig##Z256rmbkz:                                                   \
2265     return X86::New##Z256rmbkz;                                                \
2266   case X86::Orig##Zrmb:                                                        \
2267     return X86::New##Zrmb;                                                     \
2268   case X86::Orig##Zrmbkz:                                                      \
2269     return X86::New##Zrmbkz;
2270 
2271   switch (Opcode) {
2272     VPERM_CASES(VPERMI2B, VPERMT2B)
2273     VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
2274     VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
2275     VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
2276     VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
2277     VPERM_CASES(VPERMI2W, VPERMT2W)
2278     VPERM_CASES(VPERMT2B, VPERMI2B)
2279     VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
2280     VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
2281     VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
2282     VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
2283     VPERM_CASES(VPERMT2W, VPERMI2W)
2284   }
2285 
2286   llvm_unreachable("Unreachable!");
2287 #undef VPERM_CASES_BROADCAST
2288 #undef VPERM_CASES
2289 }
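// A sketch of why this works: VPERMI2* overwrites its tied first source,
// which holds the indices, while VPERMT2* overwrites its tied first source,
// which holds the first data table. Commuting the tied operand with the
// other source exchanges those roles, so the result is exactly the opposite
// opcode of the I2/T2 pair.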
2290 
2291 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2292                                                    unsigned OpIdx1,
2293                                                    unsigned OpIdx2) const {
2294   auto CloneIfNew = [&](MachineInstr &MI) {
2295     return std::exchange(NewMI, false)
2296                ? MI.getParent()->getParent()->CloneMachineInstr(&MI)
2297                : &MI;
2298   };
2299   MachineInstr *WorkingMI = nullptr;
2300   unsigned Opc = MI.getOpcode();
2301 
2302 #define CASE_ND(OP)                                                            \
2303   case X86::OP:                                                                \
2304   case X86::OP##_ND:
2305 
2306   switch (Opc) {
2307   // SHLD B, C, I <-> SHRD C, B, (BitWidth - I)
2308   CASE_ND(SHRD16rri8)
2309   CASE_ND(SHLD16rri8)
2310   CASE_ND(SHRD32rri8)
2311   CASE_ND(SHLD32rri8)
2312   CASE_ND(SHRD64rri8)
2313   CASE_ND(SHLD64rri8) {
2314     unsigned Size;
2315     switch (Opc) {
2316     default:
2317       llvm_unreachable("Unreachable!");
2318 #define FROM_TO_SIZE(A, B, S)                                                  \
2319   case X86::A:                                                                 \
2320     Opc = X86::B;                                                              \
2321     Size = S;                                                                  \
2322     break;                                                                     \
2323   case X86::A##_ND:                                                            \
2324     Opc = X86::B##_ND;                                                         \
2325     Size = S;                                                                  \
2326     break;                                                                     \
2327   case X86::B:                                                                 \
2328     Opc = X86::A;                                                              \
2329     Size = S;                                                                  \
2330     break;                                                                     \
2331   case X86::B##_ND:                                                            \
2332     Opc = X86::A##_ND;                                                         \
2333     Size = S;                                                                  \
2334     break;
2335 
2336     FROM_TO_SIZE(SHRD16rri8, SHLD16rri8, 16)
2337     FROM_TO_SIZE(SHRD32rri8, SHLD32rri8, 32)
2338     FROM_TO_SIZE(SHRD64rri8, SHLD64rri8, 64)
2339 #undef FROM_TO_SIZE
2340     }
2341     WorkingMI = CloneIfNew(MI);
2342     WorkingMI->setDesc(get(Opc));
2343     WorkingMI->getOperand(3).setImm(Size - MI.getOperand(3).getImm());
2344     break;
2345   }
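  // E.g. for hypothetical 32-bit values, "SHLD a, b, 8" computes
  // (a << 8) | (b >> 24), and the commuted "SHRD b, a, 24" computes
  // (b >> 24) | (a << 8) -- the same result, so only the opcode and the
  // immediate (Size - I) change.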
2346   case X86::PFSUBrr:
2347   case X86::PFSUBRrr:
2348     // PFSUB  x, y: x = x - y
2349     // PFSUBR x, y: x = y - x
2350     WorkingMI = CloneIfNew(MI);
2351     WorkingMI->setDesc(
2352         get(X86::PFSUBRrr == Opc ? X86::PFSUBrr : X86::PFSUBRrr));
2353     break;
2354   case X86::BLENDPDrri:
2355   case X86::BLENDPSrri:
2356   case X86::PBLENDWrri:
2357   case X86::VBLENDPDrri:
2358   case X86::VBLENDPSrri:
2359   case X86::VBLENDPDYrri:
2360   case X86::VBLENDPSYrri:
2361   case X86::VPBLENDDrri:
2362   case X86::VPBLENDWrri:
2363   case X86::VPBLENDDYrri:
2364   case X86::VPBLENDWYrri: {
2365     int8_t Mask;
2366     switch (Opc) {
2367     default:
2368       llvm_unreachable("Unreachable!");
2369     case X86::BLENDPDrri:
2370       Mask = (int8_t)0x03;
2371       break;
2372     case X86::BLENDPSrri:
2373       Mask = (int8_t)0x0F;
2374       break;
2375     case X86::PBLENDWrri:
2376       Mask = (int8_t)0xFF;
2377       break;
2378     case X86::VBLENDPDrri:
2379       Mask = (int8_t)0x03;
2380       break;
2381     case X86::VBLENDPSrri:
2382       Mask = (int8_t)0x0F;
2383       break;
2384     case X86::VBLENDPDYrri:
2385       Mask = (int8_t)0x0F;
2386       break;
2387     case X86::VBLENDPSYrri:
2388       Mask = (int8_t)0xFF;
2389       break;
2390     case X86::VPBLENDDrri:
2391       Mask = (int8_t)0x0F;
2392       break;
2393     case X86::VPBLENDWrri:
2394       Mask = (int8_t)0xFF;
2395       break;
2396     case X86::VPBLENDDYrri:
2397       Mask = (int8_t)0xFF;
2398       break;
2399     case X86::VPBLENDWYrri:
2400       Mask = (int8_t)0xFF;
2401       break;
2402     }
2403     // Only the least significant bits of Imm are used.
2404     // Using int8_t to ensure it will be sign extended to the int64_t that
2405     // setImm takes in order to match isel behavior.
2406     int8_t Imm = MI.getOperand(3).getImm() & Mask;
2407     WorkingMI = CloneIfNew(MI);
2408     WorkingMI->getOperand(3).setImm(Mask ^ Imm);
2409     break;
2410   }
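  // E.g. BLENDPSrri with Imm = 0x05 takes lanes 0 and 2 from the second
  // source; after the sources swap, the complemented immediate
  // 0x0F ^ 0x05 = 0x0A takes lanes 1 and 3 from the new second source,
  // reproducing the original result.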
2411   case X86::INSERTPSrri:
2412   case X86::VINSERTPSrri:
2413   case X86::VINSERTPSZrri: {
2414     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2415     unsigned ZMask = Imm & 15;
2416     unsigned DstIdx = (Imm >> 4) & 3;
2417     unsigned SrcIdx = (Imm >> 6) & 3;
2418 
2419     // We can commute insertps if we zero 2 of the elements, the insertion is
2420     // "inline", and we don't overwrite the inserted element with a zero.
2421     if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2422         llvm::popcount(ZMask) == 2) {
2423       unsigned AltIdx = llvm::countr_zero((ZMask | (1 << DstIdx)) ^ 15);
2424       assert(AltIdx < 4 && "Illegal insertion index");
2425       unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2426       WorkingMI = CloneIfNew(MI);
2427       WorkingMI->getOperand(MI.getNumOperands() - 1).setImm(AltImm);
2428       break;
2429     }
2430     return nullptr;
2431   }
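  // Worked example: Imm = 0x06 (SrcIdx = DstIdx = 0, ZMask zeroing lanes 1
  // and 2) inserts src[0] into dst[0] and keeps only dst[3]. With operands
  // swapped, the equivalent inline insertion uses AltIdx = 3:
  // AltImm = (3 << 6) | (3 << 4) | 0x06 = 0xF6.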
2432   case X86::MOVSDrr:
2433   case X86::MOVSSrr:
2434   case X86::VMOVSDrr:
2435   case X86::VMOVSSrr: {
2436     // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
2437     if (Subtarget.hasSSE41()) {
2438       unsigned Mask;
2439       switch (Opc) {
2440       default:
2441         llvm_unreachable("Unreachable!");
2442       case X86::MOVSDrr:
2443         Opc = X86::BLENDPDrri;
2444         Mask = 0x02;
2445         break;
2446       case X86::MOVSSrr:
2447         Opc = X86::BLENDPSrri;
2448         Mask = 0x0E;
2449         break;
2450       case X86::VMOVSDrr:
2451         Opc = X86::VBLENDPDrri;
2452         Mask = 0x02;
2453         break;
2454       case X86::VMOVSSrr:
2455         Opc = X86::VBLENDPSrri;
2456         Mask = 0x0E;
2457         break;
2458       }
2459 
2460       WorkingMI = CloneIfNew(MI);
2461       WorkingMI->setDesc(get(Opc));
2462       WorkingMI->addOperand(MachineOperand::CreateImm(Mask));
2463       break;
2464     }
2465 
2466     assert(Opc == X86::MOVSDrr && "Only MOVSD can commute to SHUFPD");
2467     WorkingMI = CloneIfNew(MI);
2468     WorkingMI->setDesc(get(X86::SHUFPDrri));
2469     WorkingMI->addOperand(MachineOperand::CreateImm(0x02));
2470     break;
2471   }
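  // E.g. "MOVSDrr a, b" yields { b[0], a[1] }. With the operands swapped,
  // "BLENDPDrri b, a, 0x02" takes lane 0 from b and lane 1 from a, and on
  // pre-SSE4.1 targets "SHUFPDrri b, a, 0x02" selects b[0] and a[1] -- both
  // reproduce the original vector.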
2472   case X86::SHUFPDrri: {
2473     // Commute to MOVSD.
2474     assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2475     WorkingMI = CloneIfNew(MI);
2476     WorkingMI->setDesc(get(X86::MOVSDrr));
2477     WorkingMI->removeOperand(3);
2478     break;
2479   }
2480   case X86::PCLMULQDQrri:
2481   case X86::VPCLMULQDQrri:
2482   case X86::VPCLMULQDQYrri:
2483   case X86::VPCLMULQDQZrri:
2484   case X86::VPCLMULQDQZ128rri:
2485   case X86::VPCLMULQDQZ256rri: {
2486     // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
2487     // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
2488     unsigned Imm = MI.getOperand(3).getImm();
2489     unsigned Src1Hi = Imm & 0x01;
2490     unsigned Src2Hi = Imm & 0x10;
2491     WorkingMI = CloneIfNew(MI);
2492     WorkingMI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
2493     break;
2494   }
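  // E.g. Imm = 0x01 multiplies SRC1[127:64] by SRC2[63:0]; after the sources
  // swap, the rewritten Imm = 0x10 picks the low half of the new first
  // source and the high half of the new second source, so the same two
  // quadwords are multiplied.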
2495   case X86::VPCMPBZ128rri:
2496   case X86::VPCMPUBZ128rri:
2497   case X86::VPCMPBZ256rri:
2498   case X86::VPCMPUBZ256rri:
2499   case X86::VPCMPBZrri:
2500   case X86::VPCMPUBZrri:
2501   case X86::VPCMPDZ128rri:
2502   case X86::VPCMPUDZ128rri:
2503   case X86::VPCMPDZ256rri:
2504   case X86::VPCMPUDZ256rri:
2505   case X86::VPCMPDZrri:
2506   case X86::VPCMPUDZrri:
2507   case X86::VPCMPQZ128rri:
2508   case X86::VPCMPUQZ128rri:
2509   case X86::VPCMPQZ256rri:
2510   case X86::VPCMPUQZ256rri:
2511   case X86::VPCMPQZrri:
2512   case X86::VPCMPUQZrri:
2513   case X86::VPCMPWZ128rri:
2514   case X86::VPCMPUWZ128rri:
2515   case X86::VPCMPWZ256rri:
2516   case X86::VPCMPUWZ256rri:
2517   case X86::VPCMPWZrri:
2518   case X86::VPCMPUWZrri:
2519   case X86::VPCMPBZ128rrik:
2520   case X86::VPCMPUBZ128rrik:
2521   case X86::VPCMPBZ256rrik:
2522   case X86::VPCMPUBZ256rrik:
2523   case X86::VPCMPBZrrik:
2524   case X86::VPCMPUBZrrik:
2525   case X86::VPCMPDZ128rrik:
2526   case X86::VPCMPUDZ128rrik:
2527   case X86::VPCMPDZ256rrik:
2528   case X86::VPCMPUDZ256rrik:
2529   case X86::VPCMPDZrrik:
2530   case X86::VPCMPUDZrrik:
2531   case X86::VPCMPQZ128rrik:
2532   case X86::VPCMPUQZ128rrik:
2533   case X86::VPCMPQZ256rrik:
2534   case X86::VPCMPUQZ256rrik:
2535   case X86::VPCMPQZrrik:
2536   case X86::VPCMPUQZrrik:
2537   case X86::VPCMPWZ128rrik:
2538   case X86::VPCMPUWZ128rrik:
2539   case X86::VPCMPWZ256rrik:
2540   case X86::VPCMPUWZ256rrik:
2541   case X86::VPCMPWZrrik:
2542   case X86::VPCMPUWZrrik:
2543     WorkingMI = CloneIfNew(MI);
2544     // Flip comparison mode immediate (if necessary).
2545     WorkingMI->getOperand(MI.getNumOperands() - 1)
2546         .setImm(X86::getSwappedVPCMPImm(
2547             MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7));
2548     break;
2549   case X86::VPCOMBri:
2550   case X86::VPCOMUBri:
2551   case X86::VPCOMDri:
2552   case X86::VPCOMUDri:
2553   case X86::VPCOMQri:
2554   case X86::VPCOMUQri:
2555   case X86::VPCOMWri:
2556   case X86::VPCOMUWri:
2557     WorkingMI = CloneIfNew(MI);
2558     // Flip comparison mode immediate (if necessary).
2559     WorkingMI->getOperand(3).setImm(
2560         X86::getSwappedVPCOMImm(MI.getOperand(3).getImm() & 0x7));
2561     break;
2562   case X86::VCMPSDZrri:
2563   case X86::VCMPSSZrri:
2564   case X86::VCMPPDZrri:
2565   case X86::VCMPPSZrri:
2566   case X86::VCMPSHZrri:
2567   case X86::VCMPPHZrri:
2568   case X86::VCMPPHZ128rri:
2569   case X86::VCMPPHZ256rri:
2570   case X86::VCMPPDZ128rri:
2571   case X86::VCMPPSZ128rri:
2572   case X86::VCMPPDZ256rri:
2573   case X86::VCMPPSZ256rri:
2574   case X86::VCMPPDZrrik:
2575   case X86::VCMPPSZrrik:
2576   case X86::VCMPPDZ128rrik:
2577   case X86::VCMPPSZ128rrik:
2578   case X86::VCMPPDZ256rrik:
2579   case X86::VCMPPSZ256rrik:
2580     WorkingMI = CloneIfNew(MI);
2581     WorkingMI->getOperand(MI.getNumExplicitOperands() - 1)
2582         .setImm(X86::getSwappedVCMPImm(
2583             MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
2584     break;
2585   case X86::VPERM2F128rri:
2586   case X86::VPERM2I128rri:
2587     // Flip permute source immediate.
2588     // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
2589     // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
2590     WorkingMI = CloneIfNew(MI);
2591     WorkingMI->getOperand(3).setImm((MI.getOperand(3).getImm() & 0xFF) ^ 0x22);
2592     break;
2593   case X86::MOVHLPSrr:
2594   case X86::UNPCKHPDrr:
2595   case X86::VMOVHLPSrr:
2596   case X86::VUNPCKHPDrr:
2597   case X86::VMOVHLPSZrr:
2598   case X86::VUNPCKHPDZ128rr:
2599     assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2600 
2601     switch (Opc) {
2602     default:
2603       llvm_unreachable("Unreachable!");
2604     case X86::MOVHLPSrr:
2605       Opc = X86::UNPCKHPDrr;
2606       break;
2607     case X86::UNPCKHPDrr:
2608       Opc = X86::MOVHLPSrr;
2609       break;
2610     case X86::VMOVHLPSrr:
2611       Opc = X86::VUNPCKHPDrr;
2612       break;
2613     case X86::VUNPCKHPDrr:
2614       Opc = X86::VMOVHLPSrr;
2615       break;
2616     case X86::VMOVHLPSZrr:
2617       Opc = X86::VUNPCKHPDZ128rr;
2618       break;
2619     case X86::VUNPCKHPDZ128rr:
2620       Opc = X86::VMOVHLPSZrr;
2621       break;
2622     }
2623     WorkingMI = CloneIfNew(MI);
2624     WorkingMI->setDesc(get(Opc));
2625     break;
2626   CASE_ND(CMOV16rr)
2627   CASE_ND(CMOV32rr)
2628   CASE_ND(CMOV64rr) {
2629     WorkingMI = CloneIfNew(MI);
2630     unsigned OpNo = MI.getDesc().getNumOperands() - 1;
2631     X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
2632     WorkingMI->getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
2633     break;
2634   }
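  // E.g. a CMOV selecting its second register source when COND_E holds must,
  // once its two sources are swapped, select under COND_NE to produce the
  // same value -- hence GetOppositeBranchCondition.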
2635   case X86::VPTERNLOGDZrri:
2636   case X86::VPTERNLOGDZrmi:
2637   case X86::VPTERNLOGDZ128rri:
2638   case X86::VPTERNLOGDZ128rmi:
2639   case X86::VPTERNLOGDZ256rri:
2640   case X86::VPTERNLOGDZ256rmi:
2641   case X86::VPTERNLOGQZrri:
2642   case X86::VPTERNLOGQZrmi:
2643   case X86::VPTERNLOGQZ128rri:
2644   case X86::VPTERNLOGQZ128rmi:
2645   case X86::VPTERNLOGQZ256rri:
2646   case X86::VPTERNLOGQZ256rmi:
2647   case X86::VPTERNLOGDZrrik:
2648   case X86::VPTERNLOGDZ128rrik:
2649   case X86::VPTERNLOGDZ256rrik:
2650   case X86::VPTERNLOGQZrrik:
2651   case X86::VPTERNLOGQZ128rrik:
2652   case X86::VPTERNLOGQZ256rrik:
2653   case X86::VPTERNLOGDZrrikz:
2654   case X86::VPTERNLOGDZrmikz:
2655   case X86::VPTERNLOGDZ128rrikz:
2656   case X86::VPTERNLOGDZ128rmikz:
2657   case X86::VPTERNLOGDZ256rrikz:
2658   case X86::VPTERNLOGDZ256rmikz:
2659   case X86::VPTERNLOGQZrrikz:
2660   case X86::VPTERNLOGQZrmikz:
2661   case X86::VPTERNLOGQZ128rrikz:
2662   case X86::VPTERNLOGQZ128rmikz:
2663   case X86::VPTERNLOGQZ256rrikz:
2664   case X86::VPTERNLOGQZ256rmikz:
2665   case X86::VPTERNLOGDZ128rmbi:
2666   case X86::VPTERNLOGDZ256rmbi:
2667   case X86::VPTERNLOGDZrmbi:
2668   case X86::VPTERNLOGQZ128rmbi:
2669   case X86::VPTERNLOGQZ256rmbi:
2670   case X86::VPTERNLOGQZrmbi:
2671   case X86::VPTERNLOGDZ128rmbikz:
2672   case X86::VPTERNLOGDZ256rmbikz:
2673   case X86::VPTERNLOGDZrmbikz:
2674   case X86::VPTERNLOGQZ128rmbikz:
2675   case X86::VPTERNLOGQZ256rmbikz:
2676   case X86::VPTERNLOGQZrmbikz: {
2677     WorkingMI = CloneIfNew(MI);
2678     commuteVPTERNLOG(*WorkingMI, OpIdx1, OpIdx2);
2679     break;
2680   }
2681   default:
2682     if (isCommutableVPERMV3Instruction(Opc)) {
2683       WorkingMI = CloneIfNew(MI);
2684       WorkingMI->setDesc(get(getCommutedVPERMV3Opcode(Opc)));
2685       break;
2686     }
2687 
2688     if (auto *FMA3Group = getFMA3Group(Opc, MI.getDesc().TSFlags)) {
2689       WorkingMI = CloneIfNew(MI);
2690       WorkingMI->setDesc(
2691           get(getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group)));
2692       break;
2693     }
2694   }
2695   return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2696 }
2697 
2698 bool X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
2699                                                  unsigned &SrcOpIdx1,
2700                                                  unsigned &SrcOpIdx2,
2701                                                  bool IsIntrinsic) const {
2702   uint64_t TSFlags = MI.getDesc().TSFlags;
2703 
2704   unsigned FirstCommutableVecOp = 1;
2705   unsigned LastCommutableVecOp = 3;
2706   unsigned KMaskOp = -1U;
2707   if (X86II::isKMasked(TSFlags)) {
2708     // For k-zero-masked operations it is Ok to commute the first vector
2709     // operand, unless this is an intrinsic instruction.
2710     // For regular k-masked operations a conservative choice is made, as the
2711     // elements of the first vector operand for which the corresponding bit
2712     // in the k-mask operand is set to 0 are copied to the result of the
2713     // instruction.
2714     // TODO/FIXME: The commute still may be legal if it is known that the
2715     // k-mask operand is set to either all ones or all zeroes.
2716     // It is also Ok to commute the 1st operand if all users of MI use only
2717     // the elements enabled by the k-mask operand. For example,
2718     //   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
2719     //                                                     : v1[i];
2720     //   VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
2721     //                                  // Ok, to commute v1 in FMADD213PSZrk.
2722 
2723     // The k-mask operand has index = 2 for masked and zero-masked operations.
2724     KMaskOp = 2;
2725 
2726     // The operand with index = 1 is used as a source for those elements for
2727     // which the corresponding bit in the k-mask is set to 0.
2728     if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
2729       FirstCommutableVecOp = 3;
2730 
2731     LastCommutableVecOp++;
2732   } else if (IsIntrinsic) {
2733     // Commuting the first operand of an intrinsic instruction isn't possible
2734     // unless we can prove that only the lowest element of the result is used.
2735     FirstCommutableVecOp = 2;
2736   }
2737 
2738   if (isMem(MI, LastCommutableVecOp))
2739     LastCommutableVecOp--;
2740 
2741   // Only operands in [FirstCommutableVecOp, LastCommutableVecOp] are commutable.
2742   // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
2743   // that the operand is not specified/fixed.
2744   if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2745       (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2746        SrcOpIdx1 == KMaskOp))
2747     return false;
2748   if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2749       (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2750        SrcOpIdx2 == KMaskOp))
2751     return false;
2752 
2753   // Look for two different register operands assumed to be commutable
2754   // regardless of the FMA opcode. The FMA opcode is adjusted later.
2755   if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2756       SrcOpIdx2 == CommuteAnyOperandIndex) {
2757     unsigned CommutableOpIdx2 = SrcOpIdx2;
2758 
2759     // At least one of the operands to be commuted is not specified and
2760     // this method is free to choose appropriate commutable operands.
2761     if (SrcOpIdx1 == SrcOpIdx2)
2762       // Neither operand is fixed. By default, set one of the commutable
2763       // operands to the last register operand of the instruction.
2764       CommutableOpIdx2 = LastCommutableVecOp;
2765     else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2766       // Only one of operands is not fixed.
2767       CommutableOpIdx2 = SrcOpIdx1;
2768 
2769     // CommutableOpIdx2 is well defined now. Let's choose another commutable
2770     // operand and assign its index to CommutableOpIdx1.
2771     Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
2772 
2773     unsigned CommutableOpIdx1;
2774     for (CommutableOpIdx1 = LastCommutableVecOp;
2775          CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2776       // Just ignore and skip the k-mask operand.
2777       if (CommutableOpIdx1 == KMaskOp)
2778         continue;
2779 
2780       // The commuted operands must have different registers.
2781       // Otherwise, the commute transformation does not change anything and
2782       // is therefore useless.
2783       if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
2784         break;
2785     }
2786 
2787     // No appropriate commutable operands were found.
2788     if (CommutableOpIdx1 < FirstCommutableVecOp)
2789       return false;
2790 
2791     // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
2792     // to return those values.
2793     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2794                               CommutableOpIdx2))
2795       return false;
2796   }
2797 
2798   return true;
2799 }
2800 
2801 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2802                                          unsigned &SrcOpIdx1,
2803                                          unsigned &SrcOpIdx2) const {
2804   const MCInstrDesc &Desc = MI.getDesc();
2805   if (!Desc.isCommutable())
2806     return false;
2807 
2808   switch (MI.getOpcode()) {
2809   case X86::CMPSDrri:
2810   case X86::CMPSSrri:
2811   case X86::CMPPDrri:
2812   case X86::CMPPSrri:
2813   case X86::VCMPSDrri:
2814   case X86::VCMPSSrri:
2815   case X86::VCMPPDrri:
2816   case X86::VCMPPSrri:
2817   case X86::VCMPPDYrri:
2818   case X86::VCMPPSYrri:
2819   case X86::VCMPSDZrri:
2820   case X86::VCMPSSZrri:
2821   case X86::VCMPPDZrri:
2822   case X86::VCMPPSZrri:
2823   case X86::VCMPSHZrri:
2824   case X86::VCMPPHZrri:
2825   case X86::VCMPPHZ128rri:
2826   case X86::VCMPPHZ256rri:
2827   case X86::VCMPPDZ128rri:
2828   case X86::VCMPPSZ128rri:
2829   case X86::VCMPPDZ256rri:
2830   case X86::VCMPPSZ256rri:
2831   case X86::VCMPPDZrrik:
2832   case X86::VCMPPSZrrik:
2833   case X86::VCMPPDZ128rrik:
2834   case X86::VCMPPSZ128rrik:
2835   case X86::VCMPPDZ256rrik:
2836   case X86::VCMPPSZ256rrik: {
2837     unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2838 
2839     // Float comparisons can be safely commuted for the symmetric
2840     // Ordered/Unordered/Equal/NotEqual predicates.
2841     unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2842     switch (Imm) {
2843     default:
2844       // EVEX versions can be commuted.
2845       if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2846         break;
2847       return false;
2848     case 0x00: // EQUAL
2849     case 0x03: // UNORDERED
2850     case 0x04: // NOT EQUAL
2851     case 0x07: // ORDERED
2852       break;
2853     }
2854 
2855     // The indices of the commutable operands are 1 and 2 (or 2 and 3
2856     // when masked).
2857     // Assign them to the returned operand indices here.
2858     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2859                                 2 + OpOffset);
2860   }
2861   case X86::MOVSSrr:
2862     // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2863     // form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable
2864     // since AVX implies SSE4.1.
2865     if (Subtarget.hasSSE41())
2866       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2867     return false;
2868   case X86::SHUFPDrri:
2869     // We can commute this to MOVSD.
2870     if (MI.getOperand(3).getImm() == 0x02)
2871       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2872     return false;
2873   case X86::MOVHLPSrr:
2874   case X86::UNPCKHPDrr:
2875   case X86::VMOVHLPSrr:
2876   case X86::VUNPCKHPDrr:
2877   case X86::VMOVHLPSZrr:
2878   case X86::VUNPCKHPDZ128rr:
2879     if (Subtarget.hasSSE2())
2880       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2881     return false;
2882   case X86::VPTERNLOGDZrri:
2883   case X86::VPTERNLOGDZrmi:
2884   case X86::VPTERNLOGDZ128rri:
2885   case X86::VPTERNLOGDZ128rmi:
2886   case X86::VPTERNLOGDZ256rri:
2887   case X86::VPTERNLOGDZ256rmi:
2888   case X86::VPTERNLOGQZrri:
2889   case X86::VPTERNLOGQZrmi:
2890   case X86::VPTERNLOGQZ128rri:
2891   case X86::VPTERNLOGQZ128rmi:
2892   case X86::VPTERNLOGQZ256rri:
2893   case X86::VPTERNLOGQZ256rmi:
2894   case X86::VPTERNLOGDZrrik:
2895   case X86::VPTERNLOGDZ128rrik:
2896   case X86::VPTERNLOGDZ256rrik:
2897   case X86::VPTERNLOGQZrrik:
2898   case X86::VPTERNLOGQZ128rrik:
2899   case X86::VPTERNLOGQZ256rrik:
2900   case X86::VPTERNLOGDZrrikz:
2901   case X86::VPTERNLOGDZrmikz:
2902   case X86::VPTERNLOGDZ128rrikz:
2903   case X86::VPTERNLOGDZ128rmikz:
2904   case X86::VPTERNLOGDZ256rrikz:
2905   case X86::VPTERNLOGDZ256rmikz:
2906   case X86::VPTERNLOGQZrrikz:
2907   case X86::VPTERNLOGQZrmikz:
2908   case X86::VPTERNLOGQZ128rrikz:
2909   case X86::VPTERNLOGQZ128rmikz:
2910   case X86::VPTERNLOGQZ256rrikz:
2911   case X86::VPTERNLOGQZ256rmikz:
2912   case X86::VPTERNLOGDZ128rmbi:
2913   case X86::VPTERNLOGDZ256rmbi:
2914   case X86::VPTERNLOGDZrmbi:
2915   case X86::VPTERNLOGQZ128rmbi:
2916   case X86::VPTERNLOGQZ256rmbi:
2917   case X86::VPTERNLOGQZrmbi:
2918   case X86::VPTERNLOGDZ128rmbikz:
2919   case X86::VPTERNLOGDZ256rmbikz:
2920   case X86::VPTERNLOGDZrmbikz:
2921   case X86::VPTERNLOGQZ128rmbikz:
2922   case X86::VPTERNLOGQZ256rmbikz:
2923   case X86::VPTERNLOGQZrmbikz:
2924     return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2925   case X86::VPDPWSSDYrr:
2926   case X86::VPDPWSSDrr:
2927   case X86::VPDPWSSDSYrr:
2928   case X86::VPDPWSSDSrr:
2929   case X86::VPDPWUUDrr:
2930   case X86::VPDPWUUDYrr:
2931   case X86::VPDPWUUDSrr:
2932   case X86::VPDPWUUDSYrr:
2933   case X86::VPDPBSSDSrr:
2934   case X86::VPDPBSSDSYrr:
2935   case X86::VPDPBSSDrr:
2936   case X86::VPDPBSSDYrr:
2937   case X86::VPDPBUUDSrr:
2938   case X86::VPDPBUUDSYrr:
2939   case X86::VPDPBUUDrr:
2940   case X86::VPDPBUUDYrr:
2941   case X86::VPDPBSSDSZ128r:
2942   case X86::VPDPBSSDSZ128rk:
2943   case X86::VPDPBSSDSZ128rkz:
2944   case X86::VPDPBSSDSZ256r:
2945   case X86::VPDPBSSDSZ256rk:
2946   case X86::VPDPBSSDSZ256rkz:
2947   case X86::VPDPBSSDSZr:
2948   case X86::VPDPBSSDSZrk:
2949   case X86::VPDPBSSDSZrkz:
2950   case X86::VPDPBSSDZ128r:
2951   case X86::VPDPBSSDZ128rk:
2952   case X86::VPDPBSSDZ128rkz:
2953   case X86::VPDPBSSDZ256r:
2954   case X86::VPDPBSSDZ256rk:
2955   case X86::VPDPBSSDZ256rkz:
2956   case X86::VPDPBSSDZr:
2957   case X86::VPDPBSSDZrk:
2958   case X86::VPDPBSSDZrkz:
2959   case X86::VPDPBUUDSZ128r:
2960   case X86::VPDPBUUDSZ128rk:
2961   case X86::VPDPBUUDSZ128rkz:
2962   case X86::VPDPBUUDSZ256r:
2963   case X86::VPDPBUUDSZ256rk:
2964   case X86::VPDPBUUDSZ256rkz:
2965   case X86::VPDPBUUDSZr:
2966   case X86::VPDPBUUDSZrk:
2967   case X86::VPDPBUUDSZrkz:
2968   case X86::VPDPBUUDZ128r:
2969   case X86::VPDPBUUDZ128rk:
2970   case X86::VPDPBUUDZ128rkz:
2971   case X86::VPDPBUUDZ256r:
2972   case X86::VPDPBUUDZ256rk:
2973   case X86::VPDPBUUDZ256rkz:
2974   case X86::VPDPBUUDZr:
2975   case X86::VPDPBUUDZrk:
2976   case X86::VPDPBUUDZrkz:
2977   case X86::VPDPWSSDZ128r:
2978   case X86::VPDPWSSDZ128rk:
2979   case X86::VPDPWSSDZ128rkz:
2980   case X86::VPDPWSSDZ256r:
2981   case X86::VPDPWSSDZ256rk:
2982   case X86::VPDPWSSDZ256rkz:
2983   case X86::VPDPWSSDZr:
2984   case X86::VPDPWSSDZrk:
2985   case X86::VPDPWSSDZrkz:
2986   case X86::VPDPWSSDSZ128r:
2987   case X86::VPDPWSSDSZ128rk:
2988   case X86::VPDPWSSDSZ128rkz:
2989   case X86::VPDPWSSDSZ256r:
2990   case X86::VPDPWSSDSZ256rk:
2991   case X86::VPDPWSSDSZ256rkz:
2992   case X86::VPDPWSSDSZr:
2993   case X86::VPDPWSSDSZrk:
2994   case X86::VPDPWSSDSZrkz:
2995   case X86::VPDPWUUDZ128r:
2996   case X86::VPDPWUUDZ128rk:
2997   case X86::VPDPWUUDZ128rkz:
2998   case X86::VPDPWUUDZ256r:
2999   case X86::VPDPWUUDZ256rk:
3000   case X86::VPDPWUUDZ256rkz:
3001   case X86::VPDPWUUDZr:
3002   case X86::VPDPWUUDZrk:
3003   case X86::VPDPWUUDZrkz:
3004   case X86::VPDPWUUDSZ128r:
3005   case X86::VPDPWUUDSZ128rk:
3006   case X86::VPDPWUUDSZ128rkz:
3007   case X86::VPDPWUUDSZ256r:
3008   case X86::VPDPWUUDSZ256rk:
3009   case X86::VPDPWUUDSZ256rkz:
3010   case X86::VPDPWUUDSZr:
3011   case X86::VPDPWUUDSZrk:
3012   case X86::VPDPWUUDSZrkz:
3013   case X86::VPMADD52HUQrr:
3014   case X86::VPMADD52HUQYrr:
3015   case X86::VPMADD52HUQZ128r:
3016   case X86::VPMADD52HUQZ128rk:
3017   case X86::VPMADD52HUQZ128rkz:
3018   case X86::VPMADD52HUQZ256r:
3019   case X86::VPMADD52HUQZ256rk:
3020   case X86::VPMADD52HUQZ256rkz:
3021   case X86::VPMADD52HUQZr:
3022   case X86::VPMADD52HUQZrk:
3023   case X86::VPMADD52HUQZrkz:
3024   case X86::VPMADD52LUQrr:
3025   case X86::VPMADD52LUQYrr:
3026   case X86::VPMADD52LUQZ128r:
3027   case X86::VPMADD52LUQZ128rk:
3028   case X86::VPMADD52LUQZ128rkz:
3029   case X86::VPMADD52LUQZ256r:
3030   case X86::VPMADD52LUQZ256rk:
3031   case X86::VPMADD52LUQZ256rkz:
3032   case X86::VPMADD52LUQZr:
3033   case X86::VPMADD52LUQZrk:
3034   case X86::VPMADD52LUQZrkz:
3035   case X86::VFMADDCPHZr:
3036   case X86::VFMADDCPHZrk:
3037   case X86::VFMADDCPHZrkz:
3038   case X86::VFMADDCPHZ128r:
3039   case X86::VFMADDCPHZ128rk:
3040   case X86::VFMADDCPHZ128rkz:
3041   case X86::VFMADDCPHZ256r:
3042   case X86::VFMADDCPHZ256rk:
3043   case X86::VFMADDCPHZ256rkz:
3044   case X86::VFMADDCSHZr:
3045   case X86::VFMADDCSHZrk:
3046   case X86::VFMADDCSHZrkz: {
3047     unsigned CommutableOpIdx1 = 2;
3048     unsigned CommutableOpIdx2 = 3;
3049     if (X86II::isKMasked(Desc.TSFlags)) {
3050       // Skip the mask register.
3051       ++CommutableOpIdx1;
3052       ++CommutableOpIdx2;
3053     }
3054     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3055                               CommutableOpIdx2))
3056       return false;
3057     if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
3058       // Bail if either commuted operand is not a register.
3059       return false;
3060     return true;
3061   }
3062 
3063   default:
3064     const X86InstrFMA3Group *FMA3Group =
3065         getFMA3Group(MI.getOpcode(), MI.getDesc().TSFlags);
3066     if (FMA3Group)
3067       return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
3068                                            FMA3Group->isIntrinsic());
3069 
3070     // Handle masked instructions, since we need to skip over the mask input
3071     // and the preserved input.
3072     if (X86II::isKMasked(Desc.TSFlags)) {
3073       // First assume that the first input is the mask operand and skip past it.
3074       unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
3075       unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
3076       // Check if the first input is tied. If it isn't, then we only need to
3077       // skip the mask operand, which we did above.
3078       if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
3079                                              MCOI::TIED_TO) != -1)) {
3080         // If this is a zero-masking instruction with a tied operand, we need
3081         // to move the first index back to the first input, since this must
3082         // be a 3-input instruction and we want the first two non-mask inputs.
3083         // Otherwise this is a 2-input instruction with a preserved input and
3084         // mask, so we need to move the indices to skip one more input.
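        // Illustrative operand layouts (assumed typical AVX-512 ordering, not
        // taken from a specific instruction definition):
        //   merge-masked, 2 sources: dst, passthru(tied), kmask, src1, src2
        //     -> commute operand indices 3 and 4
        //   zero-masked, 3 sources:  dst, src1(tied), kmask, src2, src3
        //     -> commute operand indices 1 and 3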
3085         if (X86II::isKMergeMasked(Desc.TSFlags)) {
3086           ++CommutableOpIdx1;
3087           ++CommutableOpIdx2;
3088         } else {
3089           --CommutableOpIdx1;
3090         }
3091       }
3092 
3093       if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3094                                 CommutableOpIdx2))
3095         return false;
3096 
3097       if (!MI.getOperand(SrcOpIdx1).isReg() ||
3098           !MI.getOperand(SrcOpIdx2).isReg())
3099         // Bail if either commuted operand is not a register.
3100         return false;
3101       return true;
3102     }
3103 
3104     return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
3105   }
3106   return false;
3107 }
3108 
3109 static bool isConvertibleLEA(MachineInstr *MI) {
3110   unsigned Opcode = MI->getOpcode();
3111   if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
3112       Opcode != X86::LEA64_32r)
3113     return false;
3114 
3115   const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt);
3116   const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp);
3117   const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg);
3118 
3119   if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 ||
3120       Scale.getImm() > 1)
3121     return false;
3122 
3123   return true;
3124 }
3125 
3126 bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {
3127   // Currently we're only interested in the following sequence:
3128   //   r3 = lea r1, r2
3129   //   r5 = add r3, r4
3130   // Both r3 and r4 are killed by the add; we want the add instruction to
3131   // have the operand order
3132   //   r5 = add r4, r3
3133   // so that later X86FixupLEAs can rewrite the lea as an add.
3134   unsigned Opcode = MI.getOpcode();
3135   if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
3136     return false;
3137 
3138   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3139   Register Reg1 = MI.getOperand(1).getReg();
3140   Register Reg2 = MI.getOperand(2).getReg();
3141 
3142   // Check if Reg1 comes from LEA in the same MBB.
3143   if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) {
3144     if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
3145       Commute = true;
3146       return true;
3147     }
3148   }
3149 
3150   // Check if Reg2 comes from LEA in the same MBB.
3151   if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) {
3152     if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
3153       Commute = false;
3154       return true;
3155     }
3156   }
3157 
3158   return false;
3159 }
3160 
3161 int X86::getCondSrcNoFromDesc(const MCInstrDesc &MCID) {
3162   unsigned Opcode = MCID.getOpcode();
3163   if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isSETZUCC(Opcode) ||
3164         X86::isCMOVCC(Opcode) || X86::isCFCMOVCC(Opcode) ||
3165         X86::isCCMPCC(Opcode) || X86::isCTESTCC(Opcode)))
3166     return -1;
3167   // Assume that the condition code is always the last use operand.
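  // E.g. CMOV32rr is (dst; src1, src2, cond): three use operands, so this
  // returns 2, the position of the condition among the uses.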
3168   unsigned NumUses = MCID.getNumOperands() - MCID.getNumDefs();
3169   return NumUses - 1;
3170 }
3171 
3172 X86::CondCode X86::getCondFromMI(const MachineInstr &MI) {
3173   const MCInstrDesc &MCID = MI.getDesc();
3174   int CondNo = getCondSrcNoFromDesc(MCID);
3175   if (CondNo < 0)
3176     return X86::COND_INVALID;
3177   CondNo += MCID.getNumDefs();
3178   return static_cast<X86::CondCode>(MI.getOperand(CondNo).getImm());
3179 }
3180 
3181 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
3182   return X86::isJCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3183                                     : X86::COND_INVALID;
3184 }
3185 
3186 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
3187   return X86::isSETCC(MI.getOpcode()) || X86::isSETZUCC(MI.getOpcode())
3188              ? X86::getCondFromMI(MI)
3189              : X86::COND_INVALID;
3190 }
3191 
3192 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
3193   return X86::isCMOVCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3194                                        : X86::COND_INVALID;
3195 }
3196 
3197 X86::CondCode X86::getCondFromCFCMov(const MachineInstr &MI) {
3198   return X86::isCFCMOVCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3199                                          : X86::COND_INVALID;
3200 }
3201 
3202 X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) {
3203   return X86::isCCMPCC(MI.getOpcode()) || X86::isCTESTCC(MI.getOpcode())
3204              ? X86::getCondFromMI(MI)
3205              : X86::COND_INVALID;
3206 }
3207 
3208 int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) {
3209   // CCMP/CTEST has two conditional operands:
3210   // - SCC: source conditional code (same as CMOV)
3211   // - DCF: destination conditional flags, which have 4 valid bits
3212   //
3213   // +----+----+----+----+
3214   // | OF | SF | ZF | CF |
3215   // +----+----+----+----+
3216   //
3217   // If SCC (the source conditional code) evaluates to false, CCMP/CTEST
3218   // updates the conditional flags as follows:
3219   //
3220   // OF = DCF.OF
3221   // SF = DCF.SF
3222   // ZF = DCF.ZF
3223   // CF = DCF.CF
3224   // PF = DCF.CF
3225   // AF = 0 (Auxiliary Carry Flag)
3226   //
3227   // Otherwise, the CMP or TEST is executed and it updates the
3228   // CSPAZO flags normally.
3229   //
3230   // NOTE:
3231   // If SCC = P, then SCC evaluates to true regardless of the CSPAZO value.
3232   // If SCC = NP, then SCC evaluates to false regardless of the CSPAZO value.
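  //
  // For example, COND_E maps to ZF (0b0010) and COND_B to CF (0b0001), while
  // COND_P maps to PF, which aliases the CF bit because DCF has no separate
  // PF bit.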
3233 
3234   enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
3235 
3236   switch (CC) {
3237   default:
3238     llvm_unreachable("Illegal condition code!");
3239   case X86::COND_NO:
3240   case X86::COND_NE:
3241   case X86::COND_GE:
3242   case X86::COND_G:
3243   case X86::COND_AE:
3244   case X86::COND_A:
3245   case X86::COND_NS:
3246   case X86::COND_NP:
3247     return 0;
3248   case X86::COND_O:
3249     return OF;
3250   case X86::COND_B:
3251   case X86::COND_BE:
3252     return CF;
3254   case X86::COND_E:
3255   case X86::COND_LE:
3256     return ZF;
3257   case X86::COND_S:
3258   case X86::COND_L:
3259     return SF;
3260   case X86::COND_P:
3261     return PF;
3262   }
3263 }
3264 
3265 #define GET_X86_NF_TRANSFORM_TABLE
3266 #define GET_X86_ND2NONND_TABLE
3267 #include "X86GenInstrMapping.inc"
3268 
3269 static unsigned getNewOpcFromTable(ArrayRef<X86TableEntry> Table,
3270                                    unsigned Opc) {
3271   const auto I = llvm::lower_bound(Table, Opc);
3272   return (I == Table.end() || I->OldOpc != Opc) ? 0U : I->NewOpc;
3273 }
3274 unsigned X86::getNFVariant(unsigned Opc) {
3275 #if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3276   // Make sure the tables are sorted.
3277   static std::atomic<bool> NFTableChecked(false);
3278   if (!NFTableChecked.load(std::memory_order_relaxed)) {
3279     assert(llvm::is_sorted(X86NFTransformTable) &&
3280            "X86NFTransformTable is not sorted!");
3281     NFTableChecked.store(true, std::memory_order_relaxed);
3282   }
3283 #endif
3284   return getNewOpcFromTable(X86NFTransformTable, Opc);
3285 }
3286 
3287 unsigned X86::getNonNDVariant(unsigned Opc) {
3288 #if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3289   // Make sure the tables are sorted.
3290   static std::atomic<bool> NDTableChecked(false);
3291   if (!NDTableChecked.load(std::memory_order_relaxed)) {
3292     assert(llvm::is_sorted(X86ND2NonNDTable) &&
3293            "X86ND2NonNDTable is not sorted!");
3294     NDTableChecked.store(true, std::memory_order_relaxed);
3295   }
3296 #endif
3297   return getNewOpcFromTable(X86ND2NonNDTable, Opc);
3298 }
3299 
3300 /// Return the inverse of the specified condition,
3301 /// e.g. turning COND_E to COND_NE.
3302 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
3303   switch (CC) {
3304   default:
3305     llvm_unreachable("Illegal condition code!");
3306   case X86::COND_E:
3307     return X86::COND_NE;
3308   case X86::COND_NE:
3309     return X86::COND_E;
3310   case X86::COND_L:
3311     return X86::COND_GE;
3312   case X86::COND_LE:
3313     return X86::COND_G;
3314   case X86::COND_G:
3315     return X86::COND_LE;
3316   case X86::COND_GE:
3317     return X86::COND_L;
3318   case X86::COND_B:
3319     return X86::COND_AE;
3320   case X86::COND_BE:
3321     return X86::COND_A;
3322   case X86::COND_A:
3323     return X86::COND_BE;
3324   case X86::COND_AE:
3325     return X86::COND_B;
3326   case X86::COND_S:
3327     return X86::COND_NS;
3328   case X86::COND_NS:
3329     return X86::COND_S;
3330   case X86::COND_P:
3331     return X86::COND_NP;
3332   case X86::COND_NP:
3333     return X86::COND_P;
3334   case X86::COND_O:
3335     return X86::COND_NO;
3336   case X86::COND_NO:
3337     return X86::COND_O;
3338   case X86::COND_NE_OR_P:
3339     return X86::COND_E_AND_NP;
3340   case X86::COND_E_AND_NP:
3341     return X86::COND_NE_OR_P;
3342   }
3343 }
3344 
3345 /// Assuming the flags are set by MI(a,b), return the condition code if we
3346 /// modify the instructions such that flags are set by MI(b,a).
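/// For example, if the flags were set by "CMP a, b" and the user tests COND_L
/// (a < b), the equivalent test after rewriting to "CMP b, a" is COND_G.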
3347 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
3348   switch (CC) {
3349   default:
3350     return X86::COND_INVALID;
3351   case X86::COND_E:
3352     return X86::COND_E;
3353   case X86::COND_NE:
3354     return X86::COND_NE;
3355   case X86::COND_L:
3356     return X86::COND_G;
3357   case X86::COND_LE:
3358     return X86::COND_GE;
3359   case X86::COND_G:
3360     return X86::COND_L;
3361   case X86::COND_GE:
3362     return X86::COND_LE;
3363   case X86::COND_B:
3364     return X86::COND_A;
3365   case X86::COND_BE:
3366     return X86::COND_AE;
3367   case X86::COND_A:
3368     return X86::COND_B;
3369   case X86::COND_AE:
3370     return X86::COND_BE;
3371   }
3372 }
3373 
3374 std::pair<X86::CondCode, bool>
3375 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
3376   X86::CondCode CC = X86::COND_INVALID;
3377   bool NeedSwap = false;
3378   switch (Predicate) {
3379   default:
3380     break;
3381   // Floating-point Predicates
3382   case CmpInst::FCMP_UEQ:
3383     CC = X86::COND_E;
3384     break;
3385   case CmpInst::FCMP_OLT:
3386     NeedSwap = true;
3387     [[fallthrough]];
3388   case CmpInst::FCMP_OGT:
3389     CC = X86::COND_A;
3390     break;
3391   case CmpInst::FCMP_OLE:
3392     NeedSwap = true;
3393     [[fallthrough]];
3394   case CmpInst::FCMP_OGE:
3395     CC = X86::COND_AE;
3396     break;
3397   case CmpInst::FCMP_UGT:
3398     NeedSwap = true;
3399     [[fallthrough]];
3400   case CmpInst::FCMP_ULT:
3401     CC = X86::COND_B;
3402     break;
3403   case CmpInst::FCMP_UGE:
3404     NeedSwap = true;
3405     [[fallthrough]];
3406   case CmpInst::FCMP_ULE:
3407     CC = X86::COND_BE;
3408     break;
3409   case CmpInst::FCMP_ONE:
3410     CC = X86::COND_NE;
3411     break;
3412   case CmpInst::FCMP_UNO:
3413     CC = X86::COND_P;
3414     break;
3415   case CmpInst::FCMP_ORD:
3416     CC = X86::COND_NP;
3417     break;
3418   case CmpInst::FCMP_OEQ:
3419     [[fallthrough]];
3420   case CmpInst::FCMP_UNE:
3421     CC = X86::COND_INVALID;
3422     break;
3423 
3424   // Integer Predicates
3425   case CmpInst::ICMP_EQ:
3426     CC = X86::COND_E;
3427     break;
3428   case CmpInst::ICMP_NE:
3429     CC = X86::COND_NE;
3430     break;
3431   case CmpInst::ICMP_UGT:
3432     CC = X86::COND_A;
3433     break;
3434   case CmpInst::ICMP_UGE:
3435     CC = X86::COND_AE;
3436     break;
3437   case CmpInst::ICMP_ULT:
3438     CC = X86::COND_B;
3439     break;
3440   case CmpInst::ICMP_ULE:
3441     CC = X86::COND_BE;
3442     break;
3443   case CmpInst::ICMP_SGT:
3444     CC = X86::COND_G;
3445     break;
3446   case CmpInst::ICMP_SGE:
3447     CC = X86::COND_GE;
3448     break;
3449   case CmpInst::ICMP_SLT:
3450     CC = X86::COND_L;
3451     break;
3452   case CmpInst::ICMP_SLE:
3453     CC = X86::COND_LE;
3454     break;
3455   }
3456 
3457   return std::make_pair(CC, NeedSwap);
3458 }
3459 
3460 /// Return a cmov opcode for the given register size in bytes, and operand type.
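/// E.g. getCMovOpcode(4, /*HasMemoryOperand=*/false, /*HasNDD=*/false) returns
/// X86::CMOV32rr.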
3461 unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand,
3462                             bool HasNDD) {
3463   switch (RegBytes) {
3464   default:
3465     llvm_unreachable("Illegal register size!");
3466 #define GET_ND_IF_ENABLED(OPC) (HasNDD ? OPC##_ND : OPC)
3467   case 2:
3468     return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV16rm)
3469                             : GET_ND_IF_ENABLED(X86::CMOV16rr);
3470   case 4:
3471     return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV32rm)
3472                             : GET_ND_IF_ENABLED(X86::CMOV32rr);
3473   case 8:
3474     return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV64rm)
3475                             : GET_ND_IF_ENABLED(X86::CMOV64rr);
3476   }
3477 }
3478 
3479 /// Get the VPCMP immediate for the given condition.
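/// The mapping follows the VPCMP immediate encoding: 0 = EQ, 1 = LT, 2 = LE,
/// 4 = NE, 5 = NLT (GE), 6 = NLE (GT); signedness is selected by the opcode
/// (VPCMP vs. VPCMPU), not by the immediate.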
3480 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
3481   switch (CC) {
3482   default:
3483     llvm_unreachable("Unexpected SETCC condition");
3484   case ISD::SETNE:
3485     return 4;
3486   case ISD::SETEQ:
3487     return 0;
3488   case ISD::SETULT:
3489   case ISD::SETLT:
3490     return 1;
3491   case ISD::SETUGT:
3492   case ISD::SETGT:
3493     return 6;
3494   case ISD::SETUGE:
3495   case ISD::SETGE:
3496     return 5;
3497   case ISD::SETULE:
3498   case ISD::SETLE:
3499     return 2;
3500   }
3501 }
3502 
3503 /// Get the VPCMP immediate if the operands are swapped.
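/// Swapping the operands turns a < b into b > a, so LT (1) and NLE (6) are
/// exchanged, as are LE (2) and NLT (5); EQ, NE, FALSE, and TRUE keep their
/// immediates.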
3504 unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
3505   switch (Imm) {
3506   default:
3507     llvm_unreachable("Unreachable!");
3508   case 0x01:
3509     Imm = 0x06;
3510     break; // LT  -> NLE
3511   case 0x02:
3512     Imm = 0x05;
3513     break; // LE  -> NLT
3514   case 0x05:
3515     Imm = 0x02;
3516     break; // NLT -> LE
3517   case 0x06:
3518     Imm = 0x01;
3519     break;   // NLE -> LT
3520   case 0x00: // EQ
3521   case 0x03: // FALSE
3522   case 0x04: // NE
3523   case 0x07: // TRUE
3524     break;
3525   }
3526 
3527   return Imm;
3528 }
3529 
3530 /// Get the VPCOM immediate if the operands are swapped.
3531 unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
3532   switch (Imm) {
3533   default:
3534     llvm_unreachable("Unreachable!");
3535   case 0x00:
3536     Imm = 0x02;
3537     break; // LT -> GT
3538   case 0x01:
3539     Imm = 0x03;
3540     break; // LE -> GE
3541   case 0x02:
3542     Imm = 0x00;
3543     break; // GT -> LT
3544   case 0x03:
3545     Imm = 0x01;
3546     break;   // GE -> LE
3547   case 0x04: // EQ
3548   case 0x05: // NE
3549   case 0x06: // FALSE
3550   case 0x07: // TRUE
3551     break;
3552   }
3553 
3554   return Imm;
3555 }
3556 
3557 /// Get the VCMP immediate if the operands are swapped.
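/// E.g. 0x01 (LT_OS) becomes 0x0e (GT_OS) and 0x11 (LT_OQ) becomes 0x1e
/// (GT_OQ); bit 4, which selects the signaling/quiet variant, is preserved.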
3558 unsigned X86::getSwappedVCMPImm(unsigned Imm) {
3559   // Only need the lower 2 bits to distinguish.
3560   switch (Imm & 0x3) {
3561   default:
3562     llvm_unreachable("Unreachable!");
3563   case 0x00:
3564   case 0x03:
3565     // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
3566     break;
3567   case 0x01:
3568   case 0x02:
3569     // Need to toggle bits 3:0. Bit 4 stays the same.
3570     Imm ^= 0xf;
3571     break;
3572   }
3573 
3574   return Imm;
3575 }
3576 
3577 unsigned X86::getVectorRegisterWidth(const MCOperandInfo &Info) {
3578   if (Info.RegClass == X86::VR128RegClassID ||
3579       Info.RegClass == X86::VR128XRegClassID)
3580     return 128;
3581   if (Info.RegClass == X86::VR256RegClassID ||
3582       Info.RegClass == X86::VR256XRegClassID)
3583     return 256;
3584   if (Info.RegClass == X86::VR512RegClassID)
3585     return 512;
3586   llvm_unreachable("Unknown register class!");
3587 }
3588 
3589 /// Return true if Reg is an X87 register.
3590 static bool isX87Reg(Register Reg) {
3591   return (Reg == X86::FPCW || Reg == X86::FPSW ||
3592           (Reg >= X86::ST0 && Reg <= X86::ST7));
3593 }
3594 
3595 /// Check if the instruction is an X87 instruction.
3596 bool X86::isX87Instruction(MachineInstr &MI) {
3597   // Calls and inline asm define X87 registers, so we special-case them
3598   // here; otherwise calls would incorrectly be flagged as x87
3599   // instructions.
3600   if (MI.isCall() || MI.isInlineAsm())
3601     return false;
3602   for (const MachineOperand &MO : MI.operands()) {
3603     if (!MO.isReg())
3604       continue;
3605     if (isX87Reg(MO.getReg()))
3606       return true;
3607   }
3608   return false;
3609 }
3610 
3611 int X86::getFirstAddrOperandIdx(const MachineInstr &MI) {
3612   auto IsMemOp = [](const MCOperandInfo &OpInfo) {
3613     return OpInfo.OperandType == MCOI::OPERAND_MEMORY;
3614   };
3615 
3616   const MCInstrDesc &Desc = MI.getDesc();
3617 
3618   // Directly invoke the MC-layer routine for real (i.e., non-pseudo)
3619   // instructions (fast case).
3620   if (!X86II::isPseudo(Desc.TSFlags)) {
3621     int MemRefIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
3622     if (MemRefIdx >= 0)
3623       return MemRefIdx + X86II::getOperandBias(Desc);
3624 #ifdef EXPENSIVE_CHECKS
3625     assert(none_of(Desc.operands(), IsMemOp) &&
3626            "Got false negative from X86II::getMemoryOperandNo()!");
3627 #endif
3628     return -1;
3629   }
3630 
3631   // Otherwise, handle pseudo instructions by examining the type of their
3632   // operands (slow case). An instruction cannot have a memory reference if it
3633   // has fewer than AddrNumOperands (= 5) explicit operands.
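  // (Those five operands are, in order: base register, scale amount, index
  // register, displacement, and segment register.)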
3634   unsigned NumOps = Desc.getNumOperands();
3635   if (NumOps < X86::AddrNumOperands) {
3636 #ifdef EXPENSIVE_CHECKS
3637     assert(none_of(Desc.operands(), IsMemOp) &&
3638            "Expected no operands to have OPERAND_MEMORY type!");
3639 #endif
3640     return -1;
3641   }
3642 
3643   // The first operand with type OPERAND_MEMORY indicates the start of a memory
3644   // reference. We expect the following AddrNumOperands - 1 operands to also have
3645   // OPERAND_MEMORY type.
3646   for (unsigned I = 0, E = NumOps - X86::AddrNumOperands; I != E; ++I) {
3647     if (IsMemOp(Desc.operands()[I])) {
3648 #ifdef EXPENSIVE_CHECKS
3649       assert(std::all_of(Desc.operands().begin() + I,
3650                          Desc.operands().begin() + I + X86::AddrNumOperands,
3651                          IsMemOp) &&
3652              "Expected all five operands in the memory reference to have "
3653              "OPERAND_MEMORY type!");
3654 #endif
3655       return I;
3656     }
3657   }
3658 
3659   return -1;
3660 }
3661 
3662 const Constant *X86::getConstantFromPool(const MachineInstr &MI,
3663                                          unsigned OpNo) {
3664   assert(MI.getNumOperands() >= (OpNo + X86::AddrNumOperands) &&
3665          "Unexpected number of operands!");
3666 
3667   const MachineOperand &Index = MI.getOperand(OpNo + X86::AddrIndexReg);
3668   if (!Index.isReg() || Index.getReg() != X86::NoRegister)
3669     return nullptr;
3670 
3671   const MachineOperand &Disp = MI.getOperand(OpNo + X86::AddrDisp);
3672   if (!Disp.isCPI() || Disp.getOffset() != 0)
3673     return nullptr;
3674 
3675   ArrayRef<MachineConstantPoolEntry> Constants =
3676       MI.getParent()->getParent()->getConstantPool()->getConstants();
3677   const MachineConstantPoolEntry &ConstantEntry = Constants[Disp.getIndex()];
3678 
3679   // Bail if this is a machine constant pool entry; we won't be able to dig out
3680   // anything useful.
3681   if (ConstantEntry.isMachineConstantPoolEntry())
3682     return nullptr;
3683 
3684   return ConstantEntry.Val.ConstVal;
3685 }
3686 
3687 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
3688   switch (MI.getOpcode()) {
3689   case X86::TCRETURNdi:
3690   case X86::TCRETURNri:
3691   case X86::TCRETURNmi:
3692   case X86::TCRETURNdi64:
3693   case X86::TCRETURNri64:
3694   case X86::TCRETURNri64_ImpCall:
3695   case X86::TCRETURNmi64:
3696     return true;
3697   default:
3698     return false;
3699   }
3700 }
3701 
3702 bool X86InstrInfo::canMakeTailCallConditional(
3703     SmallVectorImpl<MachineOperand> &BranchCond,
3704     const MachineInstr &TailCall) const {
3705 
3706   const MachineFunction *MF = TailCall.getMF();
3707 
3708   if (MF->getTarget().getCodeModel() == CodeModel::Kernel) {
3709     // The kernel patches thunk calls at runtime; these should never be conditional.
3710     const MachineOperand &Target = TailCall.getOperand(0);
3711     if (Target.isSymbol()) {
3712       StringRef Symbol(Target.getSymbolName());
3713       // This is currently only relevant to the r11/kernel indirect thunk.
3714       if (Symbol == "__x86_indirect_thunk_r11")
3715         return false;
3716     }
3717   }
3718 
3719   if (TailCall.getOpcode() != X86::TCRETURNdi &&
3720       TailCall.getOpcode() != X86::TCRETURNdi64) {
3721     // Only direct calls can be done with a conditional branch.
3722     return false;
3723   }
3724 
3725   if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
3726     // Conditional tail calls confuse the Win64 unwinder.
3727     return false;
3728   }
3729 
3730   assert(BranchCond.size() == 1);
3731   if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
3732     // Can't make a conditional tail call with this condition.
3733     return false;
3734   }
3735 
3736   const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
3737   if (X86FI->getTCReturnAddrDelta() != 0 ||
3738       TailCall.getOperand(1).getImm() != 0) {
3739     // A conditional tail call cannot do any stack adjustment.
3740     return false;
3741   }
3742 
3743   return true;
3744 }
3745 
3746 void X86InstrInfo::replaceBranchWithTailCall(
3747     MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
3748     const MachineInstr &TailCall) const {
3749   assert(canMakeTailCallConditional(BranchCond, TailCall));
3750 
3751   MachineBasicBlock::iterator I = MBB.end();
3752   while (I != MBB.begin()) {
3753     --I;
3754     if (I->isDebugInstr())
3755       continue;
3756     if (!I->isBranch())
3757       assert(0 && "Can't find the branch to replace!");
3758 
3759     X86::CondCode CC = X86::getCondFromBranch(*I);
3760     assert(BranchCond.size() == 1);
3761     if (CC != BranchCond[0].getImm())
3762       continue;
3763 
3764     break;
3765   }
3766 
3767   unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3768                                                          : X86::TCRETURNdi64cc;
3769 
3770   auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
3771   MIB->addOperand(TailCall.getOperand(0)); // Destination.
3772   MIB.addImm(0);                           // Stack offset (not used).
3773   MIB->addOperand(BranchCond[0]);          // Condition.
3774   MIB.copyImplicitOps(TailCall);           // Regmask and (imp-used) parameters.
3775 
3776   // Add implicit uses and defs of all live regs potentially clobbered by the
3777   // call. This way they still appear live across the call.
3778   LivePhysRegs LiveRegs(getRegisterInfo());
3779   LiveRegs.addLiveOuts(MBB);
3780   SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
3781   LiveRegs.stepForward(*MIB, Clobbers);
3782   for (const auto &C : Clobbers) {
3783     MIB.addReg(C.first, RegState::Implicit);
3784     MIB.addReg(C.first, RegState::Implicit | RegState::Define);
3785   }
3786 
3787   I->eraseFromParent();
3788 }
3789 
3790 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
3791 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
3792 // fallthrough MBB cannot be identified.
3793 static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
3794                                             MachineBasicBlock *TBB) {
3795   // Look for non-EHPad successors other than TBB. If we find exactly one, it
3796   // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
3797   // and fallthrough MBB. If we find more than one, we cannot identify the
3798   // fallthrough MBB and should return nullptr.
3799   MachineBasicBlock *FallthroughBB = nullptr;
3800   for (MachineBasicBlock *Succ : MBB->successors()) {
3801     if (Succ->isEHPad() || (Succ == TBB && FallthroughBB))
3802       continue;
3803     // Return a nullptr if we found more than one fallthrough successor.
3804     if (FallthroughBB && FallthroughBB != TBB)
3805       return nullptr;
3806     FallthroughBB = Succ;
3807   }
3808   return FallthroughBB;
3809 }
3810 
3811 bool X86InstrInfo::analyzeBranchImpl(
3812     MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
3813     SmallVectorImpl<MachineOperand> &Cond,
3814     SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
3815 
3816   // Start from the bottom of the block and work up, examining the
3817   // terminator instructions.
3818   MachineBasicBlock::iterator I = MBB.end();
3819   MachineBasicBlock::iterator UnCondBrIter = MBB.end();
3820   while (I != MBB.begin()) {
3821     --I;
3822     if (I->isDebugInstr())
3823       continue;
3824 
3825     // Working from the bottom, when we see a non-terminator instruction, we're
3826     // done.
3827     if (!isUnpredicatedTerminator(*I))
3828       break;
3829 
3830     // A terminator that isn't a branch can't easily be handled by this
3831     // analysis.
3832     if (!I->isBranch())
3833       return true;
3834 
3835     // Handle unconditional branches.
3836     if (I->getOpcode() == X86::JMP_1) {
3837       UnCondBrIter = I;
3838 
3839       if (!AllowModify) {
3840         TBB = I->getOperand(0).getMBB();
3841         continue;
3842       }
3843 
3844       // If the block has any instructions after a JMP, delete them.
3845       MBB.erase(std::next(I), MBB.end());
3846 
3847       Cond.clear();
3848       FBB = nullptr;
3849 
3850       // Delete the JMP if it's equivalent to a fall-through.
3851       if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3852         TBB = nullptr;
3853         I->eraseFromParent();
3854         I = MBB.end();
3855         UnCondBrIter = MBB.end();
3856         continue;
3857       }
3858 
3859       // TBB is used to indicate the unconditional destination.
3860       TBB = I->getOperand(0).getMBB();
3861       continue;
3862     }
3863 
3864     // Handle conditional branches.
3865     X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3866     if (BranchCode == X86::COND_INVALID)
3867       return true; // Can't handle indirect branch.
3868 
3869     // In practice we should never have an undef EFLAGS operand; if we do,
3870     // abort here, as we are not prepared to preserve the flags.
3871     if (I->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->isUndef())
3872       return true;
3873 
3874     // Working from the bottom, handle the first conditional branch.
3875     if (Cond.empty()) {
3876       FBB = TBB;
3877       TBB = I->getOperand(0).getMBB();
3878       Cond.push_back(MachineOperand::CreateImm(BranchCode));
3879       CondBranches.push_back(&*I);
3880       continue;
3881     }
3882 
3883     // Handle subsequent conditional branches. Only handle the case where all
3884     // conditional branches branch to the same destination and their condition
3885     // opcodes fit one of the special multi-branch idioms.
3886     assert(Cond.size() == 1);
3887     assert(TBB);
3888 
3889     // If the conditions are the same, we can leave them alone.
3890     X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3891     auto NewTBB = I->getOperand(0).getMBB();
3892     if (OldBranchCode == BranchCode && TBB == NewTBB)
3893       continue;
3894 
3895     // If they differ, see if they fit one of the known patterns. Theoretically,
3896     // we could handle more patterns here, but we shouldn't expect to see them
3897     // if instruction selection has done a reasonable job.
3898     if (TBB == NewTBB &&
3899         ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3900          (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3901       BranchCode = X86::COND_NE_OR_P;
3902     } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3903                (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3904       if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3905         return true;
3906 
3907       // X86::COND_E_AND_NP usually has two different branch destinations.
3908       //
3909       // JP B1
3910       // JE B2
3911       // JMP B1
3912       // B1:
3913       // B2:
3914       //
3915       // Here this condition branches to B2 only if NP && E. It has another
3916       // equivalent form:
3917       //
3918       // JNE B1
3919       // JNP B2
3920       // JMP B1
3921       // B1:
3922       // B2:
3923       //
3924       // Similarly it branches to B2 only if E && NP. That is why this condition
3925       // is named with COND_E_AND_NP.
3926       BranchCode = X86::COND_E_AND_NP;
3927     } else
3928       return true;
3929 
3930     // Update the MachineOperand.
3931     Cond[0].setImm(BranchCode);
3932     CondBranches.push_back(&*I);
3933   }
3934 
3935   return false;
3936 }
3937 
3938 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3939                                  MachineBasicBlock *&TBB,
3940                                  MachineBasicBlock *&FBB,
3941                                  SmallVectorImpl<MachineOperand> &Cond,
3942                                  bool AllowModify) const {
3943   SmallVector<MachineInstr *, 4> CondBranches;
3944   return analyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3945 }
3946 
3947 static int getJumpTableIndexFromAddr(const MachineInstr &MI) {
3948   const MCInstrDesc &Desc = MI.getDesc();
3949   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3950   assert(MemRefBegin >= 0 && "instr should have memory operand");
3951   MemRefBegin += X86II::getOperandBias(Desc);
3952 
3953   const MachineOperand &MO = MI.getOperand(MemRefBegin + X86::AddrDisp);
3954   if (!MO.isJTI())
3955     return -1;
3956 
3957   return MO.getIndex();
3958 }
3959 
3960 static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI,
3961                                     Register Reg) {
3962   if (!Reg.isVirtual())
3963     return -1;
3964   MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
3965   if (MI == nullptr)
3966     return -1;
3967   unsigned Opcode = MI->getOpcode();
3968   if (Opcode != X86::LEA64r && Opcode != X86::LEA32r)
3969     return -1;
3970   return getJumpTableIndexFromAddr(*MI);
3971 }
3972 
3973 int X86InstrInfo::getJumpTableIndex(const MachineInstr &MI) const {
3974   unsigned Opcode = MI.getOpcode();
3975   // Switch-jump pattern for non-PIC code looks like:
3976   //   JMP64m $noreg, 8, %X, %jump-table.X, $noreg
3977   if (Opcode == X86::JMP64m || Opcode == X86::JMP32m) {
3978     return getJumpTableIndexFromAddr(MI);
3979   }
3980   // The pattern for PIC code looks like:
3981   //   %0 = LEA64r $rip, 1, $noreg, %jump-table.X
3982   //   %1 = MOVSX64rm32 %0, 4, XX, 0, $noreg
3983   //   %2 = ADD64rr %1, %0
3984   //   JMP64r %2
3985   if (Opcode == X86::JMP64r || Opcode == X86::JMP32r) {
3986     Register Reg = MI.getOperand(0).getReg();
3987     if (!Reg.isVirtual())
3988       return -1;
3989     const MachineFunction &MF = *MI.getParent()->getParent();
3990     const MachineRegisterInfo &MRI = MF.getRegInfo();
3991     MachineInstr *Add = MRI.getUniqueVRegDef(Reg);
3992     if (Add == nullptr)
3993       return -1;
3994     if (Add->getOpcode() != X86::ADD64rr && Add->getOpcode() != X86::ADD32rr)
3995       return -1;
3996     int JTI1 = getJumpTableIndexFromReg(MRI, Add->getOperand(1).getReg());
3997     if (JTI1 >= 0)
3998       return JTI1;
3999     int JTI2 = getJumpTableIndexFromReg(MRI, Add->getOperand(2).getReg());
4000     if (JTI2 >= 0)
4001       return JTI2;
4002   }
4003   return -1;
4004 }
4005 
4006 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
4007                                           MachineBranchPredicate &MBP,
4008                                           bool AllowModify) const {
4009   using namespace std::placeholders;
4010 
4011   SmallVector<MachineOperand, 4> Cond;
4012   SmallVector<MachineInstr *, 4> CondBranches;
4013   if (analyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
4014                         AllowModify))
4015     return true;
4016 
4017   if (Cond.size() != 1)
4018     return true;
4019 
4020   assert(MBP.TrueDest && "expected!");
4021 
4022   if (!MBP.FalseDest)
4023     MBP.FalseDest = MBB.getNextNode();
4024 
4025   const TargetRegisterInfo *TRI = &getRegisterInfo();
4026 
4027   MachineInstr *ConditionDef = nullptr;
4028   bool SingleUseCondition = true;
4029 
4030   for (MachineInstr &MI : llvm::drop_begin(llvm::reverse(MBB))) {
4031     if (MI.modifiesRegister(X86::EFLAGS, TRI)) {
4032       ConditionDef = &MI;
4033       break;
4034     }
4035 
4036     if (MI.readsRegister(X86::EFLAGS, TRI))
4037       SingleUseCondition = false;
4038   }
4039 
4040   if (!ConditionDef)
4041     return true;
4042 
4043   if (SingleUseCondition) {
4044     for (auto *Succ : MBB.successors())
4045       if (Succ->isLiveIn(X86::EFLAGS))
4046         SingleUseCondition = false;
4047   }
4048 
4049   MBP.ConditionDef = ConditionDef;
4050   MBP.SingleUseCondition = SingleUseCondition;
4051 
4052   // Currently we only recognize the simple pattern:
4053   //
4054   //   test %reg, %reg
4055   //   je %label
4056   //
4057   const unsigned TestOpcode =
4058       Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
4059 
4060   if (ConditionDef->getOpcode() == TestOpcode &&
4061       ConditionDef->getNumOperands() == 3 &&
4062       ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
4063       (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
4064     MBP.LHS = ConditionDef->getOperand(0);
4065     MBP.RHS = MachineOperand::CreateImm(0);
4066     MBP.Predicate = Cond[0].getImm() == X86::COND_NE
4067                         ? MachineBranchPredicate::PRED_NE
4068                         : MachineBranchPredicate::PRED_EQ;
4069     return false;
4070   }
4071 
4072   return true;
4073 }
4074 
4075 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
4076                                     int *BytesRemoved) const {
4077   assert(!BytesRemoved && "code size not handled");
4078 
4079   MachineBasicBlock::iterator I = MBB.end();
4080   unsigned Count = 0;
4081 
4082   while (I != MBB.begin()) {
4083     --I;
4084     if (I->isDebugInstr())
4085       continue;
4086     if (I->getOpcode() != X86::JMP_1 &&
4087         X86::getCondFromBranch(*I) == X86::COND_INVALID)
4088       break;
4089     // Remove the branch.
4090     I->eraseFromParent();
4091     I = MBB.end();
4092     ++Count;
4093   }
4094 
4095   return Count;
4096 }
4097 
4098 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
4099                                     MachineBasicBlock *TBB,
4100                                     MachineBasicBlock *FBB,
4101                                     ArrayRef<MachineOperand> Cond,
4102                                     const DebugLoc &DL, int *BytesAdded) const {
4103   // Shouldn't be a fall through.
4104   assert(TBB && "insertBranch must not be told to insert a fallthrough");
4105   assert((Cond.size() == 1 || Cond.size() == 0) &&
4106          "X86 branch conditions have one component!");
4107   assert(!BytesAdded && "code size not handled");
4108 
4109   if (Cond.empty()) {
4110     // Unconditional branch?
4111     assert(!FBB && "Unconditional branch with multiple successors!");
4112     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
4113     return 1;
4114   }
4115 
4116   // If FBB is null, it is implied to be a fall-through block.
4117   bool FallThru = FBB == nullptr;
4118 
4119   // Conditional branch.
4120   unsigned Count = 0;
4121   X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
4122   switch (CC) {
4123   case X86::COND_NE_OR_P:
4124     // Synthesize NE_OR_P with two branches.
4125     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
4126     ++Count;
4127     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
4128     ++Count;
4129     break;
4130   case X86::COND_E_AND_NP:
4131     // Use the next block of MBB as FBB if it is null.
4132     if (FBB == nullptr) {
4133       FBB = getFallThroughMBB(&MBB, TBB);
4134       assert(FBB && "MBB cannot be the last block in function when the false "
4135                     "body is a fall-through.");
4136     }
4137     // Synthesize COND_E_AND_NP with two branches.
4138     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
4139     ++Count;
4140     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
4141     ++Count;
4142     break;
4143   default: {
4144     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
4145     ++Count;
4146   }
4147   }
4148   if (!FallThru) {
4149     // Two-way conditional branch. Insert the second branch.
4150     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
4151     ++Count;
4152   }
4153   return Count;
4154 }
4155 
4156 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
4157                                    ArrayRef<MachineOperand> Cond,
4158                                    Register DstReg, Register TrueReg,
4159                                    Register FalseReg, int &CondCycles,
4160                                    int &TrueCycles, int &FalseCycles) const {
4161   // Not all subtargets have cmov instructions.
4162   if (!Subtarget.canUseCMOV())
4163     return false;
4164   if (Cond.size() != 1)
4165     return false;
4166   // We cannot do the composite conditions, at least not in SSA form.
4167   if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
4168     return false;
4169 
4170   // Check register classes.
4171   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4172   const TargetRegisterClass *RC =
4173       RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
4174   if (!RC)
4175     return false;
4176 
4177   // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
4178   if (X86::GR16RegClass.hasSubClassEq(RC) ||
4179       X86::GR32RegClass.hasSubClassEq(RC) ||
4180       X86::GR64RegClass.hasSubClassEq(RC)) {
4181     // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
4182     // Bridge. Probably Ivy Bridge as well.
4183     CondCycles = 2;
4184     TrueCycles = 2;
4185     FalseCycles = 2;
4186     return true;
4187   }
4188 
4189   // Can't do vectors.
4190   return false;
4191 }
4192 
4193 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
4194                                 MachineBasicBlock::iterator I,
4195                                 const DebugLoc &DL, Register DstReg,
4196                                 ArrayRef<MachineOperand> Cond, Register TrueReg,
4197                                 Register FalseReg) const {
4198   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4199   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
4200   const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
4201   assert(Cond.size() == 1 && "Invalid Cond array");
4202   unsigned Opc =
4203       X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
4204                          false /*HasMemoryOperand*/, Subtarget.hasNDD());
4205   BuildMI(MBB, I, DL, get(Opc), DstReg)
4206       .addReg(FalseReg)
4207       .addReg(TrueReg)
4208       .addImm(Cond[0].getImm());
4209 }
4210 
4211 /// Test if the given register is a physical h register.
4212 static bool isHReg(Register Reg) {
4213   return X86::GR8_ABCD_HRegClass.contains(Reg);
4214 }
4215 
4216 // Try and copy between VR128/VR64 and GR64 registers.
4217 static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg,
4218                                         const X86Subtarget &Subtarget) {
4219   bool HasAVX = Subtarget.hasAVX();
4220   bool HasAVX512 = Subtarget.hasAVX512();
4221   bool HasEGPR = Subtarget.hasEGPR();
4222 
4223   // SrcReg(MaskReg) -> DestReg(GR64)
4224   // SrcReg(MaskReg) -> DestReg(GR32)
4225 
4226   // All KMASK RegClasses hold the same k registers, so we can test against
4227   // any one of them.
4228   if (X86::VK16RegClass.contains(SrcReg)) {
4229     if (X86::GR64RegClass.contains(DestReg)) {
4230       assert(Subtarget.hasBWI());
4231       return HasEGPR ? X86::KMOVQrk_EVEX : X86::KMOVQrk;
4232     }
4233     if (X86::GR32RegClass.contains(DestReg))
4234       return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDrk_EVEX : X86::KMOVDrk)
4235                                 : (HasEGPR ? X86::KMOVWrk_EVEX : X86::KMOVWrk);
4236   }
4237 
4238   // SrcReg(GR64) -> DestReg(MaskReg)
4239   // SrcReg(GR32) -> DestReg(MaskReg)
4240 
4241   // All KMASK RegClasses hold the same k registers, so we can test against
4242   // any one of them.
4243   if (X86::VK16RegClass.contains(DestReg)) {
4244     if (X86::GR64RegClass.contains(SrcReg)) {
4245       assert(Subtarget.hasBWI());
4246       return HasEGPR ? X86::KMOVQkr_EVEX : X86::KMOVQkr;
4247     }
4248     if (X86::GR32RegClass.contains(SrcReg))
4249       return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDkr_EVEX : X86::KMOVDkr)
4250                                 : (HasEGPR ? X86::KMOVWkr_EVEX : X86::KMOVWkr);
4251   }
4252 
4253   // SrcReg(VR128) -> DestReg(GR64)
4254   // SrcReg(VR64)  -> DestReg(GR64)
4255   // SrcReg(GR64)  -> DestReg(VR128)
4256   // SrcReg(GR64)  -> DestReg(VR64)
4257 
4258   if (X86::GR64RegClass.contains(DestReg)) {
4259     if (X86::VR128XRegClass.contains(SrcReg))
4260       // Copy from a VR128 register to a GR64 register.
4261       return HasAVX512 ? X86::VMOVPQIto64Zrr
4262              : HasAVX  ? X86::VMOVPQIto64rr
4263                        : X86::MOVPQIto64rr;
4264     if (X86::VR64RegClass.contains(SrcReg))
4265       // Copy from a VR64 register to a GR64 register.
4266       return X86::MMX_MOVD64from64rr;
4267   } else if (X86::GR64RegClass.contains(SrcReg)) {
4268     // Copy from a GR64 register to a VR128 register.
4269     if (X86::VR128XRegClass.contains(DestReg))
4270       return HasAVX512 ? X86::VMOV64toPQIZrr
4271              : HasAVX  ? X86::VMOV64toPQIrr
4272                        : X86::MOV64toPQIrr;
4273     // Copy from a GR64 register to a VR64 register.
4274     if (X86::VR64RegClass.contains(DestReg))
4275       return X86::MMX_MOVD64to64rr;
4276   }
4277 
4278   // SrcReg(VR128) -> DestReg(GR32)
4279   // SrcReg(GR32)  -> DestReg(VR128)
4280 
4281   if (X86::GR32RegClass.contains(DestReg) &&
4282       X86::VR128XRegClass.contains(SrcReg))
4283     // Copy from a VR128 register to a GR32 register.
4284     return HasAVX512 ? X86::VMOVPDI2DIZrr
4285            : HasAVX  ? X86::VMOVPDI2DIrr
4286                      : X86::MOVPDI2DIrr;
4287 
4288   if (X86::VR128XRegClass.contains(DestReg) &&
4289       X86::GR32RegClass.contains(SrcReg))
4290     // Copy from a GR32 register to a VR128 register.
4291     return HasAVX512 ? X86::VMOVDI2PDIZrr
4292            : HasAVX  ? X86::VMOVDI2PDIrr
4293                      : X86::MOVDI2PDIrr;
4294   return 0;
4295 }
4296 
4297 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
4298                                MachineBasicBlock::iterator MI,
4299                                const DebugLoc &DL, Register DestReg,
4300                                Register SrcReg, bool KillSrc,
4301                                bool RenamableDest, bool RenamableSrc) const {
4302   // First deal with the normal symmetric copies.
4303   bool HasAVX = Subtarget.hasAVX();
4304   bool HasVLX = Subtarget.hasVLX();
4305   bool HasEGPR = Subtarget.hasEGPR();
4306   unsigned Opc = 0;
4307   if (X86::GR64RegClass.contains(DestReg, SrcReg))
4308     Opc = X86::MOV64rr;
4309   else if (X86::GR32RegClass.contains(DestReg, SrcReg))
4310     Opc = X86::MOV32rr;
4311   else if (X86::GR16RegClass.contains(DestReg, SrcReg))
4312     Opc = X86::MOV16rr;
4313   else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
4314     // Copying to or from a physical H register on x86-64 requires a NOREX
4315     // move.  Otherwise use a normal move.
4316     if ((isHReg(DestReg) || isHReg(SrcReg)) && Subtarget.is64Bit()) {
4317       Opc = X86::MOV8rr_NOREX;
4318       // Both operands must be encodable without a REX prefix.
4319       assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
4320              "8-bit H register can not be copied outside GR8_NOREX");
4321     } else
4322       Opc = X86::MOV8rr;
4323   } else if (X86::VR64RegClass.contains(DestReg, SrcReg))
4324     Opc = X86::MMX_MOVQ64rr;
4325   else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
4326     if (HasVLX)
4327       Opc = X86::VMOVAPSZ128rr;
4328     else if (X86::VR128RegClass.contains(DestReg, SrcReg))
4329       Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
4330     else {
4331       // If this is an extended register and we don't have VLX, we need to
4332       // use a 512-bit move.
4333       Opc = X86::VMOVAPSZrr;
4334       const TargetRegisterInfo *TRI = &getRegisterInfo();
4335       DestReg =
4336           TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, &X86::VR512RegClass);
4337       SrcReg =
4338           TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4339     }
4340   } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
4341     if (HasVLX)
4342       Opc = X86::VMOVAPSZ256rr;
4343     else if (X86::VR256RegClass.contains(DestReg, SrcReg))
4344       Opc = X86::VMOVAPSYrr;
4345     else {
4346       // If this is an extended register and we don't have VLX, we need to
4347       // use a 512-bit move.
4348       Opc = X86::VMOVAPSZrr;
4349       const TargetRegisterInfo *TRI = &getRegisterInfo();
4350       DestReg =
4351           TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, &X86::VR512RegClass);
4352       SrcReg =
4353           TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4354     }
4355   } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
4356     Opc = X86::VMOVAPSZrr;
4357   // All KMASK RegClasses hold the same k registers, so we can test against
4358   // any one of them.
4359   else if (X86::VK16RegClass.contains(DestReg, SrcReg))
4360     Opc = Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVQkk)
4361                              : (HasEGPR ? X86::KMOVWkk_EVEX : X86::KMOVWkk);
4362   if (!Opc)
4363     Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
4364 
4365   if (Opc) {
4366     BuildMI(MBB, MI, DL, get(Opc), DestReg)
4367         .addReg(SrcReg, getKillRegState(KillSrc));
4368     return;
4369   }
4370 
4371   if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
4372     // FIXME: We use a fatal error here because historically LLVM has tried
4373     // to lower some of these physreg copies and we want to ensure we get
4374     // reasonable bug reports if someone encounters a case no other testing
4375     // found. This path should be removed after the LLVM 7 release.
4376     report_fatal_error("Unable to copy EFLAGS physical register!");
4377   }
4378 
4379   LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
4380                     << RI.getName(DestReg) << '\n');
4381   report_fatal_error("Cannot emit physreg copy instruction");
4382 }
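
// A minimal usage sketch (editorial; the insertion point and registers are
// hypothetical):
//   const X86InstrInfo *TII = Subtarget.getInstrInfo();
//   TII->copyPhysReg(MBB, InsertPt, DL, X86::RAX, X86::RBX,
//                    /*KillSrc=*/true, /*RenamableDest=*/false,
//                    /*RenamableSrc=*/false);
// Both registers are in GR64, so this emits a single MOV64rr.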
4383 
4384 std::optional<DestSourcePair>
4385 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
4386   if (MI.isMoveReg()) {
4387     // FIXME: Dirty hack for apparent invariant that doesn't hold when
4388     // subreg_to_reg is coalesced with ordinary copies, such that the bits that
4389     // were asserted as 0 are now undef.
4390     if (MI.getOperand(0).isUndef() && MI.getOperand(0).getSubReg())
4391       return std::nullopt;
4392 
4393     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
4394   }
4395   return std::nullopt;
4396 }
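
// Editorial example (illustrative): for "$rax = MOV64rr $rbx" the hook
// returns DestSourcePair{operand 0 ($rax), operand 1 ($rbx)}; an undef
// subregister destination (the FIXME above) yields std::nullopt instead.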
4397 
4398 static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) {
4399   if (STI.hasFP16())
4400     return Load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
4401   if (Load)
4402     return STI.hasAVX512() ? X86::VMOVSSZrm
4403            : STI.hasAVX()  ? X86::VMOVSSrm
4404                            : X86::MOVSSrm;
4405   else
4406     return STI.hasAVX512() ? X86::VMOVSSZmr
4407            : STI.hasAVX()  ? X86::VMOVSSmr
4408                            : X86::MOVSSmr;
4409 }
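
// Editorial note (illustrative): spilling an FR16/FR16X register on an
// AVX512FP16 target uses VMOVSHZrm_alt/VMOVSHZmr; without FP16 support the
// 2-byte value is spilled through the 4-byte MOVSS family instead.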
4410 
4411 static unsigned getLoadStoreRegOpcode(Register Reg,
4412                                       const TargetRegisterClass *RC,
4413                                       bool IsStackAligned,
4414                                       const X86Subtarget &STI, bool Load) {
4415   bool HasAVX = STI.hasAVX();
4416   bool HasAVX512 = STI.hasAVX512();
4417   bool HasVLX = STI.hasVLX();
4418   bool HasEGPR = STI.hasEGPR();
4419 
4420   assert(RC != nullptr && "Invalid target register class");
4421   switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
4422   default:
4423     llvm_unreachable("Unknown spill size");
4424   case 1:
4425     assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
4426     if (STI.is64Bit())
4427       // Copying to or from a physical H register on x86-64 requires a NOREX
4428       // move.  Otherwise use a normal move.
4429       if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
4430         return Load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
4431     return Load ? X86::MOV8rm : X86::MOV8mr;
4432   case 2:
4433     if (X86::VK16RegClass.hasSubClassEq(RC))
4434       return Load ? (HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm)
4435                   : (HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk);
4436     assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
4437     return Load ? X86::MOV16rm : X86::MOV16mr;
4438   case 4:
4439     if (X86::GR32RegClass.hasSubClassEq(RC))
4440       return Load ? X86::MOV32rm : X86::MOV32mr;
4441     if (X86::FR32XRegClass.hasSubClassEq(RC))
4442       return Load ? (HasAVX512 ? X86::VMOVSSZrm_alt
4443                      : HasAVX  ? X86::VMOVSSrm_alt
4444                                : X86::MOVSSrm_alt)
4445                   : (HasAVX512 ? X86::VMOVSSZmr
4446                      : HasAVX  ? X86::VMOVSSmr
4447                                : X86::MOVSSmr);
4448     if (X86::RFP32RegClass.hasSubClassEq(RC))
4449       return Load ? X86::LD_Fp32m : X86::ST_Fp32m;
4450     if (X86::VK32RegClass.hasSubClassEq(RC)) {
4451       assert(STI.hasBWI() && "KMOVD requires BWI");
4452       return Load ? (HasEGPR ? X86::KMOVDkm_EVEX : X86::KMOVDkm)
4453                   : (HasEGPR ? X86::KMOVDmk_EVEX : X86::KMOVDmk);
4454     }
4455     // All of these mask pair classes have the same spill size, so the same
4456     // kind of kmov instructions can be used with all of them.
4457     if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
4458         X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
4459         X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
4460         X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
4461         X86::VK16PAIRRegClass.hasSubClassEq(RC))
4462       return Load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
4463     if (X86::FR16RegClass.hasSubClassEq(RC) ||
4464         X86::FR16XRegClass.hasSubClassEq(RC))
4465       return getLoadStoreOpcodeForFP16(Load, STI);
4466     llvm_unreachable("Unknown 4-byte regclass");
4467   case 8:
4468     if (X86::GR64RegClass.hasSubClassEq(RC))
4469       return Load ? X86::MOV64rm : X86::MOV64mr;
4470     if (X86::FR64XRegClass.hasSubClassEq(RC))
4471       return Load ? (HasAVX512 ? X86::VMOVSDZrm_alt
4472                      : HasAVX  ? X86::VMOVSDrm_alt
4473                                : X86::MOVSDrm_alt)
4474                   : (HasAVX512 ? X86::VMOVSDZmr
4475                      : HasAVX  ? X86::VMOVSDmr
4476                                : X86::MOVSDmr);
4477     if (X86::VR64RegClass.hasSubClassEq(RC))
4478       return Load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
4479     if (X86::RFP64RegClass.hasSubClassEq(RC))
4480       return Load ? X86::LD_Fp64m : X86::ST_Fp64m;
4481     if (X86::VK64RegClass.hasSubClassEq(RC)) {
4482       assert(STI.hasBWI() && "KMOVQ requires BWI");
4483       return Load ? (HasEGPR ? X86::KMOVQkm_EVEX : X86::KMOVQkm)
4484                   : (HasEGPR ? X86::KMOVQmk_EVEX : X86::KMOVQmk);
4485     }
4486     llvm_unreachable("Unknown 8-byte regclass");
4487   case 10:
4488     assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
4489     return Load ? X86::LD_Fp80m : X86::ST_FpP80m;
4490   case 16: {
4491     if (X86::VR128XRegClass.hasSubClassEq(RC)) {
4492       // If the stack is realigned, we can use aligned stores.
4493       if (IsStackAligned)
4494         return Load ? (HasVLX      ? X86::VMOVAPSZ128rm
4495                        : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
4496                        : HasAVX    ? X86::VMOVAPSrm
4497                                    : X86::MOVAPSrm)
4498                     : (HasVLX      ? X86::VMOVAPSZ128mr
4499                        : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
4500                        : HasAVX    ? X86::VMOVAPSmr
4501                                    : X86::MOVAPSmr);
4502       else
4503         return Load ? (HasVLX      ? X86::VMOVUPSZ128rm
4504                        : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
4505                        : HasAVX    ? X86::VMOVUPSrm
4506                                    : X86::MOVUPSrm)
4507                     : (HasVLX      ? X86::VMOVUPSZ128mr
4508                        : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
4509                        : HasAVX    ? X86::VMOVUPSmr
4510                                    : X86::MOVUPSmr);
4511     }
4512     llvm_unreachable("Unknown 16-byte regclass");
4513   }
4514   case 32:
4515     assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
4516     // If the stack is realigned, we can use aligned stores.
4517     if (IsStackAligned)
4518       return Load ? (HasVLX      ? X86::VMOVAPSZ256rm
4519                      : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
4520                                  : X86::VMOVAPSYrm)
4521                   : (HasVLX      ? X86::VMOVAPSZ256mr
4522                      : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
4523                                  : X86::VMOVAPSYmr);
4524     else
4525       return Load ? (HasVLX      ? X86::VMOVUPSZ256rm
4526                      : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
4527                                  : X86::VMOVUPSYrm)
4528                   : (HasVLX      ? X86::VMOVUPSZ256mr
4529                      : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
4530                                  : X86::VMOVUPSYmr);
4531   case 64:
4532     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
4533     assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
4534     if (IsStackAligned)
4535       return Load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
4536     else
4537       return Load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
4538   case 1024:
4539     assert(X86::TILERegClass.hasSubClassEq(RC) && "Unknown 1024-byte regclass");
4540     assert(STI.hasAMXTILE() && "Using 8*1024-bit register requires AMX-TILE");
4541 #define GET_EGPR_IF_ENABLED(OPC) (STI.hasEGPR() ? OPC##_EVEX : OPC)
4542     return Load ? GET_EGPR_IF_ENABLED(X86::TILELOADD)
4543                 : GET_EGPR_IF_ENABLED(X86::TILESTORED);
4544 #undef GET_EGPR_IF_ENABLED
4545   case 2048:
4546     assert(X86::TILEPAIRRegClass.hasSubClassEq(RC) &&
4547            "Unknown 2048-byte regclass");
4548     assert(STI.hasAMXTILE() && "Using 2048-bit register requires AMX-TILE");
4549     return Load ? X86::PTILEPAIRLOAD : X86::PTILEPAIRSTORE;
4550   }
4551 }
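
// A worked example (editorial): spilling a VR256 register on an AVX2 target
// (no AVX512, no VLX) with an aligned slot hits the 32-byte case above and
// returns VMOVAPSYmr; the matching reload query (Load == true) returns
// VMOVAPSYrm.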
4552 
4553 std::optional<ExtAddrMode>
4554 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
4555                                       const TargetRegisterInfo *TRI) const {
4556   const MCInstrDesc &Desc = MemI.getDesc();
4557   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
4558   if (MemRefBegin < 0)
4559     return std::nullopt;
4560 
4561   MemRefBegin += X86II::getOperandBias(Desc);
4562 
4563   auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
4564   if (!BaseOp.isReg()) // Can be an MO_FrameIndex
4565     return std::nullopt;
4566 
4567   const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp);
4568   // Displacement can be symbolic
4569   if (!DispMO.isImm())
4570     return std::nullopt;
4571 
4572   ExtAddrMode AM;
4573   AM.BaseReg = BaseOp.getReg();
4574   AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg();
4575   AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm();
4576   AM.Displacement = DispMO.getImm();
4577   return AM;
4578 }
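
// Editorial example (illustrative): for the load
//   $eax = MOV32rm $rdi, 2, $rcx, 16, $noreg
// the returned ExtAddrMode has BaseReg = $rdi, ScaledReg = $rcx, Scale = 2,
// Displacement = 16, i.e. the address $rdi + 2 * $rcx + 16.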
4579 
4580 bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
4581                                      StringRef &ErrInfo) const {
4582   std::optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
4583   if (!AMOrNone)
4584     return true;
4585 
4586   ExtAddrMode AM = *AMOrNone;
4587   assert(AM.Form == ExtAddrMode::Formula::Basic);
4588   if (AM.ScaledReg != X86::NoRegister) {
4589     switch (AM.Scale) {
4590     case 1:
4591     case 2:
4592     case 4:
4593     case 8:
4594       break;
4595     default:
4596       ErrInfo = "Scale factor in address must be 1, 2, 4 or 8";
4597       return false;
4598     }
4599   }
4600   if (!isInt<32>(AM.Displacement)) {
4601     ErrInfo = "Displacement in address must fit into 32-bit signed "
4602               "integer";
4603     return false;
4604   }
4605 
4606   return true;
4607 }
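
// Editorial example (illustrative): hand-written MIR such as
//   $eax = MOV32rm $rdi, 3, $rcx, 0, $noreg
// fails verification above with "Scale factor in address must be 1, 2, 4
// or 8".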
4608 
4609 bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
4610                                            const Register Reg,
4611                                            int64_t &ImmVal) const {
4612   Register MovReg = Reg;
4613   const MachineInstr *MovMI = &MI;
4614 
4615   // Follow use-def for SUBREG_TO_REG to find the real move immediate
4616   // instruction. It is quite common for x86-64.
4617   if (MI.isSubregToReg()) {
4618     // We use the following pattern to set up a 64-bit immediate.
4619     //      %8:gr32 = MOV32r0 implicit-def dead $eflags
4620     //      %6:gr64 = SUBREG_TO_REG 0, killed %8:gr32, %subreg.sub_32bit
4621     if (!MI.getOperand(1).isImm())
4622       return false;
4623     unsigned FillBits = MI.getOperand(1).getImm();
4624     unsigned SubIdx = MI.getOperand(3).getImm();
4625     MovReg = MI.getOperand(2).getReg();
4626     if (SubIdx != X86::sub_32bit || FillBits != 0)
4627       return false;
4628     const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4629     MovMI = MRI.getUniqueVRegDef(MovReg);
4630     if (!MovMI)
4631       return false;
4632   }
4633 
4634   if (MovMI->getOpcode() == X86::MOV32r0 &&
4635       MovMI->getOperand(0).getReg() == MovReg) {
4636     ImmVal = 0;
4637     return true;
4638   }
4639 
4640   if (MovMI->getOpcode() != X86::MOV32ri &&
4641       MovMI->getOpcode() != X86::MOV64ri &&
4642       MovMI->getOpcode() != X86::MOV32ri64 && MovMI->getOpcode() != X86::MOV8ri)
4643     return false;
4644   // Mov Src can be a global address.
4645   if (!MovMI->getOperand(1).isImm() || MovMI->getOperand(0).getReg() != MovReg)
4646     return false;
4647   ImmVal = MovMI->getOperand(1).getImm();
4648   return true;
4649 }
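
// Editorial example (mirroring the pattern quoted above):
//   %8:gr32 = MOV32ri 42
//   %6:gr64 = SUBREG_TO_REG 0, killed %8:gr32, %subreg.sub_32bit
// A query on %6's definition walks through the SUBREG_TO_REG to the
// MOV32ri and reports ImmVal = 42.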
4650 
4651 bool X86InstrInfo::preservesZeroValueInReg(
4652     const MachineInstr *MI, const Register NullValueReg,
4653     const TargetRegisterInfo *TRI) const {
4654   if (!MI->modifiesRegister(NullValueReg, TRI))
4655     return true;
4656   switch (MI->getOpcode()) {
4657   // Shifting a null value right/left onto itself still yields a null, i.e.
4658   // rax = shl rax, X.
4659   case X86::SHR64ri:
4660   case X86::SHR32ri:
4661   case X86::SHL64ri:
4662   case X86::SHL32ri:
4663     assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
4664            "expected for shift opcode!");
4665     return MI->getOperand(0).getReg() == NullValueReg &&
4666            MI->getOperand(1).getReg() == NullValueReg;
4667   // Zero extend of a sub-reg of NullValueReg into itself does not change the
4668   // null value.
4669   case X86::MOV32rr:
4670     return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) {
4671       return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
4672     });
4673   default:
4674     return false;
4675   }
4676   llvm_unreachable("Should be handled above!");
4677 }
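
// Editorial example (illustrative), with NullValueReg = $rax known zero:
//   $rax = SHL64ri $rax, 3, implicit-def $eflags  // preserved: 0 << 3 == 0
//   $eax = MOV32rr $eax                           // preserved: zero-extends
//                                                 // a zero sub-register
//   $rax = ADD64ri32 $rax, 1                      // not preserved (default)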
4678 
4679 bool X86InstrInfo::getMemOperandsWithOffsetWidth(
4680     const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
4681     int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
4682     const TargetRegisterInfo *TRI) const {
4683   const MCInstrDesc &Desc = MemOp.getDesc();
4684   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
4685   if (MemRefBegin < 0)
4686     return false;
4687 
4688   MemRefBegin += X86II::getOperandBias(Desc);
4689 
4690   const MachineOperand *BaseOp =
4691       &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
4692   if (!BaseOp->isReg()) // Can be an MO_FrameIndex
4693     return false;
4694 
4695   if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
4696     return false;
4697 
4698   if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
4699       X86::NoRegister)
4700     return false;
4701 
4702   const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
4703 
4704   // Displacement can be symbolic
4705   if (!DispMO.isImm())
4706     return false;
4707 
4708   Offset = DispMO.getImm();
4709 
4710   if (!BaseOp->isReg())
4711     return false;
4712 
4713   OffsetIsScalable = false;
4714   // FIXME: Relying on memoperands() may not be the right thing to do here.
4715   // Check with the X86 maintainers and fix it accordingly. For now it is ok,
4716   // since the X86 back-end makes no use of `Width` at the moment.
4717   Width = !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize()
4718                                      : LocationSize::precise(0);
4719   BaseOps.push_back(BaseOp);
4720   return true;
4721 }
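
// Editorial example (illustrative): "$rax = MOV64rm $rbp, 1, $noreg, -8,
// $noreg" yields BaseOps = { $rbp } and Offset = -8, while a form with an
// index register such as "$rax = MOV64rm $rdi, 1, $rcx, 0, $noreg" is
// rejected by the checks above.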
4722 
4723 static unsigned getStoreRegOpcode(Register SrcReg,
4724                                   const TargetRegisterClass *RC,
4725                                   bool IsStackAligned,
4726                                   const X86Subtarget &STI) {
4727   return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
4728 }
4729 
4730 static unsigned getLoadRegOpcode(Register DestReg,
4731                                  const TargetRegisterClass *RC,
4732                                  bool IsStackAligned, const X86Subtarget &STI) {
4733   return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
4734 }
4735 
4736 static bool isAMXOpcode(unsigned Opc) {
4737   switch (Opc) {
4738   default:
4739     return false;
4740   case X86::TILELOADD:
4741   case X86::TILESTORED:
4742   case X86::TILELOADD_EVEX:
4743   case X86::TILESTORED_EVEX:
4744   case X86::PTILEPAIRLOAD:
4745   case X86::PTILEPAIRSTORE:
4746     return true;
4747   }
4748 }
4749 
4750 void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
4751                                     MachineBasicBlock::iterator MI,
4752                                     unsigned Opc, Register Reg, int FrameIdx,
4753                                     bool isKill) const {
4754   switch (Opc) {
4755   default:
4756     llvm_unreachable("Unexpected special opcode!");
4757   case X86::TILESTORED:
4758   case X86::TILESTORED_EVEX:
4759   case X86::PTILEPAIRSTORE: {
4760     // tilestored %tmm, (%sp, %idx)
4761     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
4762     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4763     BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
4764     MachineInstr *NewMI =
4765         addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
4766             .addReg(Reg, getKillRegState(isKill));
4767     MachineOperand &MO = NewMI->getOperand(X86::AddrIndexReg);
4768     MO.setReg(VirtReg);
4769     MO.setIsKill(true);
4770     break;
4771   }
4772   case X86::TILELOADD:
4773   case X86::TILELOADD_EVEX:
4774   case X86::PTILEPAIRLOAD: {
4775     // tileloadd (%sp, %idx), %tmm
4776     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
4777     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4778     BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
4779     MachineInstr *NewMI = addFrameReference(
4780         BuildMI(MBB, MI, DebugLoc(), get(Opc), Reg), FrameIdx);
4781     MachineOperand &MO = NewMI->getOperand(1 + X86::AddrIndexReg);
4782     MO.setReg(VirtReg);
4783     MO.setIsKill(true);
4784     break;
4785   }
4786   }
4787 }
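
// Editorial illustration (virtual register names are hypothetical): a tile
// spill produced by the code above looks like
//   %stride:gr64_nosp = MOV64ri 64
//   TILESTORED %stack.0, 1, killed %stride, 0, $noreg, killed %tmm0
// i.e. the frame reference's index operand is rewritten to hold the
// constant stride of 64 bytes.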
4788 
4789 void X86InstrInfo::storeRegToStackSlot(
4790     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
4791     bool isKill, int FrameIdx, const TargetRegisterClass *RC,
4792     const TargetRegisterInfo *TRI, Register VReg,
4793     MachineInstr::MIFlag Flags) const {
4794   const MachineFunction &MF = *MBB.getParent();
4795   const MachineFrameInfo &MFI = MF.getFrameInfo();
4796   assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
4797          "Stack slot too small for store");
4798 
4799   unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
4800   bool isAligned =
4801       (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4802       (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
4803 
4804   unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
4805   if (isAMXOpcode(Opc))
4806     loadStoreTileReg(MBB, MI, Opc, SrcReg, FrameIdx, isKill);
4807   else
4808     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
4809         .addReg(SrcReg, getKillRegState(isKill))
4810         .setMIFlag(Flags);
4811 }
4812 
4813 void X86InstrInfo::loadRegFromStackSlot(
4814     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
4815     int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
4816     Register VReg, MachineInstr::MIFlag Flags) const {
4817   const MachineFunction &MF = *MBB.getParent();
4818   const MachineFrameInfo &MFI = MF.getFrameInfo();
4819   assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
4820          "Load size exceeds stack slot");
4821   unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
4822   bool isAligned =
4823       (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4824       (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
4825 
4826   unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
4827   if (isAMXOpcode(Opc))
4828     loadStoreTileReg(MBB, MI, Opc, DestReg, FrameIdx);
4829   else
4830     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx)
4831         .setMIFlag(Flags);
4832 }
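
// Editorial note (illustrative): reloading a VR256 register when the stack
// is only 16-byte aligned and cannot be realigned leaves isAligned false,
// so the unaligned VMOVUPS-family reload opcodes are selected.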
4833 
4834 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
4835                                   Register &SrcReg2, int64_t &CmpMask,
4836                                   int64_t &CmpValue) const {
4837   switch (MI.getOpcode()) {
4838   default:
4839     break;
4840   case X86::CMP64ri32:
4841   case X86::CMP32ri:
4842   case X86::CMP16ri:
4843   case X86::CMP8ri:
4844     SrcReg = MI.getOperand(0).getReg();
4845     SrcReg2 = 0;
4846     if (MI.getOperand(1).isImm()) {
4847       CmpMask = ~0;
4848       CmpValue = MI.getOperand(1).getImm();
4849     } else {
4850       CmpMask = CmpValue = 0;
4851     }
4852     return true;
4853   // A SUB can be used to perform a comparison.
4854   CASE_ND(SUB64rm)
4855   CASE_ND(SUB32rm)
4856   CASE_ND(SUB16rm)
4857   CASE_ND(SUB8rm)
4858     SrcReg = MI.getOperand(1).getReg();
4859     SrcReg2 = 0;
4860     CmpMask = 0;
4861     CmpValue = 0;
4862     return true;
4863   CASE_ND(SUB64rr)
4864   CASE_ND(SUB32rr)
4865   CASE_ND(SUB16rr)
4866   CASE_ND(SUB8rr)
4867     SrcReg = MI.getOperand(1).getReg();
4868     SrcReg2 = MI.getOperand(2).getReg();
4869     CmpMask = 0;
4870     CmpValue = 0;
4871     return true;
4872   CASE_ND(SUB64ri32)
4873   CASE_ND(SUB32ri)
4874   CASE_ND(SUB16ri)
4875   CASE_ND(SUB8ri)
4876     SrcReg = MI.getOperand(1).getReg();
4877     SrcReg2 = 0;
4878     if (MI.getOperand(2).isImm()) {
4879       CmpMask = ~0;
4880       CmpValue = MI.getOperand(2).getImm();
4881     } else {
4882       CmpMask = CmpValue = 0;
4883     }
4884     return true;
4885   case X86::CMP64rr:
4886   case X86::CMP32rr:
4887   case X86::CMP16rr:
4888   case X86::CMP8rr:
4889     SrcReg = MI.getOperand(0).getReg();
4890     SrcReg2 = MI.getOperand(1).getReg();
4891     CmpMask = 0;
4892     CmpValue = 0;
4893     return true;
4894   case X86::TEST8rr:
4895   case X86::TEST16rr:
4896   case X86::TEST32rr:
4897   case X86::TEST64rr:
4898     SrcReg = MI.getOperand(0).getReg();
4899     if (MI.getOperand(1).getReg() != SrcReg)
4900       return false;
4901     // Compare against zero.
4902     SrcReg2 = 0;
4903     CmpMask = ~0;
4904     CmpValue = 0;
4905     return true;
4906   }
4907   return false;
4908 }
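
// Editorial example (illustrative): analyzeCompare on "CMP32ri %0, 7"
// yields SrcReg = %0, SrcReg2 = 0, CmpMask = ~0, CmpValue = 7, while
// "TEST32rr %0, %0" is reported as a compare of %0 against zero
// (CmpMask = ~0, CmpValue = 0).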
4909 
4910 bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
4911                                         Register SrcReg, Register SrcReg2,
4912                                         int64_t ImmMask, int64_t ImmValue,
4913                                         const MachineInstr &OI, bool *IsSwapped,
4914                                         int64_t *ImmDelta) const {
4915   switch (OI.getOpcode()) {
4916   case X86::CMP64rr:
4917   case X86::CMP32rr:
4918   case X86::CMP16rr:
4919   case X86::CMP8rr:
4920   CASE_ND(SUB64rr)
4921   CASE_ND(SUB32rr)
4922   CASE_ND(SUB16rr)
4923   CASE_ND(SUB8rr) {
4924     Register OISrcReg;
4925     Register OISrcReg2;
4926     int64_t OIMask;
4927     int64_t OIValue;
4928     if (!analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) ||
4929         OIMask != ImmMask || OIValue != ImmValue)
4930       return false;
4931     if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4932       *IsSwapped = false;
4933       return true;
4934     }
4935     if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4936       *IsSwapped = true;
4937       return true;
4938     }
4939     return false;
4940   }
4941   case X86::CMP64ri32:
4942   case X86::CMP32ri:
4943   case X86::CMP16ri:
4944   case X86::CMP8ri:
4945   CASE_ND(SUB64ri32)
4946   CASE_ND(SUB32ri)
4947   CASE_ND(SUB16ri)
4948   CASE_ND(SUB8ri)
4949   case X86::TEST64rr:
4950   case X86::TEST32rr:
4951   case X86::TEST16rr:
4952   case X86::TEST8rr: {
4953     if (ImmMask != 0) {
4954       Register OISrcReg;
4955       Register OISrcReg2;
4956       int64_t OIMask;
4957       int64_t OIValue;
4958       if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) &&
4959           SrcReg == OISrcReg && ImmMask == OIMask) {
4960         if (OIValue == ImmValue) {
4961           *ImmDelta = 0;
4962           return true;
4963         } else if (static_cast<uint64_t>(ImmValue) ==
4964                    static_cast<uint64_t>(OIValue) - 1) {
4965           *ImmDelta = -1;
4966           return true;
4967         } else if (static_cast<uint64_t>(ImmValue) ==
4968                    static_cast<uint64_t>(OIValue) + 1) {
4969           *ImmDelta = 1;
4970           return true;
4971         } else {
4972           return false;
4973         }
4974       }
4975     }
4976     return FlagI.isIdenticalTo(OI);
4977   }
4978   default:
4979     return false;
4980   }
4981 }
4982 
4983 /// Check whether the definition can be converted
4984 /// to remove a comparison against zero.
4985 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
4986                                     bool &ClearsOverflowFlag) {
4987   NoSignFlag = false;
4988   ClearsOverflowFlag = false;
4989 
4990   // "ELF Handling for Thread-Local Storage" specifies that x86-64 GOTTPOFF, and
4991   // i386 GOTNTPOFF/INDNTPOFF relocations can convert an ADD to a LEA during
4992   // Initial Exec to Local Exec relaxation. In these cases, we must not depend
4993   // on the EFLAGS modification of ADD actually happening in the final binary.
4994   if (MI.getOpcode() == X86::ADD64rm || MI.getOpcode() == X86::ADD32rm) {
4995     unsigned Flags = MI.getOperand(5).getTargetFlags();
4996     if (Flags == X86II::MO_GOTTPOFF || Flags == X86II::MO_INDNTPOFF ||
4997         Flags == X86II::MO_GOTNTPOFF)
4998       return false;
4999   }
5000 
5001   switch (MI.getOpcode()) {
5002   default:
5003     return false;
5004 
5005   // The shift instructions only modify ZF if their shift count is non-zero.
5006   // N.B.: The processor truncates the shift count depending on the encoding.
5007   CASE_ND(SAR8ri)
5008   CASE_ND(SAR16ri)
5009   CASE_ND(SAR32ri)
5010   CASE_ND(SAR64ri)
5011   CASE_ND(SHR8ri)
5012   CASE_ND(SHR16ri)
5013   CASE_ND(SHR32ri)
5014   CASE_ND(SHR64ri)
5015     return getTruncatedShiftCount(MI, 2) != 0;
5016 
5017   // Some left shift instructions can be turned into LEA instructions but only
5018   // if their flags aren't used. Avoid transforming such instructions.
5019   CASE_ND(SHL8ri)
5020   CASE_ND(SHL16ri)
5021   CASE_ND(SHL32ri)
5022   CASE_ND(SHL64ri) {
5023     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
5024     if (isTruncatedShiftCountForLEA(ShAmt))
5025       return false;
5026     return ShAmt != 0;
5027   }
5028 
5029   CASE_ND(SHRD16rri8)
5030   CASE_ND(SHRD32rri8)
5031   CASE_ND(SHRD64rri8)
5032   CASE_ND(SHLD16rri8)
5033   CASE_ND(SHLD32rri8)
5034   CASE_ND(SHLD64rri8)
5035     return getTruncatedShiftCount(MI, 3) != 0;
5036 
5037   CASE_ND(SUB64ri32)
5038   CASE_ND(SUB32ri)
5039   CASE_ND(SUB16ri)
5040   CASE_ND(SUB8ri)
5041   CASE_ND(SUB64rr)
5042   CASE_ND(SUB32rr)
5043   CASE_ND(SUB16rr)
5044   CASE_ND(SUB8rr)
5045   CASE_ND(SUB64rm)
5046   CASE_ND(SUB32rm)
5047   CASE_ND(SUB16rm)
5048   CASE_ND(SUB8rm)
5049   CASE_ND(DEC64r)
5050   CASE_ND(DEC32r)
5051   CASE_ND(DEC16r)
5052   CASE_ND(DEC8r)
5053   CASE_ND(ADD64ri32)
5054   CASE_ND(ADD32ri)
5055   CASE_ND(ADD16ri)
5056   CASE_ND(ADD8ri)
5057   CASE_ND(ADD64rr)
5058   CASE_ND(ADD32rr)
5059   CASE_ND(ADD16rr)
5060   CASE_ND(ADD8rr)
5061   CASE_ND(ADD64rm)
5062   CASE_ND(ADD32rm)
5063   CASE_ND(ADD16rm)
5064   CASE_ND(ADD8rm)
5065   CASE_ND(INC64r)
5066   CASE_ND(INC32r)
5067   CASE_ND(INC16r)
5068   CASE_ND(INC8r)
5069   CASE_ND(ADC64ri32)
5070   CASE_ND(ADC32ri)
5071   CASE_ND(ADC16ri)
5072   CASE_ND(ADC8ri)
5073   CASE_ND(ADC64rr)
5074   CASE_ND(ADC32rr)
5075   CASE_ND(ADC16rr)
5076   CASE_ND(ADC8rr)
5077   CASE_ND(ADC64rm)
5078   CASE_ND(ADC32rm)
5079   CASE_ND(ADC16rm)
5080   CASE_ND(ADC8rm)
5081   CASE_ND(SBB64ri32)
5082   CASE_ND(SBB32ri)
5083   CASE_ND(SBB16ri)
5084   CASE_ND(SBB8ri)
5085   CASE_ND(SBB64rr)
5086   CASE_ND(SBB32rr)
5087   CASE_ND(SBB16rr)
5088   CASE_ND(SBB8rr)
5089   CASE_ND(SBB64rm)
5090   CASE_ND(SBB32rm)
5091   CASE_ND(SBB16rm)
5092   CASE_ND(SBB8rm)
5093   CASE_ND(NEG8r)
5094   CASE_ND(NEG16r)
5095   CASE_ND(NEG32r)
5096   CASE_ND(NEG64r)
5097   case X86::LZCNT16rr:
5098   case X86::LZCNT16rm:
5099   case X86::LZCNT32rr:
5100   case X86::LZCNT32rm:
5101   case X86::LZCNT64rr:
5102   case X86::LZCNT64rm:
5103   case X86::POPCNT16rr:
5104   case X86::POPCNT16rm:
5105   case X86::POPCNT32rr:
5106   case X86::POPCNT32rm:
5107   case X86::POPCNT64rr:
5108   case X86::POPCNT64rm:
5109   case X86::TZCNT16rr:
5110   case X86::TZCNT16rm:
5111   case X86::TZCNT32rr:
5112   case X86::TZCNT32rm:
5113   case X86::TZCNT64rr:
5114   case X86::TZCNT64rm:
5115     return true;
5116   CASE_ND(AND64ri32)
5117   CASE_ND(AND32ri)
5118   CASE_ND(AND16ri)
5119   CASE_ND(AND8ri)
5120   CASE_ND(AND64rr)
5121   CASE_ND(AND32rr)
5122   CASE_ND(AND16rr)
5123   CASE_ND(AND8rr)
5124   CASE_ND(AND64rm)
5125   CASE_ND(AND32rm)
5126   CASE_ND(AND16rm)
5127   CASE_ND(AND8rm)
5128   CASE_ND(XOR64ri32)
5129   CASE_ND(XOR32ri)
5130   CASE_ND(XOR16ri)
5131   CASE_ND(XOR8ri)
5132   CASE_ND(XOR64rr)
5133   CASE_ND(XOR32rr)
5134   CASE_ND(XOR16rr)
5135   CASE_ND(XOR8rr)
5136   CASE_ND(XOR64rm)
5137   CASE_ND(XOR32rm)
5138   CASE_ND(XOR16rm)
5139   CASE_ND(XOR8rm)
5140   CASE_ND(OR64ri32)
5141   CASE_ND(OR32ri)
5142   CASE_ND(OR16ri)
5143   CASE_ND(OR8ri)
5144   CASE_ND(OR64rr)
5145   CASE_ND(OR32rr)
5146   CASE_ND(OR16rr)
5147   CASE_ND(OR8rr)
5148   CASE_ND(OR64rm)
5149   CASE_ND(OR32rm)
5150   CASE_ND(OR16rm)
5151   CASE_ND(OR8rm)
5152   case X86::ANDN32rr:
5153   case X86::ANDN32rm:
5154   case X86::ANDN64rr:
5155   case X86::ANDN64rm:
5156   case X86::BLSI32rr:
5157   case X86::BLSI32rm:
5158   case X86::BLSI64rr:
5159   case X86::BLSI64rm:
5160   case X86::BLSMSK32rr:
5161   case X86::BLSMSK32rm:
5162   case X86::BLSMSK64rr:
5163   case X86::BLSMSK64rm:
5164   case X86::BLSR32rr:
5165   case X86::BLSR32rm:
5166   case X86::BLSR64rr:
5167   case X86::BLSR64rm:
5168   case X86::BLCFILL32rr:
5169   case X86::BLCFILL32rm:
5170   case X86::BLCFILL64rr:
5171   case X86::BLCFILL64rm:
5172   case X86::BLCI32rr:
5173   case X86::BLCI32rm:
5174   case X86::BLCI64rr:
5175   case X86::BLCI64rm:
5176   case X86::BLCIC32rr:
5177   case X86::BLCIC32rm:
5178   case X86::BLCIC64rr:
5179   case X86::BLCIC64rm:
5180   case X86::BLCMSK32rr:
5181   case X86::BLCMSK32rm:
5182   case X86::BLCMSK64rr:
5183   case X86::BLCMSK64rm:
5184   case X86::BLCS32rr:
5185   case X86::BLCS32rm:
5186   case X86::BLCS64rr:
5187   case X86::BLCS64rm:
5188   case X86::BLSFILL32rr:
5189   case X86::BLSFILL32rm:
5190   case X86::BLSFILL64rr:
5191   case X86::BLSFILL64rm:
5192   case X86::BLSIC32rr:
5193   case X86::BLSIC32rm:
5194   case X86::BLSIC64rr:
5195   case X86::BLSIC64rm:
5196   case X86::BZHI32rr:
5197   case X86::BZHI32rm:
5198   case X86::BZHI64rr:
5199   case X86::BZHI64rm:
5200   case X86::T1MSKC32rr:
5201   case X86::T1MSKC32rm:
5202   case X86::T1MSKC64rr:
5203   case X86::T1MSKC64rm:
5204   case X86::TZMSK32rr:
5205   case X86::TZMSK32rm:
5206   case X86::TZMSK64rr:
5207   case X86::TZMSK64rm:
5208     // These instructions clear the overflow flag just like TEST.
5209     // FIXME: These are not the only instructions in this switch that clear the
5210     // overflow flag.
5211     ClearsOverflowFlag = true;
5212     return true;
5213   case X86::BEXTR32rr:
5214   case X86::BEXTR64rr:
5215   case X86::BEXTR32rm:
5216   case X86::BEXTR64rm:
5217   case X86::BEXTRI32ri:
5218   case X86::BEXTRI32mi:
5219   case X86::BEXTRI64ri:
5220   case X86::BEXTRI64mi:
5221     // BEXTR doesn't update the sign flag so we can't use it. It does clear
5222     // the overflow flag, but that's not useful without the sign flag.
5223     NoSignFlag = true;
5224     return true;
5225   }
5226 }
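
// Editorial example (illustrative): in
//   %1:gr32 = ADD32rr %0, %2, implicit-def $eflags
//   TEST32rr %1, %1, implicit-def $eflags
//   JCC_1 %bb.1, 4, implicit $eflags    // 4 == COND_E
// the TEST is removable because ADD32rr is def-convertible: it already set
// ZF according to whether %1 is zero. Consumers of CF or OF still block the
// transform (see optimizeCompareInstr below).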
5227 
5228 /// Check whether the use can be converted to remove a comparison against zero.
5229 /// Returns the EFLAGS condition and the operand index compared against zero.
5230 static std::pair<X86::CondCode, unsigned> isUseDefConvertible(const MachineInstr &MI) {
5231   switch (MI.getOpcode()) {
5232   default:
5233     return std::make_pair(X86::COND_INVALID, ~0U);
5234   CASE_ND(NEG8r)
5235   CASE_ND(NEG16r)
5236   CASE_ND(NEG32r)
5237   CASE_ND(NEG64r)
5238     return std::make_pair(X86::COND_AE, 1U);
5239   case X86::LZCNT16rr:
5240   case X86::LZCNT32rr:
5241   case X86::LZCNT64rr:
5242     return std::make_pair(X86::COND_B, 1U);
5243   case X86::POPCNT16rr:
5244   case X86::POPCNT32rr:
5245   case X86::POPCNT64rr:
5246     return std::make_pair(X86::COND_E, 1U);
5247   case X86::TZCNT16rr:
5248   case X86::TZCNT32rr:
5249   case X86::TZCNT64rr:
5250     return std::make_pair(X86::COND_B, 1U);
5251   case X86::BSF16rr:
5252   case X86::BSF32rr:
5253   case X86::BSF64rr:
5254   case X86::BSR16rr:
5255   case X86::BSR32rr:
5256   case X86::BSR64rr:
5257     return std::make_pair(X86::COND_E, 2U);
5258   case X86::BLSI32rr:
5259   case X86::BLSI64rr:
5260     return std::make_pair(X86::COND_AE, 1U);
5261   case X86::BLSR32rr:
5262   case X86::BLSR64rr:
5263   case X86::BLSMSK32rr:
5264   case X86::BLSMSK64rr:
5265     return std::make_pair(X86::COND_B, 1U);
5266     // TODO: TBM instructions.
5267   }
5268 }
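
// Editorial example (illustrative): for
//   %1:gr32 = POPCNT32rr %0, implicit-def $eflags
//   TEST32rr %0, %0, implicit-def $eflags
// the pair (COND_E, 1) is returned: POPCNT sets ZF exactly when its source
// operand (operand 1) is zero, so an equality test of %0 against zero can
// reuse the POPCNT flags.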
5269 
5270 /// Check if there exists an earlier instruction that
5271 /// operates on the same source operands and sets flags in the same way as
5272 /// Compare; remove Compare if possible.
5273 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
5274                                         Register SrcReg2, int64_t CmpMask,
5275                                         int64_t CmpValue,
5276                                         const MachineRegisterInfo *MRI) const {
5277   // Check whether we can replace SUB with CMP.
5278   switch (CmpInstr.getOpcode()) {
5279   default:
5280     break;
5281   CASE_ND(SUB64ri32)
5282   CASE_ND(SUB32ri)
5283   CASE_ND(SUB16ri)
5284   CASE_ND(SUB8ri)
5285   CASE_ND(SUB64rm)
5286   CASE_ND(SUB32rm)
5287   CASE_ND(SUB16rm)
5288   CASE_ND(SUB8rm)
5289   CASE_ND(SUB64rr)
5290   CASE_ND(SUB32rr)
5291   CASE_ND(SUB16rr)
5292   CASE_ND(SUB8rr) {
5293     if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
5294       return false;
5295     // There is no use of the destination register; we can replace SUB with CMP.
5296     unsigned NewOpcode = 0;
5297 #define FROM_TO(A, B)                                                          \
5298   CASE_ND(A) NewOpcode = X86::B;                                               \
5299   break;
5300     switch (CmpInstr.getOpcode()) {
5301     default:
5302       llvm_unreachable("Unreachable!");
5303     FROM_TO(SUB64rm, CMP64rm)
5304     FROM_TO(SUB32rm, CMP32rm)
5305     FROM_TO(SUB16rm, CMP16rm)
5306     FROM_TO(SUB8rm, CMP8rm)
5307     FROM_TO(SUB64rr, CMP64rr)
5308     FROM_TO(SUB32rr, CMP32rr)
5309     FROM_TO(SUB16rr, CMP16rr)
5310     FROM_TO(SUB8rr, CMP8rr)
5311     FROM_TO(SUB64ri32, CMP64ri32)
5312     FROM_TO(SUB32ri, CMP32ri)
5313     FROM_TO(SUB16ri, CMP16ri)
5314     FROM_TO(SUB8ri, CMP8ri)
5315     }
5316 #undef FROM_TO
5317     CmpInstr.setDesc(get(NewOpcode));
5318     CmpInstr.removeOperand(0);
5319     // Mutating this instruction invalidates any debug data associated with it.
5320     CmpInstr.dropDebugNumber();
5321     // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
5322     if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
5323         NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
5324       return false;
5325   }
5326   }
5327 
5328   // The following code tries to remove the comparison by re-using EFLAGS
5329   // from earlier instructions.
5330 
5331   bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
5332 
5333   // Transformation currently requires SSA values.
5334   if (SrcReg2.isPhysical())
5335     return false;
5336   MachineInstr *SrcRegDef = MRI->getVRegDef(SrcReg);
5337   assert(SrcRegDef && "Must have a definition (SSA)");
5338 
5339   MachineInstr *MI = nullptr;
5340   MachineInstr *Sub = nullptr;
5341   MachineInstr *Movr0Inst = nullptr;
5342   SmallVector<std::pair<MachineInstr *, unsigned>, 4> InstsToUpdate;
5343   bool NoSignFlag = false;
5344   bool ClearsOverflowFlag = false;
5345   bool ShouldUpdateCC = false;
5346   bool IsSwapped = false;
5347   bool HasNF = Subtarget.hasNF();
5348   unsigned OpNo = 0;
5349   X86::CondCode NewCC = X86::COND_INVALID;
5350   int64_t ImmDelta = 0;
5351 
5352   // Search backward from CmpInstr for the next instruction defining EFLAGS.
5353   const TargetRegisterInfo *TRI = &getRegisterInfo();
5354   MachineBasicBlock &CmpMBB = *CmpInstr.getParent();
5355   MachineBasicBlock::reverse_iterator From =
5356       std::next(MachineBasicBlock::reverse_iterator(CmpInstr));
5357   for (MachineBasicBlock *MBB = &CmpMBB;;) {
5358     for (MachineInstr &Inst : make_range(From, MBB->rend())) {
5359       // Try to use EFLAGS from the instruction defining %SrcReg. Example:
5360       //     %eax = addl ...
5361       //     ...                // EFLAGS not changed
5362       //     testl %eax, %eax   // <-- can be removed
5363       if (&Inst == SrcRegDef) {
5364         if (IsCmpZero &&
5365             isDefConvertible(Inst, NoSignFlag, ClearsOverflowFlag)) {
5366           MI = &Inst;
5367           break;
5368         }
5369 
5370         // Look back for the following pattern, in which case the
5371         // test16rr/test64rr instruction could be erased.
5372         //
5373         // Example for test16rr:
5374         //  %reg = and32ri %in_reg, 5
5375         //  ...                         // EFLAGS not changed.
5376         //  %src_reg = copy %reg.sub_16bit:gr32
5377         //  test16rr %src_reg, %src_reg, implicit-def $eflags
5378         // Example for test64rr:
5379         //  %reg = and32ri %in_reg, 5
5380         //  ...                         // EFLAGS not changed.
5381         //  %src_reg = subreg_to_reg 0, %reg, %subreg.sub_index
5382         //  test64rr %src_reg, %src_reg, implicit-def $eflags
5383         MachineInstr *AndInstr = nullptr;
5384         if (IsCmpZero &&
5385             findRedundantFlagInstr(CmpInstr, Inst, MRI, &AndInstr, TRI,
5386                                    Subtarget, NoSignFlag, ClearsOverflowFlag)) {
5387           assert(AndInstr != nullptr && X86::isAND(AndInstr->getOpcode()));
5388           MI = AndInstr;
5389           break;
5390         }
5391         // Cannot find other candidates before definition of SrcReg.
5392         return false;
5393       }
5394 
5395       if (Inst.modifiesRegister(X86::EFLAGS, TRI)) {
5396         // Try to use EFLAGS produced by an instruction reading %SrcReg.
5397         // Example:
5398         //      %eax = ...
5399         //      ...
5400         //      popcntl %eax
5401         //      ...                 // EFLAGS not changed
5402         //      testl %eax, %eax    // <-- can be removed
5403         if (IsCmpZero) {
5404           std::tie(NewCC, OpNo) = isUseDefConvertible(Inst);
5405           if (NewCC != X86::COND_INVALID && Inst.getOperand(OpNo).isReg() &&
5406               Inst.getOperand(OpNo).getReg() == SrcReg) {
5407             ShouldUpdateCC = true;
5408             MI = &Inst;
5409             break;
5410           }
5411         }
5412 
5413         // Try to use EFLAGS from an instruction with similar flag results.
5414         // Example:
5415         //     sub x, y  or  cmp x, y
5416         //     ...           // EFLAGS not changed
5417         //     cmp x, y      // <-- can be removed
5418         if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
5419                                  Inst, &IsSwapped, &ImmDelta)) {
5420           Sub = &Inst;
5421           break;
5422         }
5423 
5424         // MOV32r0 is implemented with xor, which clobbers the condition
5425         // codes. It is safe to move it up if its definition of EFLAGS is
5426         // dead and earlier instructions do not read or write EFLAGS.
5427         if (!Movr0Inst && Inst.getOpcode() == X86::MOV32r0 &&
5428             Inst.registerDefIsDead(X86::EFLAGS, TRI)) {
5429           Movr0Inst = &Inst;
5430           continue;
5431         }
5432 
5433         // For ADDrm/ADDmr instructions with a relocation, we skip the
5434         // optimization that replaces non-NF with NF. This keeps backward
5435         // compatibility with old versions of linkers without APX relocation
5436         // type support on Linux.
5437         bool IsWithReloc = X86EnableAPXForRelocation
5438                                ? false
5439                                : isAddMemInstrWithRelocation(Inst);
5440 
5441         // Try to replace non-NF with NF instructions.
5442         if (HasNF && Inst.registerDefIsDead(X86::EFLAGS, TRI) && !IsWithReloc) {
5443           unsigned NewOp = X86::getNFVariant(Inst.getOpcode());
5444           if (!NewOp)
5445             return false;
5446 
5447           InstsToUpdate.push_back(std::make_pair(&Inst, NewOp));
5448           continue;
5449         }
5450 
5451         // Cannot do anything for any other EFLAG changes.
5452         return false;
5453       }
5454     }
5455 
5456     if (MI || Sub)
5457       break;
5458 
5459     // Reached the beginning of the basic block. Continue in the predecessor
5460     // if there is exactly one.
5461     if (MBB->pred_size() != 1)
5462       return false;
5463     MBB = *MBB->pred_begin();
5464     From = MBB->rbegin();
5465   }
5466 
5467   // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
5468   // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
5469   // If we are done with the basic block, we need to check whether EFLAGS is
5470   // live-out.
5471   bool FlagsMayLiveOut = true;
5472   SmallVector<std::pair<MachineInstr *, X86::CondCode>, 4> OpsToUpdate;
5473   MachineBasicBlock::iterator AfterCmpInstr =
5474       std::next(MachineBasicBlock::iterator(CmpInstr));
5475   for (MachineInstr &Instr : make_range(AfterCmpInstr, CmpMBB.end())) {
5476     bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
5477     bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
5478     // We should check the usage if this instruction uses and updates EFLAGS.
5479     if (!UseEFLAGS && ModifyEFLAGS) {
5480       // It is safe to remove CmpInstr if EFLAGS is updated again.
5481       FlagsMayLiveOut = false;
5482       break;
5483     }
5484     if (!UseEFLAGS && !ModifyEFLAGS)
5485       continue;
5486 
5487     // EFLAGS is used by this instruction.
5488     X86::CondCode OldCC = X86::getCondFromMI(Instr);
5489     if ((MI || IsSwapped || ImmDelta != 0) && OldCC == X86::COND_INVALID)
5490       return false;
5491 
5492     X86::CondCode ReplacementCC = X86::COND_INVALID;
5493     if (MI) {
5494       switch (OldCC) {
5495       default:
5496         break;
5497       case X86::COND_A:
5498       case X86::COND_AE:
5499       case X86::COND_B:
5500       case X86::COND_BE:
5501         // CF is used, we can't perform this optimization.
5502         return false;
5503       case X86::COND_G:
5504       case X86::COND_GE:
5505       case X86::COND_L:
5506       case X86::COND_LE:
5507         // If SF is used, but the instruction doesn't update the SF, then we
5508         // can't do the optimization.
5509         if (NoSignFlag)
5510           return false;
5511         [[fallthrough]];
5512       case X86::COND_O:
5513       case X86::COND_NO:
5514         // If OF is used, the instruction needs to clear it like CmpZero does.
5515         if (!ClearsOverflowFlag)
5516           return false;
5517         break;
5518       case X86::COND_S:
5519       case X86::COND_NS:
5520         // If SF is used, but the instruction doesn't update the SF, then we
5521         // can't do the optimization.
5522         if (NoSignFlag)
5523           return false;
5524         break;
5525       }
5526 
5527       // If we're updating the condition code check if we have to reverse the
5528       // condition.
5529       if (ShouldUpdateCC)
5530         switch (OldCC) {
5531         default:
5532           return false;
5533         case X86::COND_E:
5534           ReplacementCC = NewCC;
5535           break;
5536         case X86::COND_NE:
5537           ReplacementCC = GetOppositeBranchCondition(NewCC);
5538           break;
5539         }
5540     } else if (IsSwapped) {
5541       // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
5542       // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
5543       // We swap the condition code and synthesize the new opcode.
5544       ReplacementCC = getSwappedCondition(OldCC);
5545       if (ReplacementCC == X86::COND_INVALID)
5546         return false;
5547       ShouldUpdateCC = true;
5548     } else if (ImmDelta != 0) {
5549       unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
5550       // Shift amount for min/max constants to adjust for 8/16/32 instruction
5551       // sizes.
5552       switch (OldCC) {
5553       case X86::COND_L: // x <s (C + 1)  -->  x <=s C
5554         if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
5555           return false;
5556         ReplacementCC = X86::COND_LE;
5557         break;
5558       case X86::COND_B: // x <u (C + 1)  -->  x <=u C
5559         if (ImmDelta != 1 || CmpValue == 0)
5560           return false;
5561         ReplacementCC = X86::COND_BE;
5562         break;
5563       case X86::COND_GE: // x >=s (C + 1)  -->  x >s C
5564         if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
5565           return false;
5566         ReplacementCC = X86::COND_G;
5567         break;
5568       case X86::COND_AE: // x >=u (C + 1)  -->  x >u C
5569         if (ImmDelta != 1 || CmpValue == 0)
5570           return false;
5571         ReplacementCC = X86::COND_A;
5572         break;
5573       case X86::COND_G: // x >s (C - 1)  -->  x >=s C
5574         if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
5575           return false;
5576         ReplacementCC = X86::COND_GE;
5577         break;
5578       case X86::COND_A: // x >u (C - 1)  -->  x >=u C
5579         if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
5580           return false;
5581         ReplacementCC = X86::COND_AE;
5582         break;
5583       case X86::COND_LE: // x <=s (C - 1)  -->  x <s C
5584         if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
5585           return false;
5586         ReplacementCC = X86::COND_L;
5587         break;
5588       case X86::COND_BE: // x <=u (C - 1)  -->  x <u C
5589         if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
5590           return false;
5591         ReplacementCC = X86::COND_B;
5592         break;
5593       default:
5594         return false;
5595       }
5596       ShouldUpdateCC = true;
5597     }
5598 
5599     if (ShouldUpdateCC && ReplacementCC != OldCC) {
5600       // Push the MachineInstr to OpsToUpdate.
5601       // If it is safe to remove CmpInstr, the condition code of these
5602       // instructions will be modified.
5603       OpsToUpdate.push_back(std::make_pair(&Instr, ReplacementCC));
5604     }
5605     if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
5606       // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
5607       FlagsMayLiveOut = false;
5608       break;
5609     }
5610   }
5611 
5612   // If we have to update users but EFLAGS is live-out, abort, since we
5613   // cannot easily find all of the users.
5614   if ((MI != nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
5615     for (MachineBasicBlock *Successor : CmpMBB.successors())
5616       if (Successor->isLiveIn(X86::EFLAGS))
5617         return false;
5618   }
5619 
5620   // The instruction to be updated is either Sub or MI.
5621   assert((MI == nullptr || Sub == nullptr) && "Should not have Sub and MI set");
5622   Sub = MI != nullptr ? MI : Sub;
5623   MachineBasicBlock *SubBB = Sub->getParent();
5624   // Move Movr0Inst to the appropriate place before Sub.
5625   if (Movr0Inst) {
5626     // Only move within the same block so we don't accidentally move to a
5627     // block with higher execution frequency.
5628     if (&CmpMBB != SubBB)
5629       return false;
5630     // Look backwards until we find a def that doesn't use the current EFLAGS.
5631     MachineBasicBlock::reverse_iterator InsertI = Sub,
5632                                         InsertE = Sub->getParent()->rend();
5633     for (; InsertI != InsertE; ++InsertI) {
5634       MachineInstr *Instr = &*InsertI;
5635       if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
5636           Instr->modifiesRegister(X86::EFLAGS, TRI)) {
5637         Movr0Inst->getParent()->remove(Movr0Inst);
5638         Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
5639                                    Movr0Inst);
5640         break;
5641       }
5642     }
5643     if (InsertI == InsertE)
5644       return false;
5645   }
5646 
5647   // Replace non-NF with NF instructions.
5648   for (auto &Inst : InstsToUpdate) {
5649     Inst.first->setDesc(get(Inst.second));
5650     Inst.first->removeOperand(
5651         Inst.first->findRegisterDefOperandIdx(X86::EFLAGS, /*TRI=*/nullptr));
5652   }
5653 
5654   // Make sure Sub instruction defines EFLAGS and mark the def live.
5655   MachineOperand *FlagDef =
5656       Sub->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
5657   assert(FlagDef && "Unable to locate a def EFLAGS operand");
5658   FlagDef->setIsDead(false);
5659 
5660   CmpInstr.eraseFromParent();
5661 
5662   // Modify the condition code of instructions in OpsToUpdate.
5663   for (auto &Op : OpsToUpdate) {
5664     Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
5665         .setImm(Op.second);
5666   }
5667   // Add EFLAGS to block live-ins between CmpMBB and the flag producer's block.
5668   for (MachineBasicBlock *MBB = &CmpMBB; MBB != SubBB;
5669        MBB = *MBB->pred_begin()) {
5670     assert(MBB->pred_size() == 1 && "Expected exactly one predecessor");
5671     if (!MBB->isLiveIn(X86::EFLAGS))
5672       MBB->addLiveIn(X86::EFLAGS);
5673   }
5674   return true;
5675 }
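
// A worked example (editorial): given
//   %2:gr32 = SUB32rr %0, %1, implicit-def $eflags
//   ...                                   // EFLAGS not modified
//   CMP32rr %0, %1, implicit-def $eflags
//   JCC_1 %bb.1, 2, implicit $eflags      // 2 == COND_B
// the CMP sets EFLAGS exactly as the earlier SUB did, so it is erased and
// the SUB's EFLAGS def is marked live instead.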
5676 
5677 /// \returns true if the instruction can be changed to COPY when imm is 0.
5678 static bool canConvert2Copy(unsigned Opc) {
5679   switch (Opc) {
5680   default:
5681     return false;
5682   CASE_ND(ADD64ri32)
5683   CASE_ND(SUB64ri32)
5684   CASE_ND(OR64ri32)
5685   CASE_ND(XOR64ri32)
5686   CASE_ND(ADD32ri)
5687   CASE_ND(SUB32ri)
5688   CASE_ND(OR32ri)
5689   CASE_ND(XOR32ri)
5690     return true;
5691   }
5692 }
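
// Editorial note: AND-style opcodes are deliberately absent above because
// "x & 0" is 0 rather than x, so AND32ri with a zero immediate cannot
// become a plain COPY, unlike ADD/SUB/OR/XOR with a zero immediate.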
5693 
5694 /// Convert an ALUrr opcode to the corresponding ALUri opcode, such as
5695 ///     ADD32rr  ==>  ADD32ri
5696 static unsigned convertALUrr2ALUri(unsigned Opc) {
5697   switch (Opc) {
5698   default:
5699     return 0;
5700 #define FROM_TO(FROM, TO)                                                      \
5701   case X86::FROM:                                                              \
5702     return X86::TO;                                                            \
5703   case X86::FROM##_ND:                                                         \
5704     return X86::TO##_ND;
5705     FROM_TO(ADD64rr, ADD64ri32)
5706     FROM_TO(ADC64rr, ADC64ri32)
5707     FROM_TO(SUB64rr, SUB64ri32)
5708     FROM_TO(SBB64rr, SBB64ri32)
5709     FROM_TO(AND64rr, AND64ri32)
5710     FROM_TO(OR64rr, OR64ri32)
5711     FROM_TO(XOR64rr, XOR64ri32)
5712     FROM_TO(SHR64rCL, SHR64ri)
5713     FROM_TO(SHL64rCL, SHL64ri)
5714     FROM_TO(SAR64rCL, SAR64ri)
5715     FROM_TO(ROL64rCL, ROL64ri)
5716     FROM_TO(ROR64rCL, ROR64ri)
5717     FROM_TO(RCL64rCL, RCL64ri)
5718     FROM_TO(RCR64rCL, RCR64ri)
5719     FROM_TO(ADD32rr, ADD32ri)
5720     FROM_TO(ADC32rr, ADC32ri)
5721     FROM_TO(SUB32rr, SUB32ri)
5722     FROM_TO(SBB32rr, SBB32ri)
5723     FROM_TO(AND32rr, AND32ri)
5724     FROM_TO(OR32rr, OR32ri)
5725     FROM_TO(XOR32rr, XOR32ri)
5726     FROM_TO(SHR32rCL, SHR32ri)
5727     FROM_TO(SHL32rCL, SHL32ri)
5728     FROM_TO(SAR32rCL, SAR32ri)
5729     FROM_TO(ROL32rCL, ROL32ri)
5730     FROM_TO(ROR32rCL, ROR32ri)
5731     FROM_TO(RCL32rCL, RCL32ri)
5732     FROM_TO(RCR32rCL, RCR32ri)
5733 #undef FROM_TO
5734 #define FROM_TO(FROM, TO)                                                      \
5735   case X86::FROM:                                                              \
5736     return X86::TO;
5737     FROM_TO(TEST64rr, TEST64ri32)
5738     FROM_TO(CTEST64rr, CTEST64ri32)
5739     FROM_TO(CMP64rr, CMP64ri32)
5740     FROM_TO(CCMP64rr, CCMP64ri32)
5741     FROM_TO(TEST32rr, TEST32ri)
5742     FROM_TO(CTEST32rr, CTEST32ri)
5743     FROM_TO(CMP32rr, CMP32ri)
5744     FROM_TO(CCMP32rr, CCMP32ri)
5745 #undef FROM_TO
5746   }
5747 }
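
// For illustration (a sketch, not upstream text): this mapping is what lets
// foldImmediateImpl inline a register operand whose value is a known constant:
//
//   %c:gr32 = MOV32ri 42
//   %d:gr32 = ADD32rr %a:gr32, %c:gr32, implicit-def $eflags
//     ==>
//   %d:gr32 = ADD32ri %a:gr32, 42, implicit-def $eflags
//
// The shift/rotate entries map the CL-count forms (e.g. SHL64rCL) to their
// 8-bit-immediate forms, which is why the caller checks isInt<8>(ImmVal) for
// those opcodes.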
5748 
5749 /// Reg is assigned ImmVal in DefMI, and is used in UseMI.
5750 /// If MakeChange is true, this function tries to replace Reg by ImmVal in
5751 /// UseMI. If MakeChange is false, just check if folding is possible.
5752 ///
5753 /// \returns true if folding is successful or possible.
5754 bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
5755                                      Register Reg, int64_t ImmVal,
5756                                      MachineRegisterInfo *MRI,
5757                                      bool MakeChange) const {
5758   bool Modified = false;
5759 
5760   // 64-bit operations accept sign-extended 32-bit immediates.
5761   // 32-bit operations accept all 32-bit immediates, so we don't need to check
5762   // them.
5763   const TargetRegisterClass *RC = nullptr;
5764   if (Reg.isVirtual())
5765     RC = MRI->getRegClass(Reg);
5766   if ((Reg.isPhysical() && X86::GR64RegClass.contains(Reg)) ||
5767       (Reg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
5768     if (!isInt<32>(ImmVal))
5769       return false;
5770   }
5771 
5772   if (UseMI.findRegisterUseOperand(Reg, /*TRI=*/nullptr)->getSubReg())
5773     return false;
5774   // An immediate has a larger encoding than a register, so avoid folding the
5775   // immediate if it has more than one use and we are optimizing for size.
5776   if (UseMI.getMF()->getFunction().hasOptSize() && Reg.isVirtual() &&
5777       !MRI->hasOneNonDBGUse(Reg))
5778     return false;
5779 
5780   unsigned Opc = UseMI.getOpcode();
5781   unsigned NewOpc;
5782   if (Opc == TargetOpcode::COPY) {
5783     Register ToReg = UseMI.getOperand(0).getReg();
5784     const TargetRegisterClass *RC = nullptr;
5785     if (ToReg.isVirtual())
5786       RC = MRI->getRegClass(ToReg);
5787     bool GR32Reg = (ToReg.isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
5788                    (ToReg.isPhysical() && X86::GR32RegClass.contains(ToReg));
5789     bool GR64Reg = (ToReg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
5790                    (ToReg.isPhysical() && X86::GR64RegClass.contains(ToReg));
5791     bool GR8Reg = (ToReg.isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
5792                   (ToReg.isPhysical() && X86::GR8RegClass.contains(ToReg));
5793 
5794     if (ImmVal == 0) {
5795       // Only MOV32r0 exists for materializing a zero immediate.
5796       if (!GR32Reg)
5797         return false;
5798     }
5799 
5800     if (GR64Reg) {
5801       if (isUInt<32>(ImmVal))
5802         NewOpc = X86::MOV32ri64;
5803       else
5804         NewOpc = X86::MOV64ri;
5805     } else if (GR32Reg) {
5806       NewOpc = X86::MOV32ri;
5807       if (ImmVal == 0) {
5808         // MOV32r0 clobbers EFLAGS.
5809         const TargetRegisterInfo *TRI = &getRegisterInfo();
5810         if (UseMI.getParent()->computeRegisterLiveness(
5811                 TRI, X86::EFLAGS, UseMI) != MachineBasicBlock::LQR_Dead)
5812           return false;
5813 
5814         // MOV32r0 differs from the other cases because it doesn't encode the
5815         // immediate in the instruction, so we modify it directly here.
5816         if (!MakeChange)
5817           return true;
5818         UseMI.setDesc(get(X86::MOV32r0));
5819         UseMI.removeOperand(
5820             UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr));
5821         UseMI.addOperand(MachineOperand::CreateReg(X86::EFLAGS, /*isDef=*/true,
5822                                                    /*isImp=*/true,
5823                                                    /*isKill=*/false,
5824                                                    /*isDead=*/true));
5825         Modified = true;
5826       }
5827     } else if (GR8Reg)
5828       NewOpc = X86::MOV8ri;
5829     else
5830       return false;
5831   } else
5832     NewOpc = convertALUrr2ALUri(Opc);
5833 
5834   if (!NewOpc)
5835     return false;
5836 
5837   // For SUB instructions the immediate can only be the second source operand.
5838   if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
5839        NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
5840        NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
5841        NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
5842       UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr) != 2)
5843     return false;
5844   // For CMP instructions the immediate can only be at index 1.
5845   if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
5846        (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
5847       UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr) != 1)
5848     return false;
5849 
5850   using namespace X86;
5851   if (isSHL(Opc) || isSHR(Opc) || isSAR(Opc) || isROL(Opc) || isROR(Opc) ||
5852       isRCL(Opc) || isRCR(Opc)) {
5853     unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr);
5854     if (RegIdx < 2)
5855       return false;
5856     if (!isInt<8>(ImmVal))
5857       return false;
5858     assert(Reg == X86::CL);
5859 
5860     if (!MakeChange)
5861       return true;
5862     UseMI.setDesc(get(NewOpc));
5863     UseMI.removeOperand(RegIdx);
5864     UseMI.addOperand(MachineOperand::CreateImm(ImmVal));
5865     // Reg is the physical register $cl, so we can't tell through MRI whether
5866     // DefMI is dead. Let the caller handle it, or the dead-mi-elimination pass
5867     // can delete the instruction that defines the dead physical register.
5868     return true;
5869   }
5870 
5871   if (!MakeChange)
5872     return true;
5873 
5874   if (!Modified) {
5875     // Modify the instruction.
5876     if (ImmVal == 0 && canConvert2Copy(NewOpc) &&
5877         UseMI.registerDefIsDead(X86::EFLAGS, /*TRI=*/nullptr)) {
5878       //          %100 = add %101, 0
5879       //    ==>
5880       //          %100 = COPY %101
5881       UseMI.setDesc(get(TargetOpcode::COPY));
5882       UseMI.removeOperand(
5883           UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr));
5884       UseMI.removeOperand(
5885           UseMI.findRegisterDefOperandIdx(X86::EFLAGS, /*TRI=*/nullptr));
5886       UseMI.untieRegOperand(0);
5887       UseMI.clearFlag(MachineInstr::MIFlag::NoSWrap);
5888       UseMI.clearFlag(MachineInstr::MIFlag::NoUWrap);
5889     } else {
5890       unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
5891       unsigned ImmOpNum = 2;
5892       if (!UseMI.getOperand(0).isDef()) {
5893         Op1 = 0; // TEST, CMP, CTEST, CCMP
5894         ImmOpNum = 1;
5895       }
5896       if (Opc == TargetOpcode::COPY)
5897         ImmOpNum = 1;
5898       if (findCommutedOpIndices(UseMI, Op1, Op2) &&
5899           UseMI.getOperand(Op1).getReg() == Reg)
5900         commuteInstruction(UseMI);
5901 
5902       assert(UseMI.getOperand(ImmOpNum).getReg() == Reg);
5903       UseMI.setDesc(get(NewOpc));
5904       UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
5905     }
5906   }
5907 
5908   if (Reg.isVirtual() && MRI->use_nodbg_empty(Reg))
5909     DefMI->eraseFromBundle();
5910 
5911   return true;
5912 }
5913 
5914 /// foldImmediate - 'Reg' is known to be defined by a move immediate
5915 /// instruction, try to fold the immediate into the use instruction.
5916 bool X86InstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
5917                                  Register Reg, MachineRegisterInfo *MRI) const {
5918   int64_t ImmVal;
5919   if (!getConstValDefinedInReg(DefMI, Reg, ImmVal))
5920     return false;
5921 
5922   return foldImmediateImpl(UseMI, &DefMI, Reg, ImmVal, MRI, true);
5923 }
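
// Example flow (sketch): given
//   %0:gr64 = MOV64ri 7
//   %1:gr64 = SUB64rr %2:gr64, %0:gr64, implicit-def $eflags
// foldImmediate rewrites the use into
//   %1:gr64 = SUB64ri32 %2:gr64, 7, implicit-def $eflags
// and erases the MOV64ri once the register has no remaining non-debug uses.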
5924 
5925 /// Expand a single-def pseudo instruction to a two-addr
5926 /// instruction with two undef reads of the register being defined.
5927 /// This is used for mapping:
5928 ///   %xmm4 = V_SET0
5929 /// to:
5930 ///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
5931 ///
5932 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
5933                              const MCInstrDesc &Desc) {
5934   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
5935   Register Reg = MIB.getReg(0);
5936   MIB->setDesc(Desc);
5937 
5938   // MachineInstr::addOperand() will insert explicit operands before any
5939   // implicit operands.
5940   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
5941   // But we don't trust that.
5942   assert(MIB.getReg(1) == Reg && MIB.getReg(2) == Reg && "Misplaced operand");
5943   return true;
5944 }
5945 
5946 /// Expand a single-def pseudo instruction to a two-addr
5947 /// instruction with two %k0 reads.
5948 /// This is used for mapping:
5949 ///   %k4 = K_SET1
5950 /// to:
5951 ///   %k4 = KXNORrr %k0, %k0
5952 static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
5953                             Register Reg) {
5954   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
5955   MIB->setDesc(Desc);
5956   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
5957   return true;
5958 }
5959 
5960 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
5961                           bool MinusOne) {
5962   MachineBasicBlock &MBB = *MIB->getParent();
5963   const DebugLoc &DL = MIB->getDebugLoc();
5964   Register Reg = MIB.getReg(0);
5965 
5966   // Insert the XOR.
5967   BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
5968       .addReg(Reg, RegState::Undef)
5969       .addReg(Reg, RegState::Undef);
5970 
5971   // Turn the pseudo into an INC or DEC.
5972   MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
5973   MIB.addReg(Reg);
5974 
5975   return true;
5976 }
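
// The expansion produces, e.g. for %eax (sketch):
//   xorl %eax, %eax
//   incl %eax        ; MOV32r1  -> 1
// or
//   xorl %eax, %eax
//   decl %eax        ; MOV32r_1 -> -1
// which is smaller than movl $1, %eax and breaks any dependence on the old
// register value.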
5977 
5978 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
5979                                const TargetInstrInfo &TII,
5980                                const X86Subtarget &Subtarget) {
5981   MachineBasicBlock &MBB = *MIB->getParent();
5982   const DebugLoc &DL = MIB->getDebugLoc();
5983   int64_t Imm = MIB->getOperand(1).getImm();
5984   assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
5985   MachineBasicBlock::iterator I = MIB.getInstr();
5986 
5987   int StackAdjustment;
5988 
5989   if (Subtarget.is64Bit()) {
5990     assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
5991            MIB->getOpcode() == X86::MOV32ImmSExti8);
5992 
5993     // Can't use push/pop lowering if the function might write to the red zone.
5994     X86MachineFunctionInfo *X86FI =
5995         MBB.getParent()->getInfo<X86MachineFunctionInfo>();
5996     if (X86FI->getUsesRedZone()) {
5997       MIB->setDesc(TII.get(MIB->getOpcode() == X86::MOV32ImmSExti8
5998                                ? X86::MOV32ri
5999                                : X86::MOV64ri));
6000       return true;
6001     }
6002 
6003     // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
6004     // widen the register if necessary.
6005     StackAdjustment = 8;
6006     BuildMI(MBB, I, DL, TII.get(X86::PUSH64i32)).addImm(Imm);
6007     MIB->setDesc(TII.get(X86::POP64r));
6008     MIB->getOperand(0).setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
6009   } else {
6010     assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
6011     StackAdjustment = 4;
6012     BuildMI(MBB, I, DL, TII.get(X86::PUSH32i)).addImm(Imm);
6013     MIB->setDesc(TII.get(X86::POP32r));
6014   }
6015   MIB->removeOperand(1);
6016   MIB->addImplicitDefUseOperands(*MBB.getParent());
6017 
6018   // Build CFI if necessary.
6019   MachineFunction &MF = *MBB.getParent();
6020   const X86FrameLowering *TFL = Subtarget.getFrameLowering();
6021   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
6022   bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
6023   bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
6024   if (EmitCFI) {
6025     TFL->BuildCFI(
6026         MBB, I, DL,
6027         MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
6028     TFL->BuildCFI(
6029         MBB, std::next(I), DL,
6030         MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
6031   }
6032 
6033   return true;
6034 }
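
// For example (sketch, destination %rax in 64-bit mode):
//   pushq $-1
//   popq  %rax
// is 3 bytes, versus 7 bytes for the equivalent movq $-1, %rax, at the cost
// of stack traffic; the CFI adjustments above keep unwind info correct when
// no frame pointer is present.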
6035 
6036 // LoadStackGuard has so far only been implemented for 64-bit MachO. A
6037 // different code sequence is needed for other targets.
6038 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
6039                                  const TargetInstrInfo &TII) {
6040   MachineBasicBlock &MBB = *MIB->getParent();
6041   const DebugLoc &DL = MIB->getDebugLoc();
6042   Register Reg = MIB.getReg(0);
6043   const GlobalValue *GV =
6044       cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
6045   auto Flags = MachineMemOperand::MOLoad |
6046                MachineMemOperand::MODereferenceable |
6047                MachineMemOperand::MOInvariant;
6048   MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
6049       MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
6050   MachineBasicBlock::iterator I = MIB.getInstr();
6051 
6052   BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg)
6053       .addReg(X86::RIP)
6054       .addImm(1)
6055       .addReg(0)
6056       .addGlobalAddress(GV, 0, X86II::MO_GOTPCREL)
6057       .addReg(0)
6058       .addMemOperand(MMO);
6059   MIB->setDebugLoc(DL);
6060   MIB->setDesc(TII.get(X86::MOV64rm));
6061   MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
6062 }
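
// On 64-bit MachO the resulting sequence is (sketch):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg
// i.e. the first load fetches the guard variable's address from the GOT and
// the rewritten instruction then loads the guard value through it.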
6063 
6064 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
6065   MachineBasicBlock &MBB = *MIB->getParent();
6066   MachineFunction &MF = *MBB.getParent();
6067   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
6068   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
6069   unsigned XorOp =
6070       MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
6071   MIB->setDesc(TII.get(XorOp));
6072   MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
6073   return true;
6074 }
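
// Sketch of the mechanical expansion: XOR64_FP with destination %rax and
// %rbp as the frame register becomes
//   xorq %rbp, %rax
// (XOR32_FP analogously uses xorl with the 32-bit frame register).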
6075 
6076 // This is used to handle spills for 128/256-bit registers when we have AVX512,
6077 // but not VLX. If it uses an extended register, we need an instruction that
6078 // loads the lower 128/256 bits but is available with only AVX512F.
6079 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
6080                             const TargetRegisterInfo *TRI,
6081                             const MCInstrDesc &LoadDesc,
6082                             const MCInstrDesc &BroadcastDesc, unsigned SubIdx) {
6083   Register DestReg = MIB.getReg(0);
6084   // Check if DestReg is XMM16-31 or YMM16-31.
6085   if (TRI->getEncodingValue(DestReg) < 16) {
6086     // We can use a normal VEX encoded load.
6087     MIB->setDesc(LoadDesc);
6088   } else {
6089     // Use a 128/256-bit VBROADCAST instruction.
6090     MIB->setDesc(BroadcastDesc);
6091     // Change the destination to a 512-bit register.
6092     DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
6093     MIB->getOperand(0).setReg(DestReg);
6094   }
6095   return true;
6096 }
6097 
6098 // This is used to handle spills for 128/256-bit registers when we have AVX512,
6099 // but not VLX. If it uses an extended register, we need an instruction that
6100 // stores the lower 128/256 bits but is available with only AVX512F.
6101 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
6102                              const TargetRegisterInfo *TRI,
6103                              const MCInstrDesc &StoreDesc,
6104                              const MCInstrDesc &ExtractDesc, unsigned SubIdx) {
6105   Register SrcReg = MIB.getReg(X86::AddrNumOperands);
6106   // Check if SrcReg is XMM16-31 or YMM16-31.
6107   if (TRI->getEncodingValue(SrcReg) < 16) {
6108     // We can use a normal VEX encoded store.
6109     MIB->setDesc(StoreDesc);
6110   } else {
6111     // Use a VEXTRACTF instruction.
6112     MIB->setDesc(ExtractDesc);
6113     // Change the source to a 512-bit register.
6114     SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
6115     MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
6116     MIB.addImm(0x0); // Append immediate to extract from the lower bits.
6117   }
6118 
6119   return true;
6120 }
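
// Example (sketch): without VLX, a 128-bit store from %xmm16 cannot use the
// EVEX-only VMOVAPSZ128mr, and the VEX VMOVAPSmr cannot encode xmm16-31, so
//   vmovaps %xmm16, (%rdi)
//     ==>
//   vextractf32x4 $0, %zmm16, (%rdi)
// while the matching load becomes a vbroadcastf32x4 into the zmm register,
// whose upper bits the consumer simply ignores.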
6121 
6122 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
6123   MIB->setDesc(Desc);
6124   int64_t ShiftAmt = MIB->getOperand(2).getImm();
6125   // Temporarily remove the immediate so we can add another source register.
6126   MIB->removeOperand(2);
6127   // Add the register again, preserving the undef flag but not any kill flag.
6128   MIB.addReg(MIB.getReg(1), getUndefRegState(MIB->getOperand(1).isUndef()));
6129   // Add back the immediate.
6130   MIB.addImm(ShiftAmt);
6131   return true;
6132 }
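
// Example (sketch): SHLDROT32ri of %eax by 5 expands to
//   shld $5, %eax, %eax
// a double-shift whose two inputs are the same register, which is exactly a
// rotate-left by 5.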
6133 
6134 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
6135   bool HasAVX = Subtarget.hasAVX();
6136   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
6137   switch (MI.getOpcode()) {
6138   case X86::MOV32r0:
6139     return Expand2AddrUndef(MIB, get(X86::XOR32rr));
6140   case X86::MOV32r1:
6141     return expandMOV32r1(MIB, *this, /*MinusOne=*/false);
6142   case X86::MOV32r_1:
6143     return expandMOV32r1(MIB, *this, /*MinusOne=*/true);
6144   case X86::MOV32ImmSExti8:
6145   case X86::MOV64ImmSExti8:
6146     return ExpandMOVImmSExti8(MIB, *this, Subtarget);
6147   case X86::SETB_C32r:
6148     return Expand2AddrUndef(MIB, get(X86::SBB32rr));
6149   case X86::SETB_C64r:
6150     return Expand2AddrUndef(MIB, get(X86::SBB64rr));
6151   case X86::MMX_SET0:
6152     return Expand2AddrUndef(MIB, get(X86::MMX_PXORrr));
6153   case X86::V_SET0:
6154   case X86::FsFLD0SS:
6155   case X86::FsFLD0SD:
6156   case X86::FsFLD0SH:
6157   case X86::FsFLD0F128:
6158     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
6159   case X86::AVX_SET0: {
6160     assert(HasAVX && "AVX not supported");
6161     const TargetRegisterInfo *TRI = &getRegisterInfo();
6162     Register SrcReg = MIB.getReg(0);
6163     Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
6164     MIB->getOperand(0).setReg(XReg);
6165     Expand2AddrUndef(MIB, get(X86::VXORPSrr));
6166     MIB.addReg(SrcReg, RegState::ImplicitDefine);
6167     return true;
6168   }
6169   case X86::AVX512_128_SET0:
6170   case X86::AVX512_FsFLD0SH:
6171   case X86::AVX512_FsFLD0SS:
6172   case X86::AVX512_FsFLD0SD:
6173   case X86::AVX512_FsFLD0F128: {
6174     bool HasVLX = Subtarget.hasVLX();
6175     Register SrcReg = MIB.getReg(0);
6176     const TargetRegisterInfo *TRI = &getRegisterInfo();
6177     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
6178       return Expand2AddrUndef(MIB,
6179                               get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6180     // Extended register without VLX. Use a larger XOR.
6181     SrcReg =
6182         TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
6183     MIB->getOperand(0).setReg(SrcReg);
6184     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
6185   }
6186   case X86::AVX512_256_SET0:
6187   case X86::AVX512_512_SET0: {
6188     bool HasVLX = Subtarget.hasVLX();
6189     Register SrcReg = MIB.getReg(0);
6190     const TargetRegisterInfo *TRI = &getRegisterInfo();
6191     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
6192       Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
6193       MIB->getOperand(0).setReg(XReg);
6194       Expand2AddrUndef(MIB, get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6195       MIB.addReg(SrcReg, RegState::ImplicitDefine);
6196       return true;
6197     }
6198     if (MI.getOpcode() == X86::AVX512_256_SET0) {
6199       // No VLX so we must reference a zmm.
6200       MCRegister ZReg =
6201           TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
6202       MIB->getOperand(0).setReg(ZReg);
6203     }
6204     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
6205   }
6206   case X86::V_SETALLONES:
6207     return Expand2AddrUndef(MIB,
6208                             get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
6209   case X86::AVX2_SETALLONES:
6210     return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
6211   case X86::AVX1_SETALLONES: {
6212     Register Reg = MIB.getReg(0);
6213     // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
6214     MIB->setDesc(get(X86::VCMPPSYrri));
6215     MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
6216     return true;
6217   }
6218   case X86::AVX512_512_SETALLONES: {
6219     Register Reg = MIB.getReg(0);
6220     MIB->setDesc(get(X86::VPTERNLOGDZrri));
6221     // VPTERNLOGD needs 3 register inputs and an immediate.
6222     // 0xff will return 1s for any input.
6223     MIB.addReg(Reg, RegState::Undef)
6224         .addReg(Reg, RegState::Undef)
6225         .addReg(Reg, RegState::Undef)
6226         .addImm(0xff);
6227     return true;
6228   }
6229   case X86::AVX512_512_SEXT_MASK_32:
6230   case X86::AVX512_512_SEXT_MASK_64: {
6231     Register Reg = MIB.getReg(0);
6232     Register MaskReg = MIB.getReg(1);
6233     unsigned MaskState = getRegState(MIB->getOperand(1));
6234     unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64)
6235                        ? X86::VPTERNLOGQZrrikz
6236                        : X86::VPTERNLOGDZrrikz;
6237     MI.removeOperand(1);
6238     MIB->setDesc(get(Opc));
6239     // VPTERNLOG needs 3 register inputs and an immediate.
6240     // 0xff will return 1s for any input.
6241     MIB.addReg(Reg, RegState::Undef)
6242         .addReg(MaskReg, MaskState)
6243         .addReg(Reg, RegState::Undef)
6244         .addReg(Reg, RegState::Undef)
6245         .addImm(0xff);
6246     return true;
6247   }
6248   case X86::VMOVAPSZ128rm_NOVLX:
6249     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
6250                            get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6251   case X86::VMOVUPSZ128rm_NOVLX:
6252     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
6253                            get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6254   case X86::VMOVAPSZ256rm_NOVLX:
6255     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
6256                            get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6257   case X86::VMOVUPSZ256rm_NOVLX:
6258     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
6259                            get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6260   case X86::VMOVAPSZ128mr_NOVLX:
6261     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
6262                             get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6263   case X86::VMOVUPSZ128mr_NOVLX:
6264     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
6265                             get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6266   case X86::VMOVAPSZ256mr_NOVLX:
6267     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
6268                             get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6269   case X86::VMOVUPSZ256mr_NOVLX:
6270     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
6271                             get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6272   case X86::MOV32ri64: {
6273     Register Reg = MIB.getReg(0);
6274     Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
6275     MI.setDesc(get(X86::MOV32ri));
6276     MIB->getOperand(0).setReg(Reg32);
6277     MIB.addReg(Reg, RegState::ImplicitDefine);
6278     return true;
6279   }
6280 
6281   case X86::RDFLAGS32:
6282   case X86::RDFLAGS64: {
6283     unsigned Is64Bit = MI.getOpcode() == X86::RDFLAGS64;
6284     MachineBasicBlock &MBB = *MIB->getParent();
6285 
6286     MachineInstr *NewMI = BuildMI(MBB, MI, MIB->getDebugLoc(),
6287                                   get(Is64Bit ? X86::PUSHF64 : X86::PUSHF32))
6288                               .getInstr();
6289 
6290     // Permit reads of the EFLAGS and DF registers without them being defined.
6291     // This intrinsic exists to read external processor state in flags, such as
6292     // the trap flag, interrupt flag, and direction flag, none of which are
6293     // modeled by the backend.
6294     assert(NewMI->getOperand(2).getReg() == X86::EFLAGS &&
6295            "Unexpected register in operand! Should be EFLAGS.");
6296     NewMI->getOperand(2).setIsUndef();
6297     assert(NewMI->getOperand(3).getReg() == X86::DF &&
6298            "Unexpected register in operand! Should be DF.");
6299     NewMI->getOperand(3).setIsUndef();
6300 
6301     MIB->setDesc(get(Is64Bit ? X86::POP64r : X86::POP32r));
6302     return true;
6303   }
6304 
6305   case X86::WRFLAGS32:
6306   case X86::WRFLAGS64: {
6307     unsigned Is64Bit = MI.getOpcode() == X86::WRFLAGS64;
6308     MachineBasicBlock &MBB = *MIB->getParent();
6309 
6310     BuildMI(MBB, MI, MIB->getDebugLoc(),
6311             get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
6312         .addReg(MI.getOperand(0).getReg());
6313     BuildMI(MBB, MI, MIB->getDebugLoc(),
6314             get(Is64Bit ? X86::POPF64 : X86::POPF32));
6315     MI.eraseFromParent();
6316     return true;
6317   }
6318 
6319   // KNL does not recognize dependency-breaking idioms for mask registers,
6320   // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
6321   // Using %k0 as the undef input register is a performance heuristic based
6322   // on the assumption that %k0 is used less frequently than the other mask
6323   // registers, since it is not usable as a write mask.
6324   // FIXME: A more advanced approach would be to choose the best input mask
6325   // register based on context.
6326   case X86::KSET0W:
6327     return Expand2AddrKreg(MIB, get(X86::KXORWkk), X86::K0);
6328   case X86::KSET0D:
6329     return Expand2AddrKreg(MIB, get(X86::KXORDkk), X86::K0);
6330   case X86::KSET0Q:
6331     return Expand2AddrKreg(MIB, get(X86::KXORQkk), X86::K0);
6332   case X86::KSET1W:
6333     return Expand2AddrKreg(MIB, get(X86::KXNORWkk), X86::K0);
6334   case X86::KSET1D:
6335     return Expand2AddrKreg(MIB, get(X86::KXNORDkk), X86::K0);
6336   case X86::KSET1Q:
6337     return Expand2AddrKreg(MIB, get(X86::KXNORQkk), X86::K0);
6338   case TargetOpcode::LOAD_STACK_GUARD:
6339     expandLoadStackGuard(MIB, *this);
6340     return true;
6341   case X86::XOR64_FP:
6342   case X86::XOR32_FP:
6343     return expandXorFP(MIB, *this);
6344   case X86::SHLDROT32ri:
6345     return expandSHXDROT(MIB, get(X86::SHLD32rri8));
6346   case X86::SHLDROT64ri:
6347     return expandSHXDROT(MIB, get(X86::SHLD64rri8));
6348   case X86::SHRDROT32ri:
6349     return expandSHXDROT(MIB, get(X86::SHRD32rri8));
6350   case X86::SHRDROT64ri:
6351     return expandSHXDROT(MIB, get(X86::SHRD64rri8));
6352   case X86::ADD8rr_DB:
6353     MIB->setDesc(get(X86::OR8rr));
6354     break;
6355   case X86::ADD16rr_DB:
6356     MIB->setDesc(get(X86::OR16rr));
6357     break;
6358   case X86::ADD32rr_DB:
6359     MIB->setDesc(get(X86::OR32rr));
6360     break;
6361   case X86::ADD64rr_DB:
6362     MIB->setDesc(get(X86::OR64rr));
6363     break;
6364   case X86::ADD8ri_DB:
6365     MIB->setDesc(get(X86::OR8ri));
6366     break;
6367   case X86::ADD16ri_DB:
6368     MIB->setDesc(get(X86::OR16ri));
6369     break;
6370   case X86::ADD32ri_DB:
6371     MIB->setDesc(get(X86::OR32ri));
6372     break;
6373   case X86::ADD64ri32_DB:
6374     MIB->setDesc(get(X86::OR64ri32));
6375     break;
6376   }
6377   return false;
6378 }
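
// A representative expansion from the switch above (sketch): AVX_SET0 on
// %ymm2 is rewritten in place to
//   %xmm2 = VXORPSrr undef %xmm2, undef %xmm2, implicit-def %ymm2
// relying on VEX xmm writes zeroing the upper half of the ymm register.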
6379 
6380 /// Return true for all instructions that only update
6381 /// the first 32 or 64 bits of the destination register and leave the rest
6382 /// unmodified. This can be used to avoid folding loads if the instructions
6383 /// only update part of the destination register, and the non-updated part is
6384 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
6385 /// instructions breaks the partial register dependency and can improve
6386 /// performance. e.g.:
6387 ///
6388 ///   movss (%rdi), %xmm0
6389 ///   cvtss2sd %xmm0, %xmm0
6390 ///
6391 /// Instead of
6392 ///   cvtss2sd (%rdi), %xmm0
6393 ///
6394 /// FIXME: This should be turned into a TSFlags.
6395 ///
6396 static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget,
6397                                 bool ForLoadFold = false) {
6398   switch (Opcode) {
6399   case X86::CVTSI2SSrr:
6400   case X86::CVTSI2SSrm:
6401   case X86::CVTSI642SSrr:
6402   case X86::CVTSI642SSrm:
6403   case X86::CVTSI2SDrr:
6404   case X86::CVTSI2SDrm:
6405   case X86::CVTSI642SDrr:
6406   case X86::CVTSI642SDrm:
6407     // Load folding won't affect the undef register update since the input is
6408     // a GPR.
6409     return !ForLoadFold;
6410   case X86::CVTSD2SSrr:
6411   case X86::CVTSD2SSrm:
6412   case X86::CVTSS2SDrr:
6413   case X86::CVTSS2SDrm:
6414   case X86::MOVHPDrm:
6415   case X86::MOVHPSrm:
6416   case X86::MOVLPDrm:
6417   case X86::MOVLPSrm:
6418   case X86::RCPSSr:
6419   case X86::RCPSSm:
6420   case X86::RCPSSr_Int:
6421   case X86::RCPSSm_Int:
6422   case X86::ROUNDSDri:
6423   case X86::ROUNDSDmi:
6424   case X86::ROUNDSSri:
6425   case X86::ROUNDSSmi:
6426   case X86::RSQRTSSr:
6427   case X86::RSQRTSSm:
6428   case X86::RSQRTSSr_Int:
6429   case X86::RSQRTSSm_Int:
6430   case X86::SQRTSSr:
6431   case X86::SQRTSSm:
6432   case X86::SQRTSSr_Int:
6433   case X86::SQRTSSm_Int:
6434   case X86::SQRTSDr:
6435   case X86::SQRTSDm:
6436   case X86::SQRTSDr_Int:
6437   case X86::SQRTSDm_Int:
6438     return true;
6439   case X86::VFCMULCPHZ128rm:
6440   case X86::VFCMULCPHZ128rmb:
6441   case X86::VFCMULCPHZ128rmbkz:
6442   case X86::VFCMULCPHZ128rmkz:
6443   case X86::VFCMULCPHZ128rr:
6444   case X86::VFCMULCPHZ128rrkz:
6445   case X86::VFCMULCPHZ256rm:
6446   case X86::VFCMULCPHZ256rmb:
6447   case X86::VFCMULCPHZ256rmbkz:
6448   case X86::VFCMULCPHZ256rmkz:
6449   case X86::VFCMULCPHZ256rr:
6450   case X86::VFCMULCPHZ256rrkz:
6451   case X86::VFCMULCPHZrm:
6452   case X86::VFCMULCPHZrmb:
6453   case X86::VFCMULCPHZrmbkz:
6454   case X86::VFCMULCPHZrmkz:
6455   case X86::VFCMULCPHZrr:
6456   case X86::VFCMULCPHZrrb:
6457   case X86::VFCMULCPHZrrbkz:
6458   case X86::VFCMULCPHZrrkz:
6459   case X86::VFMULCPHZ128rm:
6460   case X86::VFMULCPHZ128rmb:
6461   case X86::VFMULCPHZ128rmbkz:
6462   case X86::VFMULCPHZ128rmkz:
6463   case X86::VFMULCPHZ128rr:
6464   case X86::VFMULCPHZ128rrkz:
6465   case X86::VFMULCPHZ256rm:
6466   case X86::VFMULCPHZ256rmb:
6467   case X86::VFMULCPHZ256rmbkz:
6468   case X86::VFMULCPHZ256rmkz:
6469   case X86::VFMULCPHZ256rr:
6470   case X86::VFMULCPHZ256rrkz:
6471   case X86::VFMULCPHZrm:
6472   case X86::VFMULCPHZrmb:
6473   case X86::VFMULCPHZrmbkz:
6474   case X86::VFMULCPHZrmkz:
6475   case X86::VFMULCPHZrr:
6476   case X86::VFMULCPHZrrb:
6477   case X86::VFMULCPHZrrbkz:
6478   case X86::VFMULCPHZrrkz:
6479   case X86::VFCMULCSHZrm:
6480   case X86::VFCMULCSHZrmkz:
6481   case X86::VFCMULCSHZrr:
6482   case X86::VFCMULCSHZrrb:
6483   case X86::VFCMULCSHZrrbkz:
6484   case X86::VFCMULCSHZrrkz:
6485   case X86::VFMULCSHZrm:
6486   case X86::VFMULCSHZrmkz:
6487   case X86::VFMULCSHZrr:
6488   case X86::VFMULCSHZrrb:
6489   case X86::VFMULCSHZrrbkz:
6490   case X86::VFMULCSHZrrkz:
6491     return Subtarget.hasMULCFalseDeps();
6492   case X86::VPERMDYrm:
6493   case X86::VPERMDYrr:
6494   case X86::VPERMQYmi:
6495   case X86::VPERMQYri:
6496   case X86::VPERMPSYrm:
6497   case X86::VPERMPSYrr:
6498   case X86::VPERMPDYmi:
6499   case X86::VPERMPDYri:
6500   case X86::VPERMDZ256rm:
6501   case X86::VPERMDZ256rmb:
6502   case X86::VPERMDZ256rmbkz:
6503   case X86::VPERMDZ256rmkz:
6504   case X86::VPERMDZ256rr:
6505   case X86::VPERMDZ256rrkz:
6506   case X86::VPERMDZrm:
6507   case X86::VPERMDZrmb:
6508   case X86::VPERMDZrmbkz:
6509   case X86::VPERMDZrmkz:
6510   case X86::VPERMDZrr:
6511   case X86::VPERMDZrrkz:
6512   case X86::VPERMQZ256mbi:
6513   case X86::VPERMQZ256mbikz:
6514   case X86::VPERMQZ256mi:
6515   case X86::VPERMQZ256mikz:
6516   case X86::VPERMQZ256ri:
6517   case X86::VPERMQZ256rikz:
6518   case X86::VPERMQZ256rm:
6519   case X86::VPERMQZ256rmb:
6520   case X86::VPERMQZ256rmbkz:
6521   case X86::VPERMQZ256rmkz:
6522   case X86::VPERMQZ256rr:
6523   case X86::VPERMQZ256rrkz:
6524   case X86::VPERMQZmbi:
6525   case X86::VPERMQZmbikz:
6526   case X86::VPERMQZmi:
6527   case X86::VPERMQZmikz:
6528   case X86::VPERMQZri:
6529   case X86::VPERMQZrikz:
6530   case X86::VPERMQZrm:
6531   case X86::VPERMQZrmb:
6532   case X86::VPERMQZrmbkz:
6533   case X86::VPERMQZrmkz:
6534   case X86::VPERMQZrr:
6535   case X86::VPERMQZrrkz:
6536   case X86::VPERMPSZ256rm:
6537   case X86::VPERMPSZ256rmb:
6538   case X86::VPERMPSZ256rmbkz:
6539   case X86::VPERMPSZ256rmkz:
6540   case X86::VPERMPSZ256rr:
6541   case X86::VPERMPSZ256rrkz:
6542   case X86::VPERMPSZrm:
6543   case X86::VPERMPSZrmb:
6544   case X86::VPERMPSZrmbkz:
6545   case X86::VPERMPSZrmkz:
6546   case X86::VPERMPSZrr:
6547   case X86::VPERMPSZrrkz:
6548   case X86::VPERMPDZ256mbi:
6549   case X86::VPERMPDZ256mbikz:
6550   case X86::VPERMPDZ256mi:
6551   case X86::VPERMPDZ256mikz:
6552   case X86::VPERMPDZ256ri:
6553   case X86::VPERMPDZ256rikz:
6554   case X86::VPERMPDZ256rm:
6555   case X86::VPERMPDZ256rmb:
6556   case X86::VPERMPDZ256rmbkz:
6557   case X86::VPERMPDZ256rmkz:
6558   case X86::VPERMPDZ256rr:
6559   case X86::VPERMPDZ256rrkz:
6560   case X86::VPERMPDZmbi:
6561   case X86::VPERMPDZmbikz:
6562   case X86::VPERMPDZmi:
6563   case X86::VPERMPDZmikz:
6564   case X86::VPERMPDZri:
6565   case X86::VPERMPDZrikz:
6566   case X86::VPERMPDZrm:
6567   case X86::VPERMPDZrmb:
6568   case X86::VPERMPDZrmbkz:
6569   case X86::VPERMPDZrmkz:
6570   case X86::VPERMPDZrr:
6571   case X86::VPERMPDZrrkz:
6572     return Subtarget.hasPERMFalseDeps();
6573   case X86::VRANGEPDZ128rmbi:
6574   case X86::VRANGEPDZ128rmbikz:
6575   case X86::VRANGEPDZ128rmi:
6576   case X86::VRANGEPDZ128rmikz:
6577   case X86::VRANGEPDZ128rri:
6578   case X86::VRANGEPDZ128rrikz:
6579   case X86::VRANGEPDZ256rmbi:
6580   case X86::VRANGEPDZ256rmbikz:
6581   case X86::VRANGEPDZ256rmi:
6582   case X86::VRANGEPDZ256rmikz:
6583   case X86::VRANGEPDZ256rri:
6584   case X86::VRANGEPDZ256rrikz:
6585   case X86::VRANGEPDZrmbi:
6586   case X86::VRANGEPDZrmbikz:
6587   case X86::VRANGEPDZrmi:
6588   case X86::VRANGEPDZrmikz:
6589   case X86::VRANGEPDZrri:
6590   case X86::VRANGEPDZrrib:
6591   case X86::VRANGEPDZrribkz:
6592   case X86::VRANGEPDZrrikz:
6593   case X86::VRANGEPSZ128rmbi:
6594   case X86::VRANGEPSZ128rmbikz:
6595   case X86::VRANGEPSZ128rmi:
6596   case X86::VRANGEPSZ128rmikz:
6597   case X86::VRANGEPSZ128rri:
6598   case X86::VRANGEPSZ128rrikz:
6599   case X86::VRANGEPSZ256rmbi:
6600   case X86::VRANGEPSZ256rmbikz:
6601   case X86::VRANGEPSZ256rmi:
6602   case X86::VRANGEPSZ256rmikz:
6603   case X86::VRANGEPSZ256rri:
6604   case X86::VRANGEPSZ256rrikz:
6605   case X86::VRANGEPSZrmbi:
6606   case X86::VRANGEPSZrmbikz:
6607   case X86::VRANGEPSZrmi:
6608   case X86::VRANGEPSZrmikz:
6609   case X86::VRANGEPSZrri:
6610   case X86::VRANGEPSZrrib:
6611   case X86::VRANGEPSZrribkz:
6612   case X86::VRANGEPSZrrikz:
6613   case X86::VRANGESDZrmi:
6614   case X86::VRANGESDZrmikz:
6615   case X86::VRANGESDZrri:
6616   case X86::VRANGESDZrrib:
6617   case X86::VRANGESDZrribkz:
6618   case X86::VRANGESDZrrikz:
6619   case X86::VRANGESSZrmi:
6620   case X86::VRANGESSZrmikz:
6621   case X86::VRANGESSZrri:
6622   case X86::VRANGESSZrrib:
6623   case X86::VRANGESSZrribkz:
6624   case X86::VRANGESSZrrikz:
6625     return Subtarget.hasRANGEFalseDeps();
6626   case X86::VGETMANTSSZrmi:
6627   case X86::VGETMANTSSZrmikz:
6628   case X86::VGETMANTSSZrri:
6629   case X86::VGETMANTSSZrrib:
6630   case X86::VGETMANTSSZrribkz:
6631   case X86::VGETMANTSSZrrikz:
6632   case X86::VGETMANTSDZrmi:
6633   case X86::VGETMANTSDZrmikz:
6634   case X86::VGETMANTSDZrri:
6635   case X86::VGETMANTSDZrrib:
6636   case X86::VGETMANTSDZrribkz:
6637   case X86::VGETMANTSDZrrikz:
6638   case X86::VGETMANTSHZrmi:
6639   case X86::VGETMANTSHZrmikz:
6640   case X86::VGETMANTSHZrri:
6641   case X86::VGETMANTSHZrrib:
6642   case X86::VGETMANTSHZrribkz:
6643   case X86::VGETMANTSHZrrikz:
6644   case X86::VGETMANTPSZ128rmbi:
6645   case X86::VGETMANTPSZ128rmbikz:
6646   case X86::VGETMANTPSZ128rmi:
6647   case X86::VGETMANTPSZ128rmikz:
6648   case X86::VGETMANTPSZ256rmbi:
6649   case X86::VGETMANTPSZ256rmbikz:
6650   case X86::VGETMANTPSZ256rmi:
6651   case X86::VGETMANTPSZ256rmikz:
6652   case X86::VGETMANTPSZrmbi:
6653   case X86::VGETMANTPSZrmbikz:
6654   case X86::VGETMANTPSZrmi:
6655   case X86::VGETMANTPSZrmikz:
6656   case X86::VGETMANTPDZ128rmbi:
6657   case X86::VGETMANTPDZ128rmbikz:
6658   case X86::VGETMANTPDZ128rmi:
6659   case X86::VGETMANTPDZ128rmikz:
6660   case X86::VGETMANTPDZ256rmbi:
6661   case X86::VGETMANTPDZ256rmbikz:
6662   case X86::VGETMANTPDZ256rmi:
6663   case X86::VGETMANTPDZ256rmikz:
6664   case X86::VGETMANTPDZrmbi:
6665   case X86::VGETMANTPDZrmbikz:
6666   case X86::VGETMANTPDZrmi:
6667   case X86::VGETMANTPDZrmikz:
6668     return Subtarget.hasGETMANTFalseDeps();
6669   case X86::VPMULLQZ128rm:
6670   case X86::VPMULLQZ128rmb:
6671   case X86::VPMULLQZ128rmbkz:
6672   case X86::VPMULLQZ128rmkz:
6673   case X86::VPMULLQZ128rr:
6674   case X86::VPMULLQZ128rrkz:
6675   case X86::VPMULLQZ256rm:
6676   case X86::VPMULLQZ256rmb:
6677   case X86::VPMULLQZ256rmbkz:
6678   case X86::VPMULLQZ256rmkz:
6679   case X86::VPMULLQZ256rr:
6680   case X86::VPMULLQZ256rrkz:
6681   case X86::VPMULLQZrm:
6682   case X86::VPMULLQZrmb:
6683   case X86::VPMULLQZrmbkz:
6684   case X86::VPMULLQZrmkz:
6685   case X86::VPMULLQZrr:
6686   case X86::VPMULLQZrrkz:
6687     return Subtarget.hasMULLQFalseDeps();
6688   // GPR
6689   case X86::POPCNT32rm:
6690   case X86::POPCNT32rr:
6691   case X86::POPCNT64rm:
6692   case X86::POPCNT64rr:
6693     return Subtarget.hasPOPCNTFalseDeps();
6694   case X86::LZCNT32rm:
6695   case X86::LZCNT32rr:
6696   case X86::LZCNT64rm:
6697   case X86::LZCNT64rr:
6698   case X86::TZCNT32rm:
6699   case X86::TZCNT32rr:
6700   case X86::TZCNT64rm:
6701   case X86::TZCNT64rr:
6702     return Subtarget.hasLZCNTFalseDeps();
6703   }
6704 
6705   return false;
6706 }
6707 
6708 /// Inform the BreakFalseDeps pass how many idle
6709 /// instructions we would like before a partial register update.
6710 unsigned X86InstrInfo::getPartialRegUpdateClearance(
6711     const MachineInstr &MI, unsigned OpNum,
6712     const TargetRegisterInfo *TRI) const {
6713 
6714   if (OpNum != 0)
6715     return 0;
6716 
6717   // NDD ops with 8/16-bit results may appear to be partial register
6718   // updates after register allocation.
6719   bool HasNDDPartialWrite = false;
6720   if (X86II::hasNewDataDest(MI.getDesc().TSFlags)) {
6721     Register Reg = MI.getOperand(0).getReg();
6722     if (!Reg.isVirtual())
6723       HasNDDPartialWrite =
6724           X86::GR8RegClass.contains(Reg) || X86::GR16RegClass.contains(Reg);
6725   }
6726 
6727   if (!(HasNDDPartialWrite || hasPartialRegUpdate(MI.getOpcode(), Subtarget)))
6728     return 0;
6729 
6730   // Check if the result register is also used as a source.
6731   // For non-NDD ops, this means a partial update is wanted, hence we return 0.
6732   // For NDD ops, this means it is possible to compress the instruction
6733   // to a legacy form in CompressEVEX, which would create an unwanted partial
6734   // update, so we return the clearance.
6735   const MachineOperand &MO = MI.getOperand(0);
6736   Register Reg = MO.getReg();
6737   bool ReadsReg = false;
6738   if (Reg.isVirtual())
6739     ReadsReg = (MO.readsReg() || MI.readsVirtualRegister(Reg));
6740   else
6741     ReadsReg = MI.readsRegister(Reg, TRI);
6742   if (ReadsReg != HasNDDPartialWrite)
6743     return 0;
6744 
6745   // If any instructions in the clearance range are reading Reg, insert a
6746   // dependency breaking instruction, which is inexpensive and is likely to
6747   // be hidden in other instruction's cycles.
6748   return PartialRegUpdateClearance;
6749 }
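
// Example (sketch): with the default clearance of 64, if BreakFalseDeps sees
//   cvtsi2sd %eax, %xmm0
// and %xmm0 was last written fewer than 64 instructions earlier, it calls
// breakPartialRegDependency below to insert a zeroing idiom first.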
6750 
6751 // Return true for any instruction that copies the high bits of the first source
6752 // operand into the unused high bits of the destination operand.
6753 // Also returns true for instructions that have two inputs where one may
6754 // be undef and we want it to use the same register as the other input.
6755 static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
6756                               bool ForLoadFold = false) {
6757   // Set the OpNum parameter to the first source operand.
6758   switch (Opcode) {
6759   case X86::MMX_PUNPCKHBWrr:
6760   case X86::MMX_PUNPCKHWDrr:
6761   case X86::MMX_PUNPCKHDQrr:
6762   case X86::MMX_PUNPCKLBWrr:
6763   case X86::MMX_PUNPCKLWDrr:
6764   case X86::MMX_PUNPCKLDQrr:
6765   case X86::MOVHLPSrr:
6766   case X86::PACKSSWBrr:
6767   case X86::PACKUSWBrr:
6768   case X86::PACKSSDWrr:
6769   case X86::PACKUSDWrr:
6770   case X86::PUNPCKHBWrr:
6771   case X86::PUNPCKLBWrr:
6772   case X86::PUNPCKHWDrr:
6773   case X86::PUNPCKLWDrr:
6774   case X86::PUNPCKHDQrr:
6775   case X86::PUNPCKLDQrr:
6776   case X86::PUNPCKHQDQrr:
6777   case X86::PUNPCKLQDQrr:
6778   case X86::SHUFPDrri:
6779   case X86::SHUFPSrri:
6780     // These instructions are sometimes used with an undef first or second
6781     // source. Return true here so BreakFalseDeps will assign this source to the
6782     // same register as the first source to avoid a false dependency.
6783     // Operand 1 of these instructions is tied so they're separate from their
6784     // VEX counterparts.
6785     return OpNum == 2 && !ForLoadFold;
6786 
6787   case X86::VMOVLHPSrr:
6788   case X86::VMOVLHPSZrr:
6789   case X86::VPACKSSWBrr:
6790   case X86::VPACKUSWBrr:
6791   case X86::VPACKSSDWrr:
6792   case X86::VPACKUSDWrr:
6793   case X86::VPACKSSWBZ128rr:
6794   case X86::VPACKUSWBZ128rr:
6795   case X86::VPACKSSDWZ128rr:
6796   case X86::VPACKUSDWZ128rr:
6797   case X86::VPERM2F128rri:
6798   case X86::VPERM2I128rri:
6799   case X86::VSHUFF32X4Z256rri:
6800   case X86::VSHUFF32X4Zrri:
6801   case X86::VSHUFF64X2Z256rri:
6802   case X86::VSHUFF64X2Zrri:
6803   case X86::VSHUFI32X4Z256rri:
6804   case X86::VSHUFI32X4Zrri:
6805   case X86::VSHUFI64X2Z256rri:
6806   case X86::VSHUFI64X2Zrri:
6807   case X86::VPUNPCKHBWrr:
6808   case X86::VPUNPCKLBWrr:
6809   case X86::VPUNPCKHBWYrr:
6810   case X86::VPUNPCKLBWYrr:
6811   case X86::VPUNPCKHBWZ128rr:
6812   case X86::VPUNPCKLBWZ128rr:
6813   case X86::VPUNPCKHBWZ256rr:
6814   case X86::VPUNPCKLBWZ256rr:
6815   case X86::VPUNPCKHBWZrr:
6816   case X86::VPUNPCKLBWZrr:
6817   case X86::VPUNPCKHWDrr:
6818   case X86::VPUNPCKLWDrr:
6819   case X86::VPUNPCKHWDYrr:
6820   case X86::VPUNPCKLWDYrr:
6821   case X86::VPUNPCKHWDZ128rr:
6822   case X86::VPUNPCKLWDZ128rr:
6823   case X86::VPUNPCKHWDZ256rr:
6824   case X86::VPUNPCKLWDZ256rr:
6825   case X86::VPUNPCKHWDZrr:
6826   case X86::VPUNPCKLWDZrr:
6827   case X86::VPUNPCKHDQrr:
6828   case X86::VPUNPCKLDQrr:
6829   case X86::VPUNPCKHDQYrr:
6830   case X86::VPUNPCKLDQYrr:
6831   case X86::VPUNPCKHDQZ128rr:
6832   case X86::VPUNPCKLDQZ128rr:
6833   case X86::VPUNPCKHDQZ256rr:
6834   case X86::VPUNPCKLDQZ256rr:
6835   case X86::VPUNPCKHDQZrr:
6836   case X86::VPUNPCKLDQZrr:
6837   case X86::VPUNPCKHQDQrr:
6838   case X86::VPUNPCKLQDQrr:
6839   case X86::VPUNPCKHQDQYrr:
6840   case X86::VPUNPCKLQDQYrr:
6841   case X86::VPUNPCKHQDQZ128rr:
6842   case X86::VPUNPCKLQDQZ128rr:
6843   case X86::VPUNPCKHQDQZ256rr:
6844   case X86::VPUNPCKLQDQZ256rr:
6845   case X86::VPUNPCKHQDQZrr:
6846   case X86::VPUNPCKLQDQZrr:
6847     // These instructions are sometimes used with an undef first or second
6848     // source. Return true here so BreakFalseDeps will assign this source to the
6849     // same register as the first source to avoid a false dependency.
6850     return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
6851 
6852   case X86::VCVTSI2SSrr:
6853   case X86::VCVTSI2SSrm:
6854   case X86::VCVTSI2SSrr_Int:
6855   case X86::VCVTSI2SSrm_Int:
6856   case X86::VCVTSI642SSrr:
6857   case X86::VCVTSI642SSrm:
6858   case X86::VCVTSI642SSrr_Int:
6859   case X86::VCVTSI642SSrm_Int:
6860   case X86::VCVTSI2SDrr:
6861   case X86::VCVTSI2SDrm:
6862   case X86::VCVTSI2SDrr_Int:
6863   case X86::VCVTSI2SDrm_Int:
6864   case X86::VCVTSI642SDrr:
6865   case X86::VCVTSI642SDrm:
6866   case X86::VCVTSI642SDrr_Int:
6867   case X86::VCVTSI642SDrm_Int:
6868   // AVX-512
6869   case X86::VCVTSI2SSZrr:
6870   case X86::VCVTSI2SSZrm:
6871   case X86::VCVTSI2SSZrr_Int:
6872   case X86::VCVTSI2SSZrrb_Int:
6873   case X86::VCVTSI2SSZrm_Int:
6874   case X86::VCVTSI642SSZrr:
6875   case X86::VCVTSI642SSZrm:
6876   case X86::VCVTSI642SSZrr_Int:
6877   case X86::VCVTSI642SSZrrb_Int:
6878   case X86::VCVTSI642SSZrm_Int:
6879   case X86::VCVTSI2SDZrr:
6880   case X86::VCVTSI2SDZrm:
6881   case X86::VCVTSI2SDZrr_Int:
6882   case X86::VCVTSI2SDZrm_Int:
6883   case X86::VCVTSI642SDZrr:
6884   case X86::VCVTSI642SDZrm:
6885   case X86::VCVTSI642SDZrr_Int:
6886   case X86::VCVTSI642SDZrrb_Int:
6887   case X86::VCVTSI642SDZrm_Int:
6888   case X86::VCVTUSI2SSZrr:
6889   case X86::VCVTUSI2SSZrm:
6890   case X86::VCVTUSI2SSZrr_Int:
6891   case X86::VCVTUSI2SSZrrb_Int:
6892   case X86::VCVTUSI2SSZrm_Int:
6893   case X86::VCVTUSI642SSZrr:
6894   case X86::VCVTUSI642SSZrm:
6895   case X86::VCVTUSI642SSZrr_Int:
6896   case X86::VCVTUSI642SSZrrb_Int:
6897   case X86::VCVTUSI642SSZrm_Int:
6898   case X86::VCVTUSI2SDZrr:
6899   case X86::VCVTUSI2SDZrm:
6900   case X86::VCVTUSI2SDZrr_Int:
6901   case X86::VCVTUSI2SDZrm_Int:
6902   case X86::VCVTUSI642SDZrr:
6903   case X86::VCVTUSI642SDZrm:
6904   case X86::VCVTUSI642SDZrr_Int:
6905   case X86::VCVTUSI642SDZrrb_Int:
6906   case X86::VCVTUSI642SDZrm_Int:
6907   case X86::VCVTSI2SHZrr:
6908   case X86::VCVTSI2SHZrm:
6909   case X86::VCVTSI2SHZrr_Int:
6910   case X86::VCVTSI2SHZrrb_Int:
6911   case X86::VCVTSI2SHZrm_Int:
6912   case X86::VCVTSI642SHZrr:
6913   case X86::VCVTSI642SHZrm:
6914   case X86::VCVTSI642SHZrr_Int:
6915   case X86::VCVTSI642SHZrrb_Int:
6916   case X86::VCVTSI642SHZrm_Int:
6917   case X86::VCVTUSI2SHZrr:
6918   case X86::VCVTUSI2SHZrm:
6919   case X86::VCVTUSI2SHZrr_Int:
6920   case X86::VCVTUSI2SHZrrb_Int:
6921   case X86::VCVTUSI2SHZrm_Int:
6922   case X86::VCVTUSI642SHZrr:
6923   case X86::VCVTUSI642SHZrm:
6924   case X86::VCVTUSI642SHZrr_Int:
6925   case X86::VCVTUSI642SHZrrb_Int:
6926   case X86::VCVTUSI642SHZrm_Int:
6927     // Load folding won't affect the undef register update since the input is
6928     // a GPR.
6929     return OpNum == 1 && !ForLoadFold;
6930   case X86::VCVTSD2SSrr:
6931   case X86::VCVTSD2SSrm:
6932   case X86::VCVTSD2SSrr_Int:
6933   case X86::VCVTSD2SSrm_Int:
6934   case X86::VCVTSS2SDrr:
6935   case X86::VCVTSS2SDrm:
6936   case X86::VCVTSS2SDrr_Int:
6937   case X86::VCVTSS2SDrm_Int:
6938   case X86::VRCPSSr:
6939   case X86::VRCPSSr_Int:
6940   case X86::VRCPSSm:
6941   case X86::VRCPSSm_Int:
6942   case X86::VROUNDSDri:
6943   case X86::VROUNDSDmi:
6944   case X86::VROUNDSDri_Int:
6945   case X86::VROUNDSDmi_Int:
6946   case X86::VROUNDSSri:
6947   case X86::VROUNDSSmi:
6948   case X86::VROUNDSSri_Int:
6949   case X86::VROUNDSSmi_Int:
6950   case X86::VRSQRTSSr:
6951   case X86::VRSQRTSSr_Int:
6952   case X86::VRSQRTSSm:
6953   case X86::VRSQRTSSm_Int:
6954   case X86::VSQRTSSr:
6955   case X86::VSQRTSSr_Int:
6956   case X86::VSQRTSSm:
6957   case X86::VSQRTSSm_Int:
6958   case X86::VSQRTSDr:
6959   case X86::VSQRTSDr_Int:
6960   case X86::VSQRTSDm:
6961   case X86::VSQRTSDm_Int:
6962   // AVX-512
6963   case X86::VCVTSD2SSZrr:
6964   case X86::VCVTSD2SSZrr_Int:
6965   case X86::VCVTSD2SSZrrb_Int:
6966   case X86::VCVTSD2SSZrm:
6967   case X86::VCVTSD2SSZrm_Int:
6968   case X86::VCVTSS2SDZrr:
6969   case X86::VCVTSS2SDZrr_Int:
6970   case X86::VCVTSS2SDZrrb_Int:
6971   case X86::VCVTSS2SDZrm:
6972   case X86::VCVTSS2SDZrm_Int:
6973   case X86::VGETEXPSDZr:
6974   case X86::VGETEXPSDZrb:
6975   case X86::VGETEXPSDZm:
6976   case X86::VGETEXPSSZr:
6977   case X86::VGETEXPSSZrb:
6978   case X86::VGETEXPSSZm:
6979   case X86::VGETMANTSDZrri:
6980   case X86::VGETMANTSDZrrib:
6981   case X86::VGETMANTSDZrmi:
6982   case X86::VGETMANTSSZrri:
6983   case X86::VGETMANTSSZrrib:
6984   case X86::VGETMANTSSZrmi:
6985   case X86::VRNDSCALESDZrri:
6986   case X86::VRNDSCALESDZrri_Int:
6987   case X86::VRNDSCALESDZrrib_Int:
6988   case X86::VRNDSCALESDZrmi:
6989   case X86::VRNDSCALESDZrmi_Int:
6990   case X86::VRNDSCALESSZrri:
6991   case X86::VRNDSCALESSZrri_Int:
6992   case X86::VRNDSCALESSZrrib_Int:
6993   case X86::VRNDSCALESSZrmi:
6994   case X86::VRNDSCALESSZrmi_Int:
6995   case X86::VRCP14SDZrr:
6996   case X86::VRCP14SDZrm:
6997   case X86::VRCP14SSZrr:
6998   case X86::VRCP14SSZrm:
6999   case X86::VRCPSHZrr:
7000   case X86::VRCPSHZrm:
7001   case X86::VRSQRTSHZrr:
7002   case X86::VRSQRTSHZrm:
7003   case X86::VREDUCESHZrmi:
7004   case X86::VREDUCESHZrri:
7005   case X86::VREDUCESHZrrib:
7006   case X86::VGETEXPSHZr:
7007   case X86::VGETEXPSHZrb:
7008   case X86::VGETEXPSHZm:
7009   case X86::VGETMANTSHZrri:
7010   case X86::VGETMANTSHZrrib:
7011   case X86::VGETMANTSHZrmi:
7012   case X86::VRNDSCALESHZrri:
7013   case X86::VRNDSCALESHZrri_Int:
7014   case X86::VRNDSCALESHZrrib_Int:
7015   case X86::VRNDSCALESHZrmi:
7016   case X86::VRNDSCALESHZrmi_Int:
7017   case X86::VSQRTSHZr:
7018   case X86::VSQRTSHZr_Int:
7019   case X86::VSQRTSHZrb_Int:
7020   case X86::VSQRTSHZm:
7021   case X86::VSQRTSHZm_Int:
7022   case X86::VRCP28SDZr:
7023   case X86::VRCP28SDZrb:
7024   case X86::VRCP28SDZm:
7025   case X86::VRCP28SSZr:
7026   case X86::VRCP28SSZrb:
7027   case X86::VRCP28SSZm:
7028   case X86::VREDUCESSZrmi:
7029   case X86::VREDUCESSZrri:
7030   case X86::VREDUCESSZrrib:
7031   case X86::VRSQRT14SDZrr:
7032   case X86::VRSQRT14SDZrm:
7033   case X86::VRSQRT14SSZrr:
7034   case X86::VRSQRT14SSZrm:
7035   case X86::VRSQRT28SDZr:
7036   case X86::VRSQRT28SDZrb:
7037   case X86::VRSQRT28SDZm:
7038   case X86::VRSQRT28SSZr:
7039   case X86::VRSQRT28SSZrb:
7040   case X86::VRSQRT28SSZm:
7041   case X86::VSQRTSSZr:
7042   case X86::VSQRTSSZr_Int:
7043   case X86::VSQRTSSZrb_Int:
7044   case X86::VSQRTSSZm:
7045   case X86::VSQRTSSZm_Int:
7046   case X86::VSQRTSDZr:
7047   case X86::VSQRTSDZr_Int:
7048   case X86::VSQRTSDZrb_Int:
7049   case X86::VSQRTSDZm:
7050   case X86::VSQRTSDZm_Int:
7051   case X86::VCVTSD2SHZrr:
7052   case X86::VCVTSD2SHZrr_Int:
7053   case X86::VCVTSD2SHZrrb_Int:
7054   case X86::VCVTSD2SHZrm:
7055   case X86::VCVTSD2SHZrm_Int:
7056   case X86::VCVTSS2SHZrr:
7057   case X86::VCVTSS2SHZrr_Int:
7058   case X86::VCVTSS2SHZrrb_Int:
7059   case X86::VCVTSS2SHZrm:
7060   case X86::VCVTSS2SHZrm_Int:
7061   case X86::VCVTSH2SDZrr:
7062   case X86::VCVTSH2SDZrr_Int:
7063   case X86::VCVTSH2SDZrrb_Int:
7064   case X86::VCVTSH2SDZrm:
7065   case X86::VCVTSH2SDZrm_Int:
7066   case X86::VCVTSH2SSZrr:
7067   case X86::VCVTSH2SSZrr_Int:
7068   case X86::VCVTSH2SSZrrb_Int:
7069   case X86::VCVTSH2SSZrm:
7070   case X86::VCVTSH2SSZrm_Int:
7071     return OpNum == 1;
7072   case X86::VMOVSSZrrk:
7073   case X86::VMOVSDZrrk:
7074     return OpNum == 3 && !ForLoadFold;
7075   case X86::VMOVSSZrrkz:
7076   case X86::VMOVSDZrrkz:
7077     return OpNum == 2 && !ForLoadFold;
7078   }
7079 
7080   return false;
7081 }
7082 
7083 /// Inform the BreakFalseDeps pass how many idle instructions we would like
7084 /// before certain undef register reads.
7085 ///
7086 /// This catches the VCVTSI2SD family of instructions:
7087 ///
7088 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
7089 ///
7090 /// We should be careful *not* to catch VXOR idioms, which are presumably
7091 /// handled specially in the pipeline:
7092 ///
7093 /// vxorps undef %xmm1, undef %xmm1, %xmm1
7094 ///
7095 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
7096 /// high bits that are passed-through are not live.
7097 unsigned
7098 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
7099                                    const TargetRegisterInfo *TRI) const {
7100   const MachineOperand &MO = MI.getOperand(OpNum);
7101   if (MO.getReg().isPhysical() && hasUndefRegUpdate(MI.getOpcode(), OpNum))
7102     return UndefRegClearance;
7103 
7104   return 0;
7105 }
7106 
7107 void X86InstrInfo::breakPartialRegDependency(
7108     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
7109   Register Reg = MI.getOperand(OpNum).getReg();
7110   // If MI kills this register, the false dependence is already broken.
7111   if (MI.killsRegister(Reg, TRI))
7112     return;
7113 
7114   if (X86::VR128RegClass.contains(Reg)) {
7115     // These instructions are all floating point domain, so xorps is the best
7116     // choice.
7117     unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
7118     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
7119         .addReg(Reg, RegState::Undef)
7120         .addReg(Reg, RegState::Undef);
7121     MI.addRegisterKilled(Reg, TRI, true);
7122   } else if (X86::VR256RegClass.contains(Reg)) {
7123     // Use vxorps to clear the full ymm register.
7124     // It wants to read and write the xmm sub-register.
7125     Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
7126     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
7127         .addReg(XReg, RegState::Undef)
7128         .addReg(XReg, RegState::Undef)
7129         .addReg(Reg, RegState::ImplicitDefine);
7130     MI.addRegisterKilled(Reg, TRI, true);
7131   } else if (X86::VR128XRegClass.contains(Reg)) {
7132     // Only handle VLX targets.
7133     if (!Subtarget.hasVLX())
7134       return;
7135     // Since vxorps requires AVX512DQ, vpxord should be the best choice.
7136     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), Reg)
7137         .addReg(Reg, RegState::Undef)
7138         .addReg(Reg, RegState::Undef);
7139     MI.addRegisterKilled(Reg, TRI, true);
7140   } else if (X86::VR256XRegClass.contains(Reg) ||
7141              X86::VR512RegClass.contains(Reg)) {
7142     // Only handle VLX targets.
7143     if (!Subtarget.hasVLX())
7144       return;
7145     // Use vpxord to clear the full ymm/zmm register.
7146     // It wants to read and write the xmm sub-register.
7147     Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
7148     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), XReg)
7149         .addReg(XReg, RegState::Undef)
7150         .addReg(XReg, RegState::Undef)
7151         .addReg(Reg, RegState::ImplicitDefine);
7152     MI.addRegisterKilled(Reg, TRI, true);
7153   } else if (X86::GR64RegClass.contains(Reg)) {
7154     // Use XOR32rr because it has a shorter encoding and zeros the upper bits
7155     // as well.
7156     Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
7157     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
7158         .addReg(XReg, RegState::Undef)
7159         .addReg(XReg, RegState::Undef)
7160         .addReg(Reg, RegState::ImplicitDefine);
7161     MI.addRegisterKilled(Reg, TRI, true);
7162   } else if (X86::GR32RegClass.contains(Reg)) {
7163     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
7164         .addReg(Reg, RegState::Undef)
7165         .addReg(Reg, RegState::Undef);
7166     MI.addRegisterKilled(Reg, TRI, true);
7167   } else if ((X86::GR16RegClass.contains(Reg) ||
7168               X86::GR8RegClass.contains(Reg)) &&
7169              X86II::hasNewDataDest(MI.getDesc().TSFlags)) {
7170     // This case is only expected for NDD ops, which appear to be partial
7171     // writes but are not, due to the zeroing of the upper part. Here
7172     // we add an implicit def of the super-register, which prevents
7173     // CompressEVEX from converting this to a legacy form.
7174     Register SuperReg = getX86SubSuperRegister(Reg, 64);
7175     MachineInstrBuilder BuildMI(*MI.getParent()->getParent(), &MI);
7176     if (!MI.definesRegister(SuperReg, /*TRI=*/nullptr))
7177       BuildMI.addReg(SuperReg, RegState::ImplicitDefine);
7178   }
7179 }
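// Illustrative sketch (assumed registers, not from the source): for a VR128
// destination on an AVX target, the code above rewrites a partial write such
// as
//
//   vcvtsi2sdq %rax, %xmm0, %xmm0        ; reads stale upper bits of %xmm0
//
// into
//
//   vxorps %xmm0, %xmm0, %xmm0           ; dependency-breaking zero idiom
//   vcvtsi2sdq %rax, %xmm0, %xmm0        ; now reads a freshly defined value
//
// so the convert no longer carries a false dependence on the previous writer
// of %xmm0.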
7180 
7181 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
7182                         int PtrOffset = 0) {
7183   unsigned NumAddrOps = MOs.size();
7184 
7185   if (NumAddrOps < 4) {
7186     // FrameIndex only - add an immediate offset (whether it's zero or not).
7187     for (unsigned i = 0; i != NumAddrOps; ++i)
7188       MIB.add(MOs[i]);
7189     addOffset(MIB, PtrOffset);
7190   } else {
7191     // General Memory Addressing - we need to add any offset to an existing
7192     // offset.
7193     assert(MOs.size() == 5 && "Unexpected memory operand list length");
7194     for (unsigned i = 0; i != NumAddrOps; ++i) {
7195       const MachineOperand &MO = MOs[i];
7196       if (i == 3 && PtrOffset != 0) {
7197         MIB.addDisp(MO, PtrOffset);
7198       } else {
7199         MIB.add(MO);
7200       }
7201     }
7202   }
7203 }
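// Hedged example of the two shapes addOperands accepts: a frame-index fold
// passes a single operand such as { %stack.0 } and the immediate offset is
// appended via addOffset(), while a general memory fold passes all five X86
// address operands { base, scale, index, disp, segment } and any PtrOffset
// is merged into the displacement at index 3.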
7204 
7205 static void updateOperandRegConstraints(MachineFunction &MF,
7206                                         MachineInstr &NewMI,
7207                                         const TargetInstrInfo &TII) {
7208   MachineRegisterInfo &MRI = MF.getRegInfo();
7209   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
7210 
7211   for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
7212     MachineOperand &MO = NewMI.getOperand(Idx);
7213     // We only need to update constraints on virtual register operands.
7214     if (!MO.isReg())
7215       continue;
7216     Register Reg = MO.getReg();
7217     if (!Reg.isVirtual())
7218       continue;
7219 
7220     auto *NewRC = MRI.constrainRegClass(
7221         Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
7222     if (!NewRC) {
7223       LLVM_DEBUG(
7224           dbgs() << "WARNING: Unable to update register constraint for operand "
7225                  << Idx << " of instruction:\n";
7226           NewMI.dump(); dbgs() << "\n");
7227     }
7228   }
7229 }
7230 
7231 static MachineInstr *fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
7232                                      ArrayRef<MachineOperand> MOs,
7233                                      MachineBasicBlock::iterator InsertPt,
7234                                      MachineInstr &MI,
7235                                      const TargetInstrInfo &TII) {
7236   // Create the base instruction with the memory operand as the first part.
7237   // Omit the implicit operands, something BuildMI can't do.
7238   MachineInstr *NewMI =
7239       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
7240   MachineInstrBuilder MIB(MF, NewMI);
7241   addOperands(MIB, MOs);
7242 
7243   // Loop over the rest of the ri operands, converting them over.
7244   unsigned NumOps = MI.getDesc().getNumOperands() - 2;
7245   for (unsigned i = 0; i != NumOps; ++i) {
7246     MachineOperand &MO = MI.getOperand(i + 2);
7247     MIB.add(MO);
7248   }
7249   for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), NumOps + 2))
7250     MIB.add(MO);
7251 
7252   updateOperandRegConstraints(MF, *NewMI, TII);
7253 
7254   MachineBasicBlock *MBB = InsertPt->getParent();
7255   MBB->insert(InsertPt, NewMI);
7256 
7257   return MIB;
7258 }
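// Illustrative sketch of a two-address fold (operand values assumed): the
// tied def/use of
//
//   %eax = ADD32rr %eax(tied), %ecx, implicit-def $eflags
//
// is replaced, both def and use at once, by the memory reference, yielding
// the read-modify-write form
//
//   ADD32mr %stack.0, 1, $noreg, 0, $noreg, %ecx, implicit-def $eflags
//
// which is why the memory operands are emitted first above and operands 0
// and 1 of the original instruction are skipped.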
7259 
7260 static MachineInstr *fuseInst(MachineFunction &MF, unsigned Opcode,
7261                               unsigned OpNo, ArrayRef<MachineOperand> MOs,
7262                               MachineBasicBlock::iterator InsertPt,
7263                               MachineInstr &MI, const TargetInstrInfo &TII,
7264                               int PtrOffset = 0) {
7265   // Omit the implicit operands, something BuildMI can't do.
7266   MachineInstr *NewMI =
7267       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
7268   MachineInstrBuilder MIB(MF, NewMI);
7269 
7270   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
7271     MachineOperand &MO = MI.getOperand(i);
7272     if (i == OpNo) {
7273       assert(MO.isReg() && "Expected to fold into reg operand!");
7274       addOperands(MIB, MOs, PtrOffset);
7275     } else {
7276       MIB.add(MO);
7277     }
7278   }
7279 
7280   updateOperandRegConstraints(MF, *NewMI, TII);
7281 
7282   // Copy the NoFPExcept flag from the instruction we're fusing.
7283   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
7284     NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
7285 
7286   MachineBasicBlock *MBB = InsertPt->getParent();
7287   MBB->insert(InsertPt, NewMI);
7288 
7289   return MIB;
7290 }
7291 
7292 static MachineInstr *makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
7293                                 ArrayRef<MachineOperand> MOs,
7294                                 MachineBasicBlock::iterator InsertPt,
7295                                 MachineInstr &MI) {
7296   MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
7297                                     MI.getDebugLoc(), TII.get(Opcode));
7298   addOperands(MIB, MOs);
7299   return MIB.addImm(0);
7300 }
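// Hedged example: folding a spilled MOV32r0 pseudo produces a direct store
// of zero, e.g. MOV32mi %stack.0, 1, $noreg, 0, $noreg, 0 ("movl $0, mem"),
// avoiding materializing the zero in a register first.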
7301 
7302 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
7303     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
7304     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
7305     unsigned Size, Align Alignment) const {
7306   switch (MI.getOpcode()) {
7307   case X86::INSERTPSrri:
7308   case X86::VINSERTPSrri:
7309   case X86::VINSERTPSZrri:
7310     // Attempt to convert a load of the inserted vector into a folded load
7311     // of a single float.
7312     if (OpNum == 2) {
7313       unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
7314       unsigned ZMask = Imm & 15;
7315       unsigned DstIdx = (Imm >> 4) & 3;
7316       unsigned SrcIdx = (Imm >> 6) & 3;
7317 
7318       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7319       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
7320       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7321       if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
7322           (MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
7323         int PtrOffset = SrcIdx * 4;
7324         unsigned NewImm = (DstIdx << 4) | ZMask;
7325         unsigned NewOpCode =
7326             (MI.getOpcode() == X86::VINSERTPSZrri)  ? X86::VINSERTPSZrmi
7327             : (MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
7328                                                     : X86::INSERTPSrmi;
7329         MachineInstr *NewMI =
7330             fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
7331         NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
7332         return NewMI;
7333       }
7334     }
7335     break;
7336   case X86::MOVHLPSrr:
7337   case X86::VMOVHLPSrr:
7338   case X86::VMOVHLPSZrr:
7339     // Move the upper 64 bits of the second operand to the lower 64 bits.
7340     // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
7341     // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
7342     if (OpNum == 2) {
7343       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7344       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
7345       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7346       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
7347         unsigned NewOpCode =
7348             (MI.getOpcode() == X86::VMOVHLPSZrr)  ? X86::VMOVLPSZ128rm
7349             : (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm
7350                                                   : X86::MOVLPSrm;
7351         MachineInstr *NewMI =
7352             fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
7353         return NewMI;
7354       }
7355     }
7356     break;
7357   case X86::UNPCKLPDrr:
7358     // If we won't be able to fold this to the memory form of UNPCKL, use
7359     // MOVHPD instead. Done as custom because we can't have this in the load
7360     // table twice.
7361     if (OpNum == 2) {
7362       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7363       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
7364       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7365       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
7366         MachineInstr *NewMI =
7367             fuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
7368         return NewMI;
7369       }
7370     }
7371     break;
7372   case X86::MOV32r0:
7373     if (auto *NewMI =
7374             makeM0Inst(*this, (Size == 4) ? X86::MOV32mi : X86::MOV64mi32, MOs,
7375                        InsertPt, MI))
7376       return NewMI;
7377     break;
7378   }
7379 
7380   return nullptr;
7381 }
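// Illustrative sketch of the INSERTPS custom fold above (values assumed):
// with SrcIdx = 2 and DstIdx = 1,
//
//   %v = load <4 x float> from %stack.0
//   %xmm0 = INSERTPSrri %xmm0, %v, imm
//
// becomes a 4-byte load of element 2 only, via PtrOffset = 2 * 4 = 8:
//
//   %xmm0 = INSERTPSrmi %xmm0, %stack.0 + 8, NewImm
//
// NewImm clears the SrcIdx bits because the loaded float always arrives in
// lane 0 of the memory form.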
7382 
7383 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
7384                                                MachineInstr &MI) {
7385   if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/ true) ||
7386       !MI.getOperand(1).isReg())
7387     return false;
7388 
7389   // There are two cases we need to handle, depending on where in the
7390   // pipeline the folding attempt is being made:
7391   // - The register has the undef flag set.
7392   // - The register is produced by the IMPLICIT_DEF instruction.
7393 
7394   if (MI.getOperand(1).isUndef())
7395     return true;
7396 
7397   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7398   MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
7399   return VRegDef && VRegDef->isImplicitDef();
7400 }
7401 
7402 unsigned X86InstrInfo::commuteOperandsForFold(MachineInstr &MI,
7403                                               unsigned Idx1) const {
7404   unsigned Idx2 = CommuteAnyOperandIndex;
7405   if (!findCommutedOpIndices(MI, Idx1, Idx2))
7406     return Idx1;
7407 
7408   bool HasDef = MI.getDesc().getNumDefs();
7409   Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
7410   Register Reg1 = MI.getOperand(Idx1).getReg();
7411   Register Reg2 = MI.getOperand(Idx2).getReg();
7412   bool Tied1 = 0 == MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO);
7413   bool Tied2 = 0 == MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO);
7414 
7415   // If either of the commutable operands is tied to the destination
7416   // then we cannot commute + fold.
7417   if ((HasDef && Reg0 == Reg1 && Tied1) || (HasDef && Reg0 == Reg2 && Tied2))
7418     return Idx1;
7419 
7420   return commuteInstruction(MI, false, Idx1, Idx2) ? Idx2 : Idx1;
7421 }
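// Hedged usage note: a return value equal to Idx1 means "no commute was
// performed". On success the instruction has been commuted in place and the
// returned index names the operand that now holds the foldable register, so
// callers that still fail to fold must commute back (see the AllowCommute
// paths below).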
7422 
7423 static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx) {
7424   if (PrintFailedFusing && !MI.isCopy())
7425     dbgs() << "We failed to fuse operand " << Idx << " in " << MI;
7426 }
7427 
7428 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
7429     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
7430     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
7431     unsigned Size, Align Alignment, bool AllowCommute) const {
7432   bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
7433   unsigned Opc = MI.getOpcode();
7434 
7435   // For CPUs that favor the register form of a call or push,
7436   // do not fold loads into calls or pushes, unless optimizing for size
7437   // aggressively.
7438   if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
7439       (Opc == X86::CALL32r || Opc == X86::CALL64r ||
7440        Opc == X86::CALL64r_ImpCall || Opc == X86::PUSH16r ||
7441        Opc == X86::PUSH32r || Opc == X86::PUSH64r))
7442     return nullptr;
7443 
7444   // Avoid partial and undef register update stalls unless optimizing for size.
7445   if (!MF.getFunction().hasOptSize() &&
7446       (hasPartialRegUpdate(Opc, Subtarget, /*ForLoadFold*/ true) ||
7447        shouldPreventUndefRegUpdateMemFold(MF, MI)))
7448     return nullptr;
7449 
7450   unsigned NumOps = MI.getDesc().getNumOperands();
7451   bool IsTwoAddr = NumOps > 1 && OpNum < 2 && MI.getOperand(0).isReg() &&
7452                    MI.getOperand(1).isReg() &&
7453                    MI.getOperand(0).getReg() == MI.getOperand(1).getReg();
7454 
7455   // FIXME: AsmPrinter doesn't know how to handle
7456   // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
7457   if (Opc == X86::ADD32ri &&
7458       MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
7459     return nullptr;
7460 
7461   // GOTTPOFF relocation loads can only be folded into add instructions.
7462   // FIXME: Need to exclude other relocations that only support specific
7463   // instructions.
7464   if (MOs.size() == X86::AddrNumOperands &&
7465       MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
7466       Opc != X86::ADD64rr)
7467     return nullptr;
7468 
7469   // Don't fold loads into indirect calls that need a KCFI check as we'll
7470   // have to unfold these in X86TargetLowering::EmitKCFICheck anyway.
7471   if (MI.isCall() && MI.getCFIType())
7472     return nullptr;
7473 
7474   // Attempt to fold any custom cases we have.
7475   if (auto *CustomMI = foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt,
7476                                                Size, Alignment))
7477     return CustomMI;
7478 
7479   // Folding a memory location into the two-address part of a two-address
7480   // instruction is different from folding it in other places. It requires
7481   // replacing the *two* registers with the memory location.
7482   //
7483   // Utilize the mapping NonNDD -> RMW for the NDD variant.
7484   unsigned NonNDOpc = Subtarget.hasNDD() ? X86::getNonNDVariant(Opc) : 0U;
7485   const X86FoldTableEntry *I =
7486       IsTwoAddr ? lookupTwoAddrFoldTable(NonNDOpc ? NonNDOpc : Opc)
7487                 : lookupFoldTable(Opc, OpNum);
7488 
7489   MachineInstr *NewMI = nullptr;
7490   if (I) {
7491     unsigned Opcode = I->DstOp;
7492     if (Alignment <
7493         Align(1ULL << ((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT)))
7494       return nullptr;
7495     bool NarrowToMOV32rm = false;
7496     if (Size) {
7497       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7498       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
7499       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7500       // Check if it's safe to fold the load. If the size of the object is
7501       // narrower than the load width, then it's not.
7502       // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
7503       if ((I->Flags & TB_FOLDED_LOAD) && Size < RCSize) {
7504         // If this is a 64-bit load but the spill slot is only 32 bits, we can
7505         // do a 32-bit load, which is implicitly zero-extended. This is likely
7506         // due to live interval analysis remat'ing a load from a stack slot.
7507         if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
7508           return nullptr;
7509         if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
7510           return nullptr;
7511         Opcode = X86::MOV32rm;
7512         NarrowToMOV32rm = true;
7513       }
7514       // For stores, make sure the size of the object is equal to the size of
7515       // the store. If the object is larger, the extra bits would be garbage. If
7516       // the object is smaller, we might overwrite another object or fault.
7517       if ((I->Flags & TB_FOLDED_STORE) && Size != RCSize)
7518         return nullptr;
7519     }
7520 
7521     NewMI = IsTwoAddr ? fuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this)
7522                       : fuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
7523 
7524     if (NarrowToMOV32rm) {
7525       // This is the special case where we use a MOV32rm to load a 32-bit
7526       // value and zero-extend the top bits; change the destination register
7527       // to a 32-bit one.
7528       Register DstReg = NewMI->getOperand(0).getReg();
7529       if (DstReg.isPhysical())
7530         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
7531       else
7532         NewMI->getOperand(0).setSubReg(X86::sub_32bit);
7533     }
7534     return NewMI;
7535   }
7536 
7537   if (AllowCommute) {
7538     // If the instruction and target operand are commutable, commute the
7539     // instruction and try again.
7540     unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
7541     if (CommuteOpIdx2 == OpNum) {
7542       printFailMsgforFold(MI, OpNum);
7543       return nullptr;
7544     }
7545     // Attempt to fold with the commuted version of the instruction.
7546     NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
7547                                   Alignment, /*AllowCommute=*/false);
7548     if (NewMI)
7549       return NewMI;
7550     // Folding failed again - undo the commute before returning.
7551     commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
7552   }
7553 
7554   printFailMsgforFold(MI, OpNum);
7555   return nullptr;
7556 }
7557 
7558 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
7559     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
7560     MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
7561     VirtRegMap *VRM) const {
7562   // Check switch flag
7563   if (NoFusing)
7564     return nullptr;
7565 
7566   // Avoid partial and undef register update stalls unless optimizing for size.
7567   if (!MF.getFunction().hasOptSize() &&
7568       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/ true) ||
7569        shouldPreventUndefRegUpdateMemFold(MF, MI)))
7570     return nullptr;
7571 
7572   // Don't fold subreg spills, or reloads that use a high subreg.
7573   for (auto Op : Ops) {
7574     MachineOperand &MO = MI.getOperand(Op);
7575     auto SubReg = MO.getSubReg();
7576     // MOV32r0 is special b/c it's used to clear a 64-bit register too.
7577     // (See patterns for MOV32r0 in TD files).
7578     if (MI.getOpcode() == X86::MOV32r0 && SubReg == X86::sub_32bit)
7579       continue;
7580     if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
7581       return nullptr;
7582   }
7583 
7584   const MachineFrameInfo &MFI = MF.getFrameInfo();
7585   unsigned Size = MFI.getObjectSize(FrameIndex);
7586   Align Alignment = MFI.getObjectAlign(FrameIndex);
7587   // If the function stack isn't realigned we don't want to fold instructions
7588   // that need increased alignment.
7589   if (!RI.hasStackRealignment(MF))
7590     Alignment =
7591         std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
7592 
7593   auto Impl = [&]() {
7594     return foldMemoryOperandImpl(MF, MI, Ops[0],
7595                                  MachineOperand::CreateFI(FrameIndex), InsertPt,
7596                                  Size, Alignment, /*AllowCommute=*/true);
7597   };
7598   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
7599     unsigned NewOpc = 0;
7600     unsigned RCSize = 0;
7601     unsigned Opc = MI.getOpcode();
7602     switch (Opc) {
7603     default:
7604       // NDD can be folded into RMW even though its Op0 and Op1 are not tied.
7605       return (Subtarget.hasNDD() ? X86::getNonNDVariant(Opc) : 0U) ? Impl()
7606                                                                    : nullptr;
7607     case X86::TEST8rr:
7608       NewOpc = X86::CMP8ri;
7609       RCSize = 1;
7610       break;
7611     case X86::TEST16rr:
7612       NewOpc = X86::CMP16ri;
7613       RCSize = 2;
7614       break;
7615     case X86::TEST32rr:
7616       NewOpc = X86::CMP32ri;
7617       RCSize = 4;
7618       break;
7619     case X86::TEST64rr:
7620       NewOpc = X86::CMP64ri32;
7621       RCSize = 8;
7622       break;
7623     }
7624     // Check if it's safe to fold the load. If the size of the object is
7625     // narrower than the load width, then it's not.
7626     if (Size < RCSize)
7627       return nullptr;
7628     // Change to CMPXXri r, 0 first.
7629     MI.setDesc(get(NewOpc));
7630     MI.getOperand(1).ChangeToImmediate(0);
7631   } else if (Ops.size() != 1)
7632     return nullptr;
7633 
7634   return Impl();
7635 }
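// Illustrative sketch of the TEST -> CMP rewrite above (registers assumed):
// when both register uses of "TEST32rr %eax, %eax" are being folded against
// the same spill slot, the instruction is first rewritten to
// "CMP32ri %eax, 0" and then the single remaining register use is folded,
// producing "CMP32mi %stack.0, ..., 0" with the same EFLAGS result.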
7636 
7637 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
7638 /// because the latter uses contents that wouldn't be defined in the folded
7639 /// version.  For instance, this transformation isn't legal:
7640 ///   movss (%rdi), %xmm0
7641 ///   addps %xmm0, %xmm0
7642 /// ->
7643 ///   addps (%rdi), %xmm0
7644 ///
7645 /// But this one is:
7646 ///   movss (%rdi), %xmm0
7647 ///   addss %xmm0, %xmm0
7648 /// ->
7649 ///   addss (%rdi), %xmm0
7650 ///
7651 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
7652                                              const MachineInstr &UserMI,
7653                                              const MachineFunction &MF) {
7654   unsigned Opc = LoadMI.getOpcode();
7655   unsigned UserOpc = UserMI.getOpcode();
7656   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7657   const TargetRegisterClass *RC =
7658       MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
7659   unsigned RegSize = TRI.getRegSizeInBits(*RC);
7660 
7661   if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
7662        Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
7663        Opc == X86::VMOVSSZrm_alt) &&
7664       RegSize > 32) {
7665     // These instructions only load 32 bits, so we can't fold them if the
7666     // destination register is wider than 32 bits (4 bytes) and the user
7667     // instruction isn't scalar (SS).
7668     switch (UserOpc) {
7669     case X86::CVTSS2SDrr_Int:
7670     case X86::VCVTSS2SDrr_Int:
7671     case X86::VCVTSS2SDZrr_Int:
7672     case X86::VCVTSS2SDZrrk_Int:
7673     case X86::VCVTSS2SDZrrkz_Int:
7674     case X86::CVTSS2SIrr_Int:
7675     case X86::CVTSS2SI64rr_Int:
7676     case X86::VCVTSS2SIrr_Int:
7677     case X86::VCVTSS2SI64rr_Int:
7678     case X86::VCVTSS2SIZrr_Int:
7679     case X86::VCVTSS2SI64Zrr_Int:
7680     case X86::CVTTSS2SIrr_Int:
7681     case X86::CVTTSS2SI64rr_Int:
7682     case X86::VCVTTSS2SIrr_Int:
7683     case X86::VCVTTSS2SI64rr_Int:
7684     case X86::VCVTTSS2SIZrr_Int:
7685     case X86::VCVTTSS2SI64Zrr_Int:
7686     case X86::VCVTSS2USIZrr_Int:
7687     case X86::VCVTSS2USI64Zrr_Int:
7688     case X86::VCVTTSS2USIZrr_Int:
7689     case X86::VCVTTSS2USI64Zrr_Int:
7690     case X86::RCPSSr_Int:
7691     case X86::VRCPSSr_Int:
7692     case X86::RSQRTSSr_Int:
7693     case X86::VRSQRTSSr_Int:
7694     case X86::ROUNDSSri_Int:
7695     case X86::VROUNDSSri_Int:
7696     case X86::COMISSrr_Int:
7697     case X86::VCOMISSrr_Int:
7698     case X86::VCOMISSZrr_Int:
7699     case X86::UCOMISSrr_Int:
7700     case X86::VUCOMISSrr_Int:
7701     case X86::VUCOMISSZrr_Int:
7702     case X86::ADDSSrr_Int:
7703     case X86::VADDSSrr_Int:
7704     case X86::VADDSSZrr_Int:
7705     case X86::CMPSSrri_Int:
7706     case X86::VCMPSSrri_Int:
7707     case X86::VCMPSSZrri_Int:
7708     case X86::DIVSSrr_Int:
7709     case X86::VDIVSSrr_Int:
7710     case X86::VDIVSSZrr_Int:
7711     case X86::MAXSSrr_Int:
7712     case X86::VMAXSSrr_Int:
7713     case X86::VMAXSSZrr_Int:
7714     case X86::MINSSrr_Int:
7715     case X86::VMINSSrr_Int:
7716     case X86::VMINSSZrr_Int:
7717     case X86::MULSSrr_Int:
7718     case X86::VMULSSrr_Int:
7719     case X86::VMULSSZrr_Int:
7720     case X86::SQRTSSr_Int:
7721     case X86::VSQRTSSr_Int:
7722     case X86::VSQRTSSZr_Int:
7723     case X86::SUBSSrr_Int:
7724     case X86::VSUBSSrr_Int:
7725     case X86::VSUBSSZrr_Int:
7726     case X86::VADDSSZrrk_Int:
7727     case X86::VADDSSZrrkz_Int:
7728     case X86::VCMPSSZrrik_Int:
7729     case X86::VDIVSSZrrk_Int:
7730     case X86::VDIVSSZrrkz_Int:
7731     case X86::VMAXSSZrrk_Int:
7732     case X86::VMAXSSZrrkz_Int:
7733     case X86::VMINSSZrrk_Int:
7734     case X86::VMINSSZrrkz_Int:
7735     case X86::VMULSSZrrk_Int:
7736     case X86::VMULSSZrrkz_Int:
7737     case X86::VSQRTSSZrk_Int:
7738     case X86::VSQRTSSZrkz_Int:
7739     case X86::VSUBSSZrrk_Int:
7740     case X86::VSUBSSZrrkz_Int:
7741     case X86::VFMADDSS4rr_Int:
7742     case X86::VFNMADDSS4rr_Int:
7743     case X86::VFMSUBSS4rr_Int:
7744     case X86::VFNMSUBSS4rr_Int:
7745     case X86::VFMADD132SSr_Int:
7746     case X86::VFNMADD132SSr_Int:
7747     case X86::VFMADD213SSr_Int:
7748     case X86::VFNMADD213SSr_Int:
7749     case X86::VFMADD231SSr_Int:
7750     case X86::VFNMADD231SSr_Int:
7751     case X86::VFMSUB132SSr_Int:
7752     case X86::VFNMSUB132SSr_Int:
7753     case X86::VFMSUB213SSr_Int:
7754     case X86::VFNMSUB213SSr_Int:
7755     case X86::VFMSUB231SSr_Int:
7756     case X86::VFNMSUB231SSr_Int:
7757     case X86::VFMADD132SSZr_Int:
7758     case X86::VFNMADD132SSZr_Int:
7759     case X86::VFMADD213SSZr_Int:
7760     case X86::VFNMADD213SSZr_Int:
7761     case X86::VFMADD231SSZr_Int:
7762     case X86::VFNMADD231SSZr_Int:
7763     case X86::VFMSUB132SSZr_Int:
7764     case X86::VFNMSUB132SSZr_Int:
7765     case X86::VFMSUB213SSZr_Int:
7766     case X86::VFNMSUB213SSZr_Int:
7767     case X86::VFMSUB231SSZr_Int:
7768     case X86::VFNMSUB231SSZr_Int:
7769     case X86::VFMADD132SSZrk_Int:
7770     case X86::VFNMADD132SSZrk_Int:
7771     case X86::VFMADD213SSZrk_Int:
7772     case X86::VFNMADD213SSZrk_Int:
7773     case X86::VFMADD231SSZrk_Int:
7774     case X86::VFNMADD231SSZrk_Int:
7775     case X86::VFMSUB132SSZrk_Int:
7776     case X86::VFNMSUB132SSZrk_Int:
7777     case X86::VFMSUB213SSZrk_Int:
7778     case X86::VFNMSUB213SSZrk_Int:
7779     case X86::VFMSUB231SSZrk_Int:
7780     case X86::VFNMSUB231SSZrk_Int:
7781     case X86::VFMADD132SSZrkz_Int:
7782     case X86::VFNMADD132SSZrkz_Int:
7783     case X86::VFMADD213SSZrkz_Int:
7784     case X86::VFNMADD213SSZrkz_Int:
7785     case X86::VFMADD231SSZrkz_Int:
7786     case X86::VFNMADD231SSZrkz_Int:
7787     case X86::VFMSUB132SSZrkz_Int:
7788     case X86::VFNMSUB132SSZrkz_Int:
7789     case X86::VFMSUB213SSZrkz_Int:
7790     case X86::VFNMSUB213SSZrkz_Int:
7791     case X86::VFMSUB231SSZrkz_Int:
7792     case X86::VFNMSUB231SSZrkz_Int:
7793     case X86::VFIXUPIMMSSZrri:
7794     case X86::VFIXUPIMMSSZrrik:
7795     case X86::VFIXUPIMMSSZrrikz:
7796     case X86::VFPCLASSSSZri:
7797     case X86::VFPCLASSSSZrik:
7798     case X86::VGETEXPSSZr:
7799     case X86::VGETEXPSSZrk:
7800     case X86::VGETEXPSSZrkz:
7801     case X86::VGETMANTSSZrri:
7802     case X86::VGETMANTSSZrrik:
7803     case X86::VGETMANTSSZrrikz:
7804     case X86::VRANGESSZrri:
7805     case X86::VRANGESSZrrik:
7806     case X86::VRANGESSZrrikz:
7807     case X86::VRCP14SSZrr:
7808     case X86::VRCP14SSZrrk:
7809     case X86::VRCP14SSZrrkz:
7810     case X86::VRCP28SSZr:
7811     case X86::VRCP28SSZrk:
7812     case X86::VRCP28SSZrkz:
7813     case X86::VREDUCESSZrri:
7814     case X86::VREDUCESSZrrik:
7815     case X86::VREDUCESSZrrikz:
7816     case X86::VRNDSCALESSZrri_Int:
7817     case X86::VRNDSCALESSZrrik_Int:
7818     case X86::VRNDSCALESSZrrikz_Int:
7819     case X86::VRSQRT14SSZrr:
7820     case X86::VRSQRT14SSZrrk:
7821     case X86::VRSQRT14SSZrrkz:
7822     case X86::VRSQRT28SSZr:
7823     case X86::VRSQRT28SSZrk:
7824     case X86::VRSQRT28SSZrkz:
7825     case X86::VSCALEFSSZrr:
7826     case X86::VSCALEFSSZrrk:
7827     case X86::VSCALEFSSZrrkz:
7828       return false;
7829     default:
7830       return true;
7831     }
7832   }
7833 
7834   if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
7835        Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
7836        Opc == X86::VMOVSDZrm_alt) &&
7837       RegSize > 64) {
7838     // These instructions only load 64 bits, so we can't fold them if the
7839     // destination register is wider than 64 bits (8 bytes) and the user
7840     // instruction isn't scalar (SD).
7841     switch (UserOpc) {
7842     case X86::CVTSD2SSrr_Int:
7843     case X86::VCVTSD2SSrr_Int:
7844     case X86::VCVTSD2SSZrr_Int:
7845     case X86::VCVTSD2SSZrrk_Int:
7846     case X86::VCVTSD2SSZrrkz_Int:
7847     case X86::CVTSD2SIrr_Int:
7848     case X86::CVTSD2SI64rr_Int:
7849     case X86::VCVTSD2SIrr_Int:
7850     case X86::VCVTSD2SI64rr_Int:
7851     case X86::VCVTSD2SIZrr_Int:
7852     case X86::VCVTSD2SI64Zrr_Int:
7853     case X86::CVTTSD2SIrr_Int:
7854     case X86::CVTTSD2SI64rr_Int:
7855     case X86::VCVTTSD2SIrr_Int:
7856     case X86::VCVTTSD2SI64rr_Int:
7857     case X86::VCVTTSD2SIZrr_Int:
7858     case X86::VCVTTSD2SI64Zrr_Int:
7859     case X86::VCVTSD2USIZrr_Int:
7860     case X86::VCVTSD2USI64Zrr_Int:
7861     case X86::VCVTTSD2USIZrr_Int:
7862     case X86::VCVTTSD2USI64Zrr_Int:
7863     case X86::ROUNDSDri_Int:
7864     case X86::VROUNDSDri_Int:
7865     case X86::COMISDrr_Int:
7866     case X86::VCOMISDrr_Int:
7867     case X86::VCOMISDZrr_Int:
7868     case X86::UCOMISDrr_Int:
7869     case X86::VUCOMISDrr_Int:
7870     case X86::VUCOMISDZrr_Int:
7871     case X86::ADDSDrr_Int:
7872     case X86::VADDSDrr_Int:
7873     case X86::VADDSDZrr_Int:
7874     case X86::CMPSDrri_Int:
7875     case X86::VCMPSDrri_Int:
7876     case X86::VCMPSDZrri_Int:
7877     case X86::DIVSDrr_Int:
7878     case X86::VDIVSDrr_Int:
7879     case X86::VDIVSDZrr_Int:
7880     case X86::MAXSDrr_Int:
7881     case X86::VMAXSDrr_Int:
7882     case X86::VMAXSDZrr_Int:
7883     case X86::MINSDrr_Int:
7884     case X86::VMINSDrr_Int:
7885     case X86::VMINSDZrr_Int:
7886     case X86::MULSDrr_Int:
7887     case X86::VMULSDrr_Int:
7888     case X86::VMULSDZrr_Int:
7889     case X86::SQRTSDr_Int:
7890     case X86::VSQRTSDr_Int:
7891     case X86::VSQRTSDZr_Int:
7892     case X86::SUBSDrr_Int:
7893     case X86::VSUBSDrr_Int:
7894     case X86::VSUBSDZrr_Int:
7895     case X86::VADDSDZrrk_Int:
7896     case X86::VADDSDZrrkz_Int:
7897     case X86::VCMPSDZrrik_Int:
7898     case X86::VDIVSDZrrk_Int:
7899     case X86::VDIVSDZrrkz_Int:
7900     case X86::VMAXSDZrrk_Int:
7901     case X86::VMAXSDZrrkz_Int:
7902     case X86::VMINSDZrrk_Int:
7903     case X86::VMINSDZrrkz_Int:
7904     case X86::VMULSDZrrk_Int:
7905     case X86::VMULSDZrrkz_Int:
7906     case X86::VSQRTSDZrk_Int:
7907     case X86::VSQRTSDZrkz_Int:
7908     case X86::VSUBSDZrrk_Int:
7909     case X86::VSUBSDZrrkz_Int:
7910     case X86::VFMADDSD4rr_Int:
7911     case X86::VFNMADDSD4rr_Int:
7912     case X86::VFMSUBSD4rr_Int:
7913     case X86::VFNMSUBSD4rr_Int:
7914     case X86::VFMADD132SDr_Int:
7915     case X86::VFNMADD132SDr_Int:
7916     case X86::VFMADD213SDr_Int:
7917     case X86::VFNMADD213SDr_Int:
7918     case X86::VFMADD231SDr_Int:
7919     case X86::VFNMADD231SDr_Int:
7920     case X86::VFMSUB132SDr_Int:
7921     case X86::VFNMSUB132SDr_Int:
7922     case X86::VFMSUB213SDr_Int:
7923     case X86::VFNMSUB213SDr_Int:
7924     case X86::VFMSUB231SDr_Int:
7925     case X86::VFNMSUB231SDr_Int:
7926     case X86::VFMADD132SDZr_Int:
7927     case X86::VFNMADD132SDZr_Int:
7928     case X86::VFMADD213SDZr_Int:
7929     case X86::VFNMADD213SDZr_Int:
7930     case X86::VFMADD231SDZr_Int:
7931     case X86::VFNMADD231SDZr_Int:
7932     case X86::VFMSUB132SDZr_Int:
7933     case X86::VFNMSUB132SDZr_Int:
7934     case X86::VFMSUB213SDZr_Int:
7935     case X86::VFNMSUB213SDZr_Int:
7936     case X86::VFMSUB231SDZr_Int:
7937     case X86::VFNMSUB231SDZr_Int:
7938     case X86::VFMADD132SDZrk_Int:
7939     case X86::VFNMADD132SDZrk_Int:
7940     case X86::VFMADD213SDZrk_Int:
7941     case X86::VFNMADD213SDZrk_Int:
7942     case X86::VFMADD231SDZrk_Int:
7943     case X86::VFNMADD231SDZrk_Int:
7944     case X86::VFMSUB132SDZrk_Int:
7945     case X86::VFNMSUB132SDZrk_Int:
7946     case X86::VFMSUB213SDZrk_Int:
7947     case X86::VFNMSUB213SDZrk_Int:
7948     case X86::VFMSUB231SDZrk_Int:
7949     case X86::VFNMSUB231SDZrk_Int:
7950     case X86::VFMADD132SDZrkz_Int:
7951     case X86::VFNMADD132SDZrkz_Int:
7952     case X86::VFMADD213SDZrkz_Int:
7953     case X86::VFNMADD213SDZrkz_Int:
7954     case X86::VFMADD231SDZrkz_Int:
7955     case X86::VFNMADD231SDZrkz_Int:
7956     case X86::VFMSUB132SDZrkz_Int:
7957     case X86::VFNMSUB132SDZrkz_Int:
7958     case X86::VFMSUB213SDZrkz_Int:
7959     case X86::VFNMSUB213SDZrkz_Int:
7960     case X86::VFMSUB231SDZrkz_Int:
7961     case X86::VFNMSUB231SDZrkz_Int:
7962     case X86::VFIXUPIMMSDZrri:
7963     case X86::VFIXUPIMMSDZrrik:
7964     case X86::VFIXUPIMMSDZrrikz:
7965     case X86::VFPCLASSSDZri:
7966     case X86::VFPCLASSSDZrik:
7967     case X86::VGETEXPSDZr:
7968     case X86::VGETEXPSDZrk:
7969     case X86::VGETEXPSDZrkz:
7970     case X86::VGETMANTSDZrri:
7971     case X86::VGETMANTSDZrrik:
7972     case X86::VGETMANTSDZrrikz:
7973     case X86::VRANGESDZrri:
7974     case X86::VRANGESDZrrik:
7975     case X86::VRANGESDZrrikz:
7976     case X86::VRCP14SDZrr:
7977     case X86::VRCP14SDZrrk:
7978     case X86::VRCP14SDZrrkz:
7979     case X86::VRCP28SDZr:
7980     case X86::VRCP28SDZrk:
7981     case X86::VRCP28SDZrkz:
7982     case X86::VREDUCESDZrri:
7983     case X86::VREDUCESDZrrik:
7984     case X86::VREDUCESDZrrikz:
7985     case X86::VRNDSCALESDZrri_Int:
7986     case X86::VRNDSCALESDZrrik_Int:
7987     case X86::VRNDSCALESDZrrikz_Int:
7988     case X86::VRSQRT14SDZrr:
7989     case X86::VRSQRT14SDZrrk:
7990     case X86::VRSQRT14SDZrrkz:
7991     case X86::VRSQRT28SDZr:
7992     case X86::VRSQRT28SDZrk:
7993     case X86::VRSQRT28SDZrkz:
7994     case X86::VSCALEFSDZrr:
7995     case X86::VSCALEFSDZrrk:
7996     case X86::VSCALEFSDZrrkz:
7997       return false;
7998     default:
7999       return true;
8000     }
8001   }
8002 
8003   if ((Opc == X86::VMOVSHZrm || Opc == X86::VMOVSHZrm_alt) && RegSize > 16) {
8004     // These instructions only load 16 bits, so we can't fold them if the
8005     // destination register is wider than 16 bits (2 bytes) and the user
8006     // instruction isn't scalar (SH).
8007     switch (UserOpc) {
8008     case X86::VADDSHZrr_Int:
8009     case X86::VCMPSHZrri_Int:
8010     case X86::VDIVSHZrr_Int:
8011     case X86::VMAXSHZrr_Int:
8012     case X86::VMINSHZrr_Int:
8013     case X86::VMULSHZrr_Int:
8014     case X86::VSUBSHZrr_Int:
8015     case X86::VADDSHZrrk_Int:
8016     case X86::VADDSHZrrkz_Int:
8017     case X86::VCMPSHZrrik_Int:
8018     case X86::VDIVSHZrrk_Int:
8019     case X86::VDIVSHZrrkz_Int:
8020     case X86::VMAXSHZrrk_Int:
8021     case X86::VMAXSHZrrkz_Int:
8022     case X86::VMINSHZrrk_Int:
8023     case X86::VMINSHZrrkz_Int:
8024     case X86::VMULSHZrrk_Int:
8025     case X86::VMULSHZrrkz_Int:
8026     case X86::VSUBSHZrrk_Int:
8027     case X86::VSUBSHZrrkz_Int:
8028     case X86::VFMADD132SHZr_Int:
8029     case X86::VFNMADD132SHZr_Int:
8030     case X86::VFMADD213SHZr_Int:
8031     case X86::VFNMADD213SHZr_Int:
8032     case X86::VFMADD231SHZr_Int:
8033     case X86::VFNMADD231SHZr_Int:
8034     case X86::VFMSUB132SHZr_Int:
8035     case X86::VFNMSUB132SHZr_Int:
8036     case X86::VFMSUB213SHZr_Int:
8037     case X86::VFNMSUB213SHZr_Int:
8038     case X86::VFMSUB231SHZr_Int:
8039     case X86::VFNMSUB231SHZr_Int:
8040     case X86::VFMADD132SHZrk_Int:
8041     case X86::VFNMADD132SHZrk_Int:
8042     case X86::VFMADD213SHZrk_Int:
8043     case X86::VFNMADD213SHZrk_Int:
8044     case X86::VFMADD231SHZrk_Int:
8045     case X86::VFNMADD231SHZrk_Int:
8046     case X86::VFMSUB132SHZrk_Int:
8047     case X86::VFNMSUB132SHZrk_Int:
8048     case X86::VFMSUB213SHZrk_Int:
8049     case X86::VFNMSUB213SHZrk_Int:
8050     case X86::VFMSUB231SHZrk_Int:
8051     case X86::VFNMSUB231SHZrk_Int:
8052     case X86::VFMADD132SHZrkz_Int:
8053     case X86::VFNMADD132SHZrkz_Int:
8054     case X86::VFMADD213SHZrkz_Int:
8055     case X86::VFNMADD213SHZrkz_Int:
8056     case X86::VFMADD231SHZrkz_Int:
8057     case X86::VFNMADD231SHZrkz_Int:
8058     case X86::VFMSUB132SHZrkz_Int:
8059     case X86::VFNMSUB132SHZrkz_Int:
8060     case X86::VFMSUB213SHZrkz_Int:
8061     case X86::VFNMSUB213SHZrkz_Int:
8062     case X86::VFMSUB231SHZrkz_Int:
8063     case X86::VFNMSUB231SHZrkz_Int:
8064       return false;
8065     default:
8066       return true;
8067     }
8068   }
8069 
8070   return false;
8071 }
8072 
8073 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
8074     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
8075     MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
8076     LiveIntervals *LIS) const {
8077 
8078   // TODO: Support the case where LoadMI loads a wide register, but MI
8079   // only uses a subreg.
8080   for (auto Op : Ops) {
8081     if (MI.getOperand(Op).getSubReg())
8082       return nullptr;
8083   }
8084 
8085   // If loading from a FrameIndex, fold directly from the FrameIndex.
8086   unsigned NumOps = LoadMI.getDesc().getNumOperands();
8087   int FrameIndex;
8088   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
8089     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
8090       return nullptr;
8091     return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
8092   }
8093 
8094   // Check switch flag
8095   if (NoFusing)
8096     return nullptr;
8097 
8098   // Avoid partial and undef register update stalls unless optimizing for size.
8099   if (!MF.getFunction().hasOptSize() &&
8100       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/ true) ||
8101        shouldPreventUndefRegUpdateMemFold(MF, MI)))
8102     return nullptr;
8103 
8104   // Do not fold an NDD instruction with a memory instruction that has a
8105   // relocation, to avoid emitting an APX relocation when the flag is
8106   // disabled for backward compatibility.
8107   uint64_t TSFlags = MI.getDesc().TSFlags;
8108   if (!X86EnableAPXForRelocation && isMemInstrWithGOTPCREL(LoadMI) &&
8109       X86II::hasNewDataDest(TSFlags))
8110     return nullptr;
8111 
8112   // Determine the alignment of the load.
8113   Align Alignment;
8114   unsigned LoadOpc = LoadMI.getOpcode();
8115   if (LoadMI.hasOneMemOperand())
8116     Alignment = (*LoadMI.memoperands_begin())->getAlign();
8117   else
8118     switch (LoadOpc) {
8119     case X86::AVX512_512_SET0:
8120     case X86::AVX512_512_SETALLONES:
8121       Alignment = Align(64);
8122       break;
8123     case X86::AVX2_SETALLONES:
8124     case X86::AVX1_SETALLONES:
8125     case X86::AVX_SET0:
8126     case X86::AVX512_256_SET0:
8127       Alignment = Align(32);
8128       break;
8129     case X86::V_SET0:
8130     case X86::V_SETALLONES:
8131     case X86::AVX512_128_SET0:
8132     case X86::FsFLD0F128:
8133     case X86::AVX512_FsFLD0F128:
8134       Alignment = Align(16);
8135       break;
8136     case X86::MMX_SET0:
8137     case X86::FsFLD0SD:
8138     case X86::AVX512_FsFLD0SD:
8139       Alignment = Align(8);
8140       break;
8141     case X86::FsFLD0SS:
8142     case X86::AVX512_FsFLD0SS:
8143       Alignment = Align(4);
8144       break;
8145     case X86::FsFLD0SH:
8146     case X86::AVX512_FsFLD0SH:
8147       Alignment = Align(2);
8148       break;
8149     default:
8150       return nullptr;
8151     }
8152   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
8153     unsigned NewOpc = 0;
8154     switch (MI.getOpcode()) {
8155     default:
8156       return nullptr;
8157     case X86::TEST8rr:
8158       NewOpc = X86::CMP8ri;
8159       break;
8160     case X86::TEST16rr:
8161       NewOpc = X86::CMP16ri;
8162       break;
8163     case X86::TEST32rr:
8164       NewOpc = X86::CMP32ri;
8165       break;
8166     case X86::TEST64rr:
8167       NewOpc = X86::CMP64ri32;
8168       break;
8169     }
8170     // Change to CMPXXri r, 0 first.
8171     MI.setDesc(get(NewOpc));
8172     MI.getOperand(1).ChangeToImmediate(0);
8173   } else if (Ops.size() != 1)
8174     return nullptr;
8175 
8176   // Make sure the subregisters match.
8177   // Otherwise we risk changing the size of the load.
8178   if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
8179     return nullptr;
8180 
8181   SmallVector<MachineOperand, X86::AddrNumOperands> MOs;
8182   switch (LoadOpc) {
8183   case X86::MMX_SET0:
8184   case X86::V_SET0:
8185   case X86::V_SETALLONES:
8186   case X86::AVX2_SETALLONES:
8187   case X86::AVX1_SETALLONES:
8188   case X86::AVX_SET0:
8189   case X86::AVX512_128_SET0:
8190   case X86::AVX512_256_SET0:
8191   case X86::AVX512_512_SET0:
8192   case X86::AVX512_512_SETALLONES:
8193   case X86::FsFLD0SH:
8194   case X86::AVX512_FsFLD0SH:
8195   case X86::FsFLD0SD:
8196   case X86::AVX512_FsFLD0SD:
8197   case X86::FsFLD0SS:
8198   case X86::AVX512_FsFLD0SS:
8199   case X86::FsFLD0F128:
8200   case X86::AVX512_FsFLD0F128: {
8201     // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
8202     // Create a constant-pool entry and operands to load from it.
8203 
8204     // Large code model can't fold loads this way.
8205     if (MF.getTarget().getCodeModel() == CodeModel::Large)
8206       return nullptr;
8207 
8208     // x86-32 PIC requires a PIC base register for constant pools.
8209     unsigned PICBase = 0;
8210     // Since we're using Small or Kernel code model, we can always use
8211     // RIP-relative addressing for a smaller encoding.
8212     if (Subtarget.is64Bit()) {
8213       PICBase = X86::RIP;
8214     } else if (MF.getTarget().isPositionIndependent()) {
8215       // FIXME: PICBase = getGlobalBaseReg(&MF);
8216       // This doesn't work for several reasons.
8217       // 1. GlobalBaseReg may have been spilled.
8218       // 2. It may not be live at MI.
8219       return nullptr;
8220     }
8221 
8222     // Create a constant-pool entry.
8223     MachineConstantPool &MCP = *MF.getConstantPool();
8224     Type *Ty;
8225     bool IsAllOnes = false;
8226     switch (LoadOpc) {
8227     case X86::FsFLD0SS:
8228     case X86::AVX512_FsFLD0SS:
8229       Ty = Type::getFloatTy(MF.getFunction().getContext());
8230       break;
8231     case X86::FsFLD0SD:
8232     case X86::AVX512_FsFLD0SD:
8233       Ty = Type::getDoubleTy(MF.getFunction().getContext());
8234       break;
8235     case X86::FsFLD0F128:
8236     case X86::AVX512_FsFLD0F128:
8237       Ty = Type::getFP128Ty(MF.getFunction().getContext());
8238       break;
8239     case X86::FsFLD0SH:
8240     case X86::AVX512_FsFLD0SH:
8241       Ty = Type::getHalfTy(MF.getFunction().getContext());
8242       break;
8243     case X86::AVX512_512_SETALLONES:
8244       IsAllOnes = true;
8245       [[fallthrough]];
8246     case X86::AVX512_512_SET0:
8247       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8248                                 16);
8249       break;
8250     case X86::AVX1_SETALLONES:
8251     case X86::AVX2_SETALLONES:
8252       IsAllOnes = true;
8253       [[fallthrough]];
8254     case X86::AVX512_256_SET0:
8255     case X86::AVX_SET0:
8256       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8257                                 8);
8258 
8259       break;
8260     case X86::MMX_SET0:
8261       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8262                                 2);
8263       break;
8264     case X86::V_SETALLONES:
8265       IsAllOnes = true;
8266       [[fallthrough]];
8267     case X86::V_SET0:
8268     case X86::AVX512_128_SET0:
8269       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8270                                 4);
8271       break;
8272     }
8273 
8274     const Constant *C =
8275         IsAllOnes ? Constant::getAllOnesValue(Ty) : Constant::getNullValue(Ty);
8276     unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
8277 
8278     // Create operands to load from the constant pool entry.
8279     MOs.push_back(MachineOperand::CreateReg(PICBase, false));
8280     MOs.push_back(MachineOperand::CreateImm(1));
8281     MOs.push_back(MachineOperand::CreateReg(0, false));
8282     MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
8283     MOs.push_back(MachineOperand::CreateReg(0, false));
8284     break;
8285   }
8286   case X86::VPBROADCASTBZ128rm:
8287   case X86::VPBROADCASTBZ256rm:
8288   case X86::VPBROADCASTBZrm:
8289   case X86::VBROADCASTF32X2Z256rm:
8290   case X86::VBROADCASTF32X2Zrm:
8291   case X86::VBROADCASTI32X2Z128rm:
8292   case X86::VBROADCASTI32X2Z256rm:
8293   case X86::VBROADCASTI32X2Zrm:
8294     // No instructions currently fuse 8-bit or 32-bit x 2 broadcasts.
8295     return nullptr;
8296 
8297 #define FOLD_BROADCAST(SIZE)                                                   \
8298   MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,          \
8299              LoadMI.operands_begin() + NumOps);                                \
8300   return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, /*Size=*/SIZE,     \
8301                              /*AllowCommute=*/true);
8302   case X86::VPBROADCASTWZ128rm:
8303   case X86::VPBROADCASTWZ256rm:
8304   case X86::VPBROADCASTWZrm:
8305     FOLD_BROADCAST(16);
8306   case X86::VPBROADCASTDZ128rm:
8307   case X86::VPBROADCASTDZ256rm:
8308   case X86::VPBROADCASTDZrm:
8309   case X86::VBROADCASTSSZ128rm:
8310   case X86::VBROADCASTSSZ256rm:
8311   case X86::VBROADCASTSSZrm:
8312     FOLD_BROADCAST(32);
8313   case X86::VPBROADCASTQZ128rm:
8314   case X86::VPBROADCASTQZ256rm:
8315   case X86::VPBROADCASTQZrm:
8316   case X86::VBROADCASTSDZ256rm:
8317   case X86::VBROADCASTSDZrm:
8318     FOLD_BROADCAST(64);
8319   default: {
8320     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
8321       return nullptr;
8322 
8323     // Folding a normal load. Just copy the load's address operands.
8324     MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
8325                LoadMI.operands_begin() + NumOps);
8326     break;
8327   }
8328   }
8329   return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
8330                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
8331 }
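// Hedged example of the constant-pool path above (registers assumed):
// folding a V_SET0 that feeds an ADDPSrr turns
//
//   %xmm1 = V_SET0
//   %xmm0 = ADDPSrr %xmm0, %xmm1
//
// into a load from an all-zero constant-pool entry, RIP-relative on x86-64:
//
//   %xmm0 = ADDPSrm %xmm0, $rip, 1, $noreg, %const.0, $noreg
//
// trading the extra register def for a memory operand to ease register
// pressure.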
8332 
8333 MachineInstr *
8334 X86InstrInfo::foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
8335                                   unsigned OpNum, ArrayRef<MachineOperand> MOs,
8336                                   MachineBasicBlock::iterator InsertPt,
8337                                   unsigned BitsSize, bool AllowCommute) const {
8338 
8339   if (auto *I = lookupBroadcastFoldTable(MI.getOpcode(), OpNum))
8340     return matchBroadcastSize(*I, BitsSize)
8341                ? fuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
8342                : nullptr;
8343 
8344   if (AllowCommute) {
8345     // If the instruction and target operand are commutable, commute the
8346     // instruction and try again.
8347     unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
8348     if (CommuteOpIdx2 == OpNum) {
8349       printFailMsgforFold(MI, OpNum);
8350       return nullptr;
8351     }
8352     MachineInstr *NewMI =
8353         foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
8354                             /*AllowCommute=*/false);
8355     if (NewMI)
8356       return NewMI;
8357     // Folding failed again - undo the commute before returning.
8358     commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
8359   }
8360 
8361   printFailMsgforFold(MI, OpNum);
8362   return nullptr;
8363 }
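// Illustrative sketch of a broadcast fold (AVX512, registers assumed):
//
//   %zmm1 = VPBROADCASTDZrm %rdi, 1, $noreg, 0, $noreg
//   %zmm0 = VPADDDZrr %zmm0, %zmm1
//
// folds to the embedded-broadcast form
//
//   %zmm0 = VPADDDZrmb %zmm0, %rdi, 1, $noreg, 0, $noreg
//
// i.e. "vpaddd (%rdi){1to16}, %zmm0, %zmm0", provided the table entry's
// broadcast width matches BitsSize.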
8364 
8365 static SmallVector<MachineMemOperand *, 2>
8366 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
8367   SmallVector<MachineMemOperand *, 2> LoadMMOs;
8368 
8369   for (MachineMemOperand *MMO : MMOs) {
8370     if (!MMO->isLoad())
8371       continue;
8372 
8373     if (!MMO->isStore()) {
8374       // Reuse the MMO.
8375       LoadMMOs.push_back(MMO);
8376     } else {
8377       // Clone the MMO and unset the store flag.
8378       LoadMMOs.push_back(MF.getMachineMemOperand(
8379           MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
8380     }
8381   }
8382 
8383   return LoadMMOs;
8384 }
8385 
8386 static SmallVector<MachineMemOperand *, 2>
8387 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
8388   SmallVector<MachineMemOperand *, 2> StoreMMOs;
8389 
8390   for (MachineMemOperand *MMO : MMOs) {
8391     if (!MMO->isStore())
8392       continue;
8393 
8394     if (!MMO->isLoad()) {
8395       // Reuse the MMO.
8396       StoreMMOs.push_back(MMO);
8397     } else {
8398       // Clone the MMO and unset the load flag.
8399       StoreMMOs.push_back(MF.getMachineMemOperand(
8400           MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
8401     }
8402   }
8403 
8404   return StoreMMOs;
8405 }
8406 
8407 static unsigned getBroadcastOpcode(const X86FoldTableEntry *I,
8408                                    const TargetRegisterClass *RC,
8409                                    const X86Subtarget &STI) {
8410   assert(STI.hasAVX512() && "Expected at least AVX512!");
8411   unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
8412   assert((SpillSize == 64 || STI.hasVLX()) &&
8413          "Can't broadcast less than 64 bytes without AVX512VL!");
8414 
8415 #define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64)                            \
8416   case TYPE:                                                                   \
8417     switch (SpillSize) {                                                       \
8418     default:                                                                   \
8419       llvm_unreachable("Unknown spill size");                                  \
8420     case 16:                                                                   \
8421       return X86::OP16;                                                        \
8422     case 32:                                                                   \
8423       return X86::OP32;                                                        \
8424     case 64:                                                                   \
8425       return X86::OP64;                                                        \
8426     }                                                                          \
8427     break;
8428 
8429   switch (I->Flags & TB_BCAST_MASK) {
8430   default:
8431     llvm_unreachable("Unexpected broadcast type!");
8432     CASE_BCAST_TYPE_OPC(TB_BCAST_W, VPBROADCASTWZ128rm, VPBROADCASTWZ256rm,
8433                         VPBROADCASTWZrm)
8434     CASE_BCAST_TYPE_OPC(TB_BCAST_D, VPBROADCASTDZ128rm, VPBROADCASTDZ256rm,
8435                         VPBROADCASTDZrm)
8436     CASE_BCAST_TYPE_OPC(TB_BCAST_Q, VPBROADCASTQZ128rm, VPBROADCASTQZ256rm,
8437                         VPBROADCASTQZrm)
8438     CASE_BCAST_TYPE_OPC(TB_BCAST_SH, VPBROADCASTWZ128rm, VPBROADCASTWZ256rm,
8439                         VPBROADCASTWZrm)
8440     CASE_BCAST_TYPE_OPC(TB_BCAST_SS, VBROADCASTSSZ128rm, VBROADCASTSSZ256rm,
8441                         VBROADCASTSSZrm)
8442     CASE_BCAST_TYPE_OPC(TB_BCAST_SD, VMOVDDUPZ128rm, VBROADCASTSDZ256rm,
8443                         VBROADCASTSDZrm)
8444   }
8445 }
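// Hedged example: a TB_BCAST_SS entry with a 64-byte spill class (VR512)
// maps to VBROADCASTSSZrm, while a 16-byte class yields VBROADCASTSSZ128rm
// and, per the assertion above, requires AVX512VL.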
8446 
8447 bool X86InstrInfo::unfoldMemoryOperand(
8448     MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad,
8449     bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
8450   const X86FoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
8451   if (I == nullptr)
8452     return false;
8453   unsigned Opc = I->DstOp;
8454   unsigned Index = I->Flags & TB_INDEX_MASK;
8455   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8456   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8457   if (UnfoldLoad && !FoldedLoad)
8458     return false;
8459   UnfoldLoad &= FoldedLoad;
8460   if (UnfoldStore && !FoldedStore)
8461     return false;
8462   UnfoldStore &= FoldedStore;
8463 
8464   const MCInstrDesc &MCID = get(Opc);
8465 
8466   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
8467   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8468   // TODO: Check if 32-byte or greater accesses are slow too?
8469   if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
8470       Subtarget.isUnalignedMem16Slow())
8471     // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
8472     // conservatively assume the address is unaligned. That's bad for
8473     // performance.
8474     return false;
8475   SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
8476   SmallVector<MachineOperand, 2> BeforeOps;
8477   SmallVector<MachineOperand, 2> AfterOps;
8478   SmallVector<MachineOperand, 4> ImpOps;
8479   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
8480     MachineOperand &Op = MI.getOperand(i);
8481     if (i >= Index && i < Index + X86::AddrNumOperands)
8482       AddrOps.push_back(Op);
8483     else if (Op.isReg() && Op.isImplicit())
8484       ImpOps.push_back(Op);
8485     else if (i < Index)
8486       BeforeOps.push_back(Op);
8487     else if (i > Index)
8488       AfterOps.push_back(Op);
8489   }
8490 
8491   // Emit the load or broadcast instruction.
8492   if (UnfoldLoad) {
8493     auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
8494 
8495     unsigned Opc;
8496     if (I->Flags & TB_BCAST_MASK) {
8497       Opc = getBroadcastOpcode(I, RC, Subtarget);
8498     } else {
8499       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8500       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8501       Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
8502     }
8503 
8504     DebugLoc DL;
8505     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
8506     for (const MachineOperand &AddrOp : AddrOps)
8507       MIB.add(AddrOp);
8508     MIB.setMemRefs(MMOs);
8509     NewMIs.push_back(MIB);
8510 
8511     if (UnfoldStore) {
8512       // Address operands cannot be marked isKill.
8513       for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
8514         MachineOperand &MO = NewMIs[0]->getOperand(i);
8515         if (MO.isReg())
8516           MO.setIsKill(false);
8517       }
8518     }
8519   }
8520 
8521   // Emit the data processing instruction.
8522   MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
8523   MachineInstrBuilder MIB(MF, DataMI);
8524 
8525   if (FoldedStore)
8526     MIB.addReg(Reg, RegState::Define);
8527   for (MachineOperand &BeforeOp : BeforeOps)
8528     MIB.add(BeforeOp);
8529   if (FoldedLoad)
8530     MIB.addReg(Reg);
8531   for (MachineOperand &AfterOp : AfterOps)
8532     MIB.add(AfterOp);
8533   for (MachineOperand &ImpOp : ImpOps) {
8534     MIB.addReg(ImpOp.getReg(), getDefRegState(ImpOp.isDef()) |
8535                                    RegState::Implicit |
8536                                    getKillRegState(ImpOp.isKill()) |
8537                                    getDeadRegState(ImpOp.isDead()) |
8538                                    getUndefRegState(ImpOp.isUndef()));
8539   }
8540   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
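  // TEST r,r sets the same flags as a compare against zero and needs no
  // immediate, so it is the preferred register-only form.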
8541   switch (DataMI->getOpcode()) {
8542   default:
8543     break;
8544   case X86::CMP64ri32:
8545   case X86::CMP32ri:
8546   case X86::CMP16ri:
8547   case X86::CMP8ri: {
8548     MachineOperand &MO0 = DataMI->getOperand(0);
8549     MachineOperand &MO1 = DataMI->getOperand(1);
8550     if (MO1.isImm() && MO1.getImm() == 0) {
8551       unsigned NewOpc;
8552       switch (DataMI->getOpcode()) {
8553       default:
8554         llvm_unreachable("Unreachable!");
8555       case X86::CMP64ri32:
8556         NewOpc = X86::TEST64rr;
8557         break;
8558       case X86::CMP32ri:
8559         NewOpc = X86::TEST32rr;
8560         break;
8561       case X86::CMP16ri:
8562         NewOpc = X86::TEST16rr;
8563         break;
8564       case X86::CMP8ri:
8565         NewOpc = X86::TEST8rr;
8566         break;
8567       }
8568       DataMI->setDesc(get(NewOpc));
8569       MO1.ChangeToRegister(MO0.getReg(), false);
8570     }
8571   }
8572   }
8573   NewMIs.push_back(DataMI);
8574 
8575   // Emit the store instruction.
8576   if (UnfoldStore) {
8577     const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
8578     auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
8579     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
8580     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8581     unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
8582     DebugLoc DL;
8583     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
8584     for (const MachineOperand &AddrOp : AddrOps)
8585       MIB.add(AddrOp);
8586     MIB.addReg(Reg, RegState::Kill);
8587     MIB.setMemRefs(MMOs);
8588     NewMIs.push_back(MIB);
8589   }
8590 
8591   return true;
8592 }
8593 
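// SelectionDAG flavour of the unfolding above: rebuild the load, the
// register-form operation and the store as machine SDNodes, threading the
// incoming chain through the new memory nodes.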
8594 bool X86InstrInfo::unfoldMemoryOperand(
8595     SelectionDAG &DAG, SDNode *N, SmallVectorImpl<SDNode *> &NewNodes) const {
8596   if (!N->isMachineOpcode())
8597     return false;
8598 
8599   const X86FoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
8600   if (I == nullptr)
8601     return false;
8602   unsigned Opc = I->DstOp;
8603   unsigned Index = I->Flags & TB_INDEX_MASK;
8604   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8605   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8606   const MCInstrDesc &MCID = get(Opc);
8607   MachineFunction &MF = DAG.getMachineFunction();
8608   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8609   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
8610   unsigned NumDefs = MCID.NumDefs;
8611   std::vector<SDValue> AddrOps;
8612   std::vector<SDValue> BeforeOps;
8613   std::vector<SDValue> AfterOps;
8614   SDLoc dl(N);
8615   unsigned NumOps = N->getNumOperands();
8616   for (unsigned i = 0; i != NumOps - 1; ++i) {
8617     SDValue Op = N->getOperand(i);
8618     if (i >= Index - NumDefs && i < Index - NumDefs + X86::AddrNumOperands)
8619       AddrOps.push_back(Op);
8620     else if (i < Index - NumDefs)
8621       BeforeOps.push_back(Op);
8622     else if (i > Index - NumDefs)
8623       AfterOps.push_back(Op);
8624   }
8625   SDValue Chain = N->getOperand(NumOps - 1);
8626   AddrOps.push_back(Chain);
8627 
8628   // Emit the load instruction.
8629   SDNode *Load = nullptr;
8630   if (FoldedLoad) {
8631     EVT VT = *TRI.legalclasstypes_begin(*RC);
8632     auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
8633     if (MMOs.empty() && RC == &X86::VR128RegClass &&
8634         Subtarget.isUnalignedMem16Slow())
8635       // Do not introduce a slow unaligned load.
8636       return false;
8637     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
8638     // memory access is slow above.
8639 
8640     unsigned Opc;
8641     if (I->Flags & TB_BCAST_MASK) {
8642       Opc = getBroadcastOpcode(I, RC, Subtarget);
8643     } else {
8644       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8645       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8646       Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
8647     }
8648 
8649     Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
8650     NewNodes.push_back(Load);
8651 
8652     // Preserve memory reference information.
8653     DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
8654   }
8655 
8656   // Emit the data processing instruction.
8657   std::vector<EVT> VTs;
8658   const TargetRegisterClass *DstRC = nullptr;
8659   if (MCID.getNumDefs() > 0) {
8660     DstRC = getRegClass(MCID, 0, &RI, MF);
8661     VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
8662   }
8663   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
8664     EVT VT = N->getValueType(i);
8665     if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
8666       VTs.push_back(VT);
8667   }
8668   if (Load)
8669     BeforeOps.push_back(SDValue(Load, 0));
8670   llvm::append_range(BeforeOps, AfterOps);
8671   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
8672   switch (Opc) {
8673   default:
8674     break;
8675   case X86::CMP64ri32:
8676   case X86::CMP32ri:
8677   case X86::CMP16ri:
8678   case X86::CMP8ri:
8679     if (isNullConstant(BeforeOps[1])) {
8680       switch (Opc) {
8681       default:
8682         llvm_unreachable("Unreachable!");
8683       case X86::CMP64ri32:
8684         Opc = X86::TEST64rr;
8685         break;
8686       case X86::CMP32ri:
8687         Opc = X86::TEST32rr;
8688         break;
8689       case X86::CMP16ri:
8690         Opc = X86::TEST16rr;
8691         break;
8692       case X86::CMP8ri:
8693         Opc = X86::TEST8rr;
8694         break;
8695       }
8696       BeforeOps[1] = BeforeOps[0];
8697     }
8698   }
8699   SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
8700   NewNodes.push_back(NewNode);
8701 
8702   // Emit the store instruction.
8703   if (FoldedStore) {
8704     AddrOps.pop_back();
8705     AddrOps.push_back(SDValue(NewNode, 0));
8706     AddrOps.push_back(Chain);
8707     auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
8708     if (MMOs.empty() && RC == &X86::VR128RegClass &&
8709         Subtarget.isUnalignedMem16Slow())
8710       // Do not introduce a slow unaligned store.
8711       return false;
8712     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
8713     // memory access is slow above.
8714     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8715     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8716     SDNode *Store =
8717         DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
8718                            dl, MVT::Other, AddrOps);
8719     NewNodes.push_back(Store);
8720 
8721     // Preserve memory reference information.
8722     DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
8723   }
8724 
8725   return true;
8726 }
8727 
8728 unsigned
8729 X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad,
8730                                          bool UnfoldStore,
8731                                          unsigned *LoadRegIndex) const {
8732   const X86FoldTableEntry *I = lookupUnfoldTable(Opc);
8733   if (I == nullptr)
8734     return 0;
8735   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8736   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8737   if (UnfoldLoad && !FoldedLoad)
8738     return 0;
8739   if (UnfoldStore && !FoldedStore)
8740     return 0;
8741   if (LoadRegIndex)
8742     *LoadRegIndex = I->Flags & TB_INDEX_MASK;
8743   return I->DstOp;
8744 }
8745 
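// Two loads are considered to share a base pointer when every address
// component except the displacement matches; the constant displacements are
// returned so the caller can reason about how close the accesses are.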
8746 bool X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
8747                                            int64_t &Offset1,
8748                                            int64_t &Offset2) const {
8749   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
8750     return false;
8751 
8752   auto IsLoadOpcode = [&](unsigned Opcode) {
8753     switch (Opcode) {
8754     default:
8755       return false;
8756     case X86::MOV8rm:
8757     case X86::MOV16rm:
8758     case X86::MOV32rm:
8759     case X86::MOV64rm:
8760     case X86::LD_Fp32m:
8761     case X86::LD_Fp64m:
8762     case X86::LD_Fp80m:
8763     case X86::MOVSSrm:
8764     case X86::MOVSSrm_alt:
8765     case X86::MOVSDrm:
8766     case X86::MOVSDrm_alt:
8767     case X86::MMX_MOVD64rm:
8768     case X86::MMX_MOVQ64rm:
8769     case X86::MOVAPSrm:
8770     case X86::MOVUPSrm:
8771     case X86::MOVAPDrm:
8772     case X86::MOVUPDrm:
8773     case X86::MOVDQArm:
8774     case X86::MOVDQUrm:
8775     // AVX load instructions
8776     case X86::VMOVSSrm:
8777     case X86::VMOVSSrm_alt:
8778     case X86::VMOVSDrm:
8779     case X86::VMOVSDrm_alt:
8780     case X86::VMOVAPSrm:
8781     case X86::VMOVUPSrm:
8782     case X86::VMOVAPDrm:
8783     case X86::VMOVUPDrm:
8784     case X86::VMOVDQArm:
8785     case X86::VMOVDQUrm:
8786     case X86::VMOVAPSYrm:
8787     case X86::VMOVUPSYrm:
8788     case X86::VMOVAPDYrm:
8789     case X86::VMOVUPDYrm:
8790     case X86::VMOVDQAYrm:
8791     case X86::VMOVDQUYrm:
8792     // AVX512 load instructions
8793     case X86::VMOVSSZrm:
8794     case X86::VMOVSSZrm_alt:
8795     case X86::VMOVSDZrm:
8796     case X86::VMOVSDZrm_alt:
8797     case X86::VMOVAPSZ128rm:
8798     case X86::VMOVUPSZ128rm:
8799     case X86::VMOVAPSZ128rm_NOVLX:
8800     case X86::VMOVUPSZ128rm_NOVLX:
8801     case X86::VMOVAPDZ128rm:
8802     case X86::VMOVUPDZ128rm:
8803     case X86::VMOVDQU8Z128rm:
8804     case X86::VMOVDQU16Z128rm:
8805     case X86::VMOVDQA32Z128rm:
8806     case X86::VMOVDQU32Z128rm:
8807     case X86::VMOVDQA64Z128rm:
8808     case X86::VMOVDQU64Z128rm:
8809     case X86::VMOVAPSZ256rm:
8810     case X86::VMOVUPSZ256rm:
8811     case X86::VMOVAPSZ256rm_NOVLX:
8812     case X86::VMOVUPSZ256rm_NOVLX:
8813     case X86::VMOVAPDZ256rm:
8814     case X86::VMOVUPDZ256rm:
8815     case X86::VMOVDQU8Z256rm:
8816     case X86::VMOVDQU16Z256rm:
8817     case X86::VMOVDQA32Z256rm:
8818     case X86::VMOVDQU32Z256rm:
8819     case X86::VMOVDQA64Z256rm:
8820     case X86::VMOVDQU64Z256rm:
8821     case X86::VMOVAPSZrm:
8822     case X86::VMOVUPSZrm:
8823     case X86::VMOVAPDZrm:
8824     case X86::VMOVUPDZrm:
8825     case X86::VMOVDQU8Zrm:
8826     case X86::VMOVDQU16Zrm:
8827     case X86::VMOVDQA32Zrm:
8828     case X86::VMOVDQU32Zrm:
8829     case X86::VMOVDQA64Zrm:
8830     case X86::VMOVDQU64Zrm:
8831     case X86::KMOVBkm:
8832     case X86::KMOVBkm_EVEX:
8833     case X86::KMOVWkm:
8834     case X86::KMOVWkm_EVEX:
8835     case X86::KMOVDkm:
8836     case X86::KMOVDkm_EVEX:
8837     case X86::KMOVQkm:
8838     case X86::KMOVQkm_EVEX:
8839       return true;
8840     }
8841   };
8842 
8843   if (!IsLoadOpcode(Load1->getMachineOpcode()) ||
8844       !IsLoadOpcode(Load2->getMachineOpcode()))
8845     return false;
8846 
8847   // Lambda to check if both the loads have the same value for an operand index.
8848   auto HasSameOp = [&](int I) {
8849     return Load1->getOperand(I) == Load2->getOperand(I);
8850   };
8851 
8852   // All operands except the displacement should match.
8853   if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
8854       !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
8855     return false;
8856 
8857   // The chain operand must be the same.
8858   if (!HasSameOp(5))
8859     return false;
8860 
8861   // Now let's examine if the displacements are constants.
8862   auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
8863   auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
8864   if (!Disp1 || !Disp2)
8865     return false;
8866 
8867   Offset1 = Disp1->getSExtValue();
8868   Offset2 = Disp2->getSExtValue();
8869   return true;
8870 }
8871 
8872 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
8873                                            int64_t Offset1, int64_t Offset2,
8874                                            unsigned NumLoads) const {
8875   assert(Offset2 > Offset1);
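  // Only try to cluster loads that are close together; give up once the
  // displacements are more than roughly 512 bytes apart.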
8876   if ((Offset2 - Offset1) / 8 > 64)
8877     return false;
8878 
8879   unsigned Opc1 = Load1->getMachineOpcode();
8880   unsigned Opc2 = Load2->getMachineOpcode();
8881   if (Opc1 != Opc2)
8882     return false; // FIXME: overly conservative?
8883 
8884   switch (Opc1) {
8885   default:
8886     break;
8887   case X86::LD_Fp32m:
8888   case X86::LD_Fp64m:
8889   case X86::LD_Fp80m:
8890   case X86::MMX_MOVD64rm:
8891   case X86::MMX_MOVQ64rm:
8892     return false;
8893   }
8894 
8895   EVT VT = Load1->getValueType(0);
8896   switch (VT.getSimpleVT().SimpleTy) {
8897   default:
8898     // XMM registers. In 64-bit mode we can be a bit more aggressive since we
8899     // have 16 of them to play with.
8900     if (Subtarget.is64Bit()) {
8901       if (NumLoads >= 3)
8902         return false;
8903     } else if (NumLoads) {
8904       return false;
8905     }
8906     break;
8907   case MVT::i8:
8908   case MVT::i16:
8909   case MVT::i32:
8910   case MVT::i64:
8911   case MVT::f32:
8912   case MVT::f64:
8913     if (NumLoads)
8914       return false;
8915     break;
8916   }
8917 
8918   return true;
8919 }
8920 
8921 bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
8922                                         const MachineBasicBlock *MBB,
8923                                         const MachineFunction &MF) const {
8924 
8925   // ENDBR and PLDTILECFGV instructions should not be scheduled around.
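  // An ENDBR must remain the first instruction at an indirect-branch target
  // (Intel CET/IBT), and moving code across a tile configuration would break
  // the AMX instructions that depend on it.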
8926   unsigned Opcode = MI.getOpcode();
8927   if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
8928       Opcode == X86::PLDTILECFGV)
8929     return true;
8930 
8931   // Frame setup and destroy can't be scheduled around.
8932   if (MI.getFlag(MachineInstr::FrameSetup) ||
8933       MI.getFlag(MachineInstr::FrameDestroy))
8934     return true;
8935 
8936   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
8937 }
8938 
8939 bool X86InstrInfo::reverseBranchCondition(
8940     SmallVectorImpl<MachineOperand> &Cond) const {
8941   assert(Cond.size() == 1 && "Invalid X86 branch condition!");
8942   X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
8943   Cond[0].setImm(GetOppositeBranchCondition(CC));
8944   return false;
8945 }
8946 
8947 bool X86InstrInfo::isSafeToMoveRegClassDefs(
8948     const TargetRegisterClass *RC) const {
8949   // FIXME: Return false for x87 stack register classes for now. We can't
8950   // allow any loads of these registers before FpGet_ST0_80.
8951   return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
8952            RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
8953            RC == &X86::RFP80RegClass);
8954 }
8955 
8956 /// Return a virtual register initialized with the global base register
8957 /// value. Emit the instructions required to initialize the register in the
8958 /// function entry block, if necessary.
8959 ///
8960 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
8961 ///
8962 Register X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
8963   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
8964   Register GlobalBaseReg = X86FI->getGlobalBaseReg();
8965   if (GlobalBaseReg)
8966     return GlobalBaseReg;
8967 
8968   // Create the register. The code to initialize it is inserted
8969   // later, by the CGBR pass (below).
8970   MachineRegisterInfo &RegInfo = MF->getRegInfo();
8971   GlobalBaseReg = RegInfo.createVirtualRegister(
8972       Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
8973   X86FI->setGlobalBaseReg(GlobalBaseReg);
8974   return GlobalBaseReg;
8975 }
8976 
8977 // FIXME: Some shuffle and unpack instructions have equivalents in different
8978 // domains, but they require a bit more work than just switching opcodes.
8979 
8980 static const uint16_t *lookup(unsigned opcode, unsigned domain,
8981                               ArrayRef<uint16_t[3]> Table) {
8982   for (const uint16_t(&Row)[3] : Table)
8983     if (Row[domain - 1] == opcode)
8984       return Row;
8985   return nullptr;
8986 }
8987 
8988 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
8989                                     ArrayRef<uint16_t[4]> Table) {
8990   // If this is the integer domain make sure to check both integer columns.
8991   for (const uint16_t(&Row)[4] : Table)
8992     if (Row[domain - 1] == opcode || (domain == 3 && Row[3] == opcode))
8993       return Row;
8994   return nullptr;
8995 }
8996 
8997 // Helper to attempt to widen/narrow blend masks.
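// For example, widening the 4-element mask 0b1100 to 2 elements gives 0b10,
// and narrowing the 2-element mask 0b10 to 4 elements gives 0b1100; widening
// fails when a group of old bits is only partially set (0b0100 has no
// 2-element equivalent).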
8998 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
8999                             unsigned NewWidth, unsigned *pNewMask = nullptr) {
9000   assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
9001          "Illegal blend mask scale");
9002   unsigned NewMask = 0;
9003 
9004   if ((OldWidth % NewWidth) == 0) {
9005     unsigned Scale = OldWidth / NewWidth;
9006     unsigned SubMask = (1u << Scale) - 1;
9007     for (unsigned i = 0; i != NewWidth; ++i) {
9008       unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
9009       if (Sub == SubMask)
9010         NewMask |= (1u << i);
9011       else if (Sub != 0x0)
9012         return false;
9013     }
9014   } else {
9015     unsigned Scale = NewWidth / OldWidth;
9016     unsigned SubMask = (1u << Scale) - 1;
9017     for (unsigned i = 0; i != OldWidth; ++i) {
9018       if (OldMask & (1 << i)) {
9019         NewMask |= (SubMask << (i * Scale));
9020       }
9021     }
9022   }
9023 
9024   if (pNewMask)
9025     *pNewMask = NewMask;
9026   return true;
9027 }
9028 
9029 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
9030   unsigned Opcode = MI.getOpcode();
9031   unsigned NumOperands = MI.getDesc().getNumOperands();
9032 
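  // The result is a bitmask of legal execution domains: bit 1 = PackedSingle,
  // bit 2 = PackedDouble, bit 3 = PackedInt, so 0x6 allows the two FP domains
  // and 0xe allows all three.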
9033   auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
9034     uint16_t validDomains = 0;
9035     if (MI.getOperand(NumOperands - 1).isImm()) {
9036       unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
9037       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
9038         validDomains |= 0x2; // PackedSingle
9039       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
9040         validDomains |= 0x4; // PackedDouble
9041       if (!Is256 || Subtarget.hasAVX2())
9042         validDomains |= 0x8; // PackedInt
9043     }
9044     return validDomains;
9045   };
9046 
9047   switch (Opcode) {
9048   case X86::BLENDPDrmi:
9049   case X86::BLENDPDrri:
9050   case X86::VBLENDPDrmi:
9051   case X86::VBLENDPDrri:
9052     return GetBlendDomains(2, false);
9053   case X86::VBLENDPDYrmi:
9054   case X86::VBLENDPDYrri:
9055     return GetBlendDomains(4, true);
9056   case X86::BLENDPSrmi:
9057   case X86::BLENDPSrri:
9058   case X86::VBLENDPSrmi:
9059   case X86::VBLENDPSrri:
9060   case X86::VPBLENDDrmi:
9061   case X86::VPBLENDDrri:
9062     return GetBlendDomains(4, false);
9063   case X86::VBLENDPSYrmi:
9064   case X86::VBLENDPSYrri:
9065   case X86::VPBLENDDYrmi:
9066   case X86::VPBLENDDYrri:
9067     return GetBlendDomains(8, true);
9068   case X86::PBLENDWrmi:
9069   case X86::PBLENDWrri:
9070   case X86::VPBLENDWrmi:
9071   case X86::VPBLENDWrri:
9072   // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
9073   case X86::VPBLENDWYrmi:
9074   case X86::VPBLENDWYrri:
9075     return GetBlendDomains(8, false);
9076   case X86::VPANDDZ128rr:
9077   case X86::VPANDDZ128rm:
9078   case X86::VPANDDZ256rr:
9079   case X86::VPANDDZ256rm:
9080   case X86::VPANDQZ128rr:
9081   case X86::VPANDQZ128rm:
9082   case X86::VPANDQZ256rr:
9083   case X86::VPANDQZ256rm:
9084   case X86::VPANDNDZ128rr:
9085   case X86::VPANDNDZ128rm:
9086   case X86::VPANDNDZ256rr:
9087   case X86::VPANDNDZ256rm:
9088   case X86::VPANDNQZ128rr:
9089   case X86::VPANDNQZ128rm:
9090   case X86::VPANDNQZ256rr:
9091   case X86::VPANDNQZ256rm:
9092   case X86::VPORDZ128rr:
9093   case X86::VPORDZ128rm:
9094   case X86::VPORDZ256rr:
9095   case X86::VPORDZ256rm:
9096   case X86::VPORQZ128rr:
9097   case X86::VPORQZ128rm:
9098   case X86::VPORQZ256rr:
9099   case X86::VPORQZ256rm:
9100   case X86::VPXORDZ128rr:
9101   case X86::VPXORDZ128rm:
9102   case X86::VPXORDZ256rr:
9103   case X86::VPXORDZ256rm:
9104   case X86::VPXORQZ128rr:
9105   case X86::VPXORQZ128rm:
9106   case X86::VPXORQZ256rr:
9107   case X86::VPXORQZ256rm:
9108     // If we don't have DQI, see if we can still switch from an EVEX integer
9109     // instruction to a VEX floating point instruction.
9110     if (Subtarget.hasDQI())
9111       return 0;
9112 
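    // VEX encodings can only reach XMM/YMM0-15, so the conversion is only
    // possible when every register operand has an encoding value below 16.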
9113     if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
9114       return 0;
9115     if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
9116       return 0;
9117     // Register forms will have 3 operands. Memory forms will have more.
9118     if (NumOperands == 3 &&
9119         RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
9120       return 0;
9121 
9122     // All domains are valid.
9123     return 0xe;
9124   case X86::MOVHLPSrr:
9125     // We can swap domains when both inputs are the same register.
9126     // FIXME: This doesn't catch all the cases we would like. If the input
9127     // register isn't KILLed by the instruction, the two address instruction
9128     // pass puts a COPY on one input. The other input uses the original
9129     // register. This prevents the same physical register from being used by
9130     // both inputs.
9131     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9132         MI.getOperand(0).getSubReg() == 0 &&
9133         MI.getOperand(1).getSubReg() == 0 && MI.getOperand(2).getSubReg() == 0)
9134       return 0x6;
9135     return 0;
9136   case X86::SHUFPDrri:
9137     return 0x6;
9138   }
9139   return 0;
9140 }
9141 
9142 #include "X86ReplaceableInstrs.def"
9143 
9144 bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
9145                                             unsigned Domain) const {
9146   assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
9147   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9148   assert(dom && "Not an SSE instruction");
9149 
9150   unsigned Opcode = MI.getOpcode();
9151   unsigned NumOperands = MI.getDesc().getNumOperands();
9152 
9153   auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
9154     if (MI.getOperand(NumOperands - 1).isImm()) {
9155       unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
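      // VPBLENDWY (ImmWidth == 16) encodes only 8 mask bits, applied to both
      // 128-bit lanes, so widen the immediate to 16 logical bits first.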
9156       Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
9157       unsigned NewImm = Imm;
9158 
9159       const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
9160       if (!table)
9161         table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9162 
9163       if (Domain == 1) { // PackedSingle
9164         AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
9165       } else if (Domain == 2) { // PackedDouble
9166         AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
9167       } else if (Domain == 3) { // PackedInt
9168         if (Subtarget.hasAVX2()) {
9169           // If we are already VPBLENDW use that, else use VPBLENDD.
9170           if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
9171             table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9172             AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
9173           }
9174         } else {
9175           assert(!Is256 && "128-bit vector expected");
9176           AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
9177         }
9178       }
9179 
9180       assert(table && table[Domain - 1] && "Unknown domain op");
9181       MI.setDesc(get(table[Domain - 1]));
9182       MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
9183     }
9184     return true;
9185   };
9186 
9187   switch (Opcode) {
9188   case X86::BLENDPDrmi:
9189   case X86::BLENDPDrri:
9190   case X86::VBLENDPDrmi:
9191   case X86::VBLENDPDrri:
9192     return SetBlendDomain(2, false);
9193   case X86::VBLENDPDYrmi:
9194   case X86::VBLENDPDYrri:
9195     return SetBlendDomain(4, true);
9196   case X86::BLENDPSrmi:
9197   case X86::BLENDPSrri:
9198   case X86::VBLENDPSrmi:
9199   case X86::VBLENDPSrri:
9200   case X86::VPBLENDDrmi:
9201   case X86::VPBLENDDrri:
9202     return SetBlendDomain(4, false);
9203   case X86::VBLENDPSYrmi:
9204   case X86::VBLENDPSYrri:
9205   case X86::VPBLENDDYrmi:
9206   case X86::VPBLENDDYrri:
9207     return SetBlendDomain(8, true);
9208   case X86::PBLENDWrmi:
9209   case X86::PBLENDWrri:
9210   case X86::VPBLENDWrmi:
9211   case X86::VPBLENDWrri:
9212     return SetBlendDomain(8, false);
9213   case X86::VPBLENDWYrmi:
9214   case X86::VPBLENDWYrri:
9215     return SetBlendDomain(16, true);
9216   case X86::VPANDDZ128rr:
9217   case X86::VPANDDZ128rm:
9218   case X86::VPANDDZ256rr:
9219   case X86::VPANDDZ256rm:
9220   case X86::VPANDQZ128rr:
9221   case X86::VPANDQZ128rm:
9222   case X86::VPANDQZ256rr:
9223   case X86::VPANDQZ256rm:
9224   case X86::VPANDNDZ128rr:
9225   case X86::VPANDNDZ128rm:
9226   case X86::VPANDNDZ256rr:
9227   case X86::VPANDNDZ256rm:
9228   case X86::VPANDNQZ128rr:
9229   case X86::VPANDNQZ128rm:
9230   case X86::VPANDNQZ256rr:
9231   case X86::VPANDNQZ256rm:
9232   case X86::VPORDZ128rr:
9233   case X86::VPORDZ128rm:
9234   case X86::VPORDZ256rr:
9235   case X86::VPORDZ256rm:
9236   case X86::VPORQZ128rr:
9237   case X86::VPORQZ128rm:
9238   case X86::VPORQZ256rr:
9239   case X86::VPORQZ256rm:
9240   case X86::VPXORDZ128rr:
9241   case X86::VPXORDZ128rm:
9242   case X86::VPXORDZ256rr:
9243   case X86::VPXORDZ256rm:
9244   case X86::VPXORQZ128rr:
9245   case X86::VPXORQZ128rm:
9246   case X86::VPXORQZ256rr:
9247   case X86::VPXORQZ256rm: {
9248     // Without DQI, convert EVEX instructions to VEX instructions.
9249     if (Subtarget.hasDQI())
9250       return false;
9251 
9252     const uint16_t *table =
9253         lookupAVX512(MI.getOpcode(), dom, ReplaceableCustomAVX512LogicInstrs);
9254     assert(table && "Instruction not found in table?");
9255     // Don't change integer Q instructions to D instructions and
9256     // use D instructions if we started with a PS instruction.
9257     if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9258       Domain = 4;
9259     MI.setDesc(get(table[Domain - 1]));
9260     return true;
9261   }
9262   case X86::UNPCKHPDrr:
9263   case X86::MOVHLPSrr:
9264     // We just need to commute the instruction which will switch the domains.
9265     if (Domain != dom && Domain != 3 &&
9266         MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9267         MI.getOperand(0).getSubReg() == 0 &&
9268         MI.getOperand(1).getSubReg() == 0 &&
9269         MI.getOperand(2).getSubReg() == 0) {
9270       commuteInstruction(MI, false);
9271       return true;
9272     }
9273     // We must always return true for MOVHLPSrr.
9274     if (Opcode == X86::MOVHLPSrr)
9275       return true;
9276     break;
9277   case X86::SHUFPDrri: {
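    // SHUFPD's immediate selects one f64 from each source; when moving to the
    // PackedSingle domain, rebuild the SHUFPS immediate that selects the
    // corresponding pair of f32 elements (0x44 picks the low qwords; 0x0a and
    // 0xa0 switch the first and second source to their high qwords).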
9278     if (Domain == 1) {
9279       unsigned Imm = MI.getOperand(3).getImm();
9280       unsigned NewImm = 0x44;
9281       if (Imm & 1)
9282         NewImm |= 0x0a;
9283       if (Imm & 2)
9284         NewImm |= 0xa0;
9285       MI.getOperand(3).setImm(NewImm);
9286       MI.setDesc(get(X86::SHUFPSrri));
9287     }
9288     return true;
9289   }
9290   }
9291   return false;
9292 }
9293 
9294 std::pair<uint16_t, uint16_t>
9295 X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
9296   uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9297   unsigned opcode = MI.getOpcode();
9298   uint16_t validDomains = 0;
9299   if (domain) {
9300     // Attempt to match for custom instructions.
9301     validDomains = getExecutionDomainCustom(MI);
9302     if (validDomains)
9303       return std::make_pair(domain, validDomains);
9304 
9305     if (lookup(opcode, domain, ReplaceableInstrs)) {
9306       validDomains = 0xe;
9307     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
9308       validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
9309     } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
9310       validDomains = 0x6;
9311     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
9312       // Insert/extract instructions should only affect the domain if AVX2
9313       // is enabled.
9314       if (!Subtarget.hasAVX2())
9315         return std::make_pair(0, 0);
9316       validDomains = 0xe;
9317     } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
9318       validDomains = 0xe;
9319     } else if (Subtarget.hasDQI() &&
9320                lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQ)) {
9321       validDomains = 0xe;
9322     } else if (Subtarget.hasDQI()) {
9323       if (const uint16_t *table =
9324               lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQMasked)) {
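        // Masked DQ operations can only switch between domains whose element
        // size matches the mask granularity: D forms pair with PackedSingle
        // (0xa) and Q forms with PackedDouble (0xc).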
9325         if (domain == 1 || (domain == 3 && table[3] == opcode))
9326           validDomains = 0xa;
9327         else
9328           validDomains = 0xc;
9329       }
9330     }
9331   }
9332   return std::make_pair(domain, validDomains);
9333 }
9334 
9335 void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
9336   assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
9337   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9338   assert(dom && "Not an SSE instruction");
9339 
9340   // Attempt to match for custom instructions.
9341   if (setExecutionDomainCustom(MI, Domain))
9342     return;
9343 
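  // Try each replacement table in turn; a matching row holds the equivalent
  // opcode for every execution domain, so switching domains is just a
  // setDesc() to the column for the requested domain.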
9344   const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
9345   if (!table) { // try the other table
9346     assert((Subtarget.hasAVX2() || Domain < 3) &&
9347            "256-bit vector operations only available in AVX2");
9348     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
9349   }
9350   if (!table) { // try the FP table
9351     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
9352     assert((!table || Domain < 3) &&
9353            "Can only select PackedSingle or PackedDouble");
9354   }
9355   if (!table) { // try the other table
9356     assert(Subtarget.hasAVX2() &&
9357            "256-bit insert/extract only available in AVX2");
9358     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
9359   }
9360   if (!table) { // try the AVX512 table
9361     assert(Subtarget.hasAVX512() && "Requires AVX-512");
9362     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
9363     // Don't change integer Q instructions to D instructions.
9364     if (table && Domain == 3 && table[3] == MI.getOpcode())
9365       Domain = 4;
9366   }
9367   if (!table) { // try the AVX512DQ table
9368     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9369     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
9370     // Don't change integer Q instructions to D instructions and
9371     // use D instructions if we started with a PS instruction.
9372     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9373       Domain = 4;
9374   }
9375   if (!table) { // try the AVX512DQMasked table
9376     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9377     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
9378     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9379       Domain = 4;
9380   }
9381   assert(table && "Cannot change domain");
9382   MI.setDesc(get(table[Domain - 1]));
9383 }
9384 
9385 void X86InstrInfo::insertNoop(MachineBasicBlock &MBB,
9386                               MachineBasicBlock::iterator MI) const {
9387   DebugLoc DL;
9388   BuildMI(MBB, MI, DL, get(X86::NOOP));
9389 }
9390 
9391 /// Return the noop instruction to use for a noop.
9392 MCInst X86InstrInfo::getNop() const {
9393   MCInst Nop;
9394   Nop.setOpcode(X86::NOOP);
9395   return Nop;
9396 }
9397 
9398 bool X86InstrInfo::isHighLatencyDef(int opc) const {
9399   switch (opc) {
9400   default:
9401     return false;
9402   case X86::DIVPDrm:
9403   case X86::DIVPDrr:
9404   case X86::DIVPSrm:
9405   case X86::DIVPSrr:
9406   case X86::DIVSDrm:
9407   case X86::DIVSDrm_Int:
9408   case X86::DIVSDrr:
9409   case X86::DIVSDrr_Int:
9410   case X86::DIVSSrm:
9411   case X86::DIVSSrm_Int:
9412   case X86::DIVSSrr:
9413   case X86::DIVSSrr_Int:
9414   case X86::SQRTPDm:
9415   case X86::SQRTPDr:
9416   case X86::SQRTPSm:
9417   case X86::SQRTPSr:
9418   case X86::SQRTSDm:
9419   case X86::SQRTSDm_Int:
9420   case X86::SQRTSDr:
9421   case X86::SQRTSDr_Int:
9422   case X86::SQRTSSm:
9423   case X86::SQRTSSm_Int:
9424   case X86::SQRTSSr:
9425   case X86::SQRTSSr_Int:
9426   // AVX instructions with high latency
9427   case X86::VDIVPDrm:
9428   case X86::VDIVPDrr:
9429   case X86::VDIVPDYrm:
9430   case X86::VDIVPDYrr:
9431   case X86::VDIVPSrm:
9432   case X86::VDIVPSrr:
9433   case X86::VDIVPSYrm:
9434   case X86::VDIVPSYrr:
9435   case X86::VDIVSDrm:
9436   case X86::VDIVSDrm_Int:
9437   case X86::VDIVSDrr:
9438   case X86::VDIVSDrr_Int:
9439   case X86::VDIVSSrm:
9440   case X86::VDIVSSrm_Int:
9441   case X86::VDIVSSrr:
9442   case X86::VDIVSSrr_Int:
9443   case X86::VSQRTPDm:
9444   case X86::VSQRTPDr:
9445   case X86::VSQRTPDYm:
9446   case X86::VSQRTPDYr:
9447   case X86::VSQRTPSm:
9448   case X86::VSQRTPSr:
9449   case X86::VSQRTPSYm:
9450   case X86::VSQRTPSYr:
9451   case X86::VSQRTSDm:
9452   case X86::VSQRTSDm_Int:
9453   case X86::VSQRTSDr:
9454   case X86::VSQRTSDr_Int:
9455   case X86::VSQRTSSm:
9456   case X86::VSQRTSSm_Int:
9457   case X86::VSQRTSSr:
9458   case X86::VSQRTSSr_Int:
9459   // AVX512 instructions with high latency
9460   case X86::VDIVPDZ128rm:
9461   case X86::VDIVPDZ128rmb:
9462   case X86::VDIVPDZ128rmbk:
9463   case X86::VDIVPDZ128rmbkz:
9464   case X86::VDIVPDZ128rmk:
9465   case X86::VDIVPDZ128rmkz:
9466   case X86::VDIVPDZ128rr:
9467   case X86::VDIVPDZ128rrk:
9468   case X86::VDIVPDZ128rrkz:
9469   case X86::VDIVPDZ256rm:
9470   case X86::VDIVPDZ256rmb:
9471   case X86::VDIVPDZ256rmbk:
9472   case X86::VDIVPDZ256rmbkz:
9473   case X86::VDIVPDZ256rmk:
9474   case X86::VDIVPDZ256rmkz:
9475   case X86::VDIVPDZ256rr:
9476   case X86::VDIVPDZ256rrk:
9477   case X86::VDIVPDZ256rrkz:
9478   case X86::VDIVPDZrrb:
9479   case X86::VDIVPDZrrbk:
9480   case X86::VDIVPDZrrbkz:
9481   case X86::VDIVPDZrm:
9482   case X86::VDIVPDZrmb:
9483   case X86::VDIVPDZrmbk:
9484   case X86::VDIVPDZrmbkz:
9485   case X86::VDIVPDZrmk:
9486   case X86::VDIVPDZrmkz:
9487   case X86::VDIVPDZrr:
9488   case X86::VDIVPDZrrk:
9489   case X86::VDIVPDZrrkz:
9490   case X86::VDIVPSZ128rm:
9491   case X86::VDIVPSZ128rmb:
9492   case X86::VDIVPSZ128rmbk:
9493   case X86::VDIVPSZ128rmbkz:
9494   case X86::VDIVPSZ128rmk:
9495   case X86::VDIVPSZ128rmkz:
9496   case X86::VDIVPSZ128rr:
9497   case X86::VDIVPSZ128rrk:
9498   case X86::VDIVPSZ128rrkz:
9499   case X86::VDIVPSZ256rm:
9500   case X86::VDIVPSZ256rmb:
9501   case X86::VDIVPSZ256rmbk:
9502   case X86::VDIVPSZ256rmbkz:
9503   case X86::VDIVPSZ256rmk:
9504   case X86::VDIVPSZ256rmkz:
9505   case X86::VDIVPSZ256rr:
9506   case X86::VDIVPSZ256rrk:
9507   case X86::VDIVPSZ256rrkz:
9508   case X86::VDIVPSZrrb:
9509   case X86::VDIVPSZrrbk:
9510   case X86::VDIVPSZrrbkz:
9511   case X86::VDIVPSZrm:
9512   case X86::VDIVPSZrmb:
9513   case X86::VDIVPSZrmbk:
9514   case X86::VDIVPSZrmbkz:
9515   case X86::VDIVPSZrmk:
9516   case X86::VDIVPSZrmkz:
9517   case X86::VDIVPSZrr:
9518   case X86::VDIVPSZrrk:
9519   case X86::VDIVPSZrrkz:
9520   case X86::VDIVSDZrm:
9521   case X86::VDIVSDZrr:
9522   case X86::VDIVSDZrm_Int:
9523   case X86::VDIVSDZrmk_Int:
9524   case X86::VDIVSDZrmkz_Int:
9525   case X86::VDIVSDZrr_Int:
9526   case X86::VDIVSDZrrk_Int:
9527   case X86::VDIVSDZrrkz_Int:
9528   case X86::VDIVSDZrrb_Int:
9529   case X86::VDIVSDZrrbk_Int:
9530   case X86::VDIVSDZrrbkz_Int:
9531   case X86::VDIVSSZrm:
9532   case X86::VDIVSSZrr:
9533   case X86::VDIVSSZrm_Int:
9534   case X86::VDIVSSZrmk_Int:
9535   case X86::VDIVSSZrmkz_Int:
9536   case X86::VDIVSSZrr_Int:
9537   case X86::VDIVSSZrrk_Int:
9538   case X86::VDIVSSZrrkz_Int:
9539   case X86::VDIVSSZrrb_Int:
9540   case X86::VDIVSSZrrbk_Int:
9541   case X86::VDIVSSZrrbkz_Int:
9542   case X86::VSQRTPDZ128m:
9543   case X86::VSQRTPDZ128mb:
9544   case X86::VSQRTPDZ128mbk:
9545   case X86::VSQRTPDZ128mbkz:
9546   case X86::VSQRTPDZ128mk:
9547   case X86::VSQRTPDZ128mkz:
9548   case X86::VSQRTPDZ128r:
9549   case X86::VSQRTPDZ128rk:
9550   case X86::VSQRTPDZ128rkz:
9551   case X86::VSQRTPDZ256m:
9552   case X86::VSQRTPDZ256mb:
9553   case X86::VSQRTPDZ256mbk:
9554   case X86::VSQRTPDZ256mbkz:
9555   case X86::VSQRTPDZ256mk:
9556   case X86::VSQRTPDZ256mkz:
9557   case X86::VSQRTPDZ256r:
9558   case X86::VSQRTPDZ256rk:
9559   case X86::VSQRTPDZ256rkz:
9560   case X86::VSQRTPDZm:
9561   case X86::VSQRTPDZmb:
9562   case X86::VSQRTPDZmbk:
9563   case X86::VSQRTPDZmbkz:
9564   case X86::VSQRTPDZmk:
9565   case X86::VSQRTPDZmkz:
9566   case X86::VSQRTPDZr:
9567   case X86::VSQRTPDZrb:
9568   case X86::VSQRTPDZrbk:
9569   case X86::VSQRTPDZrbkz:
9570   case X86::VSQRTPDZrk:
9571   case X86::VSQRTPDZrkz:
9572   case X86::VSQRTPSZ128m:
9573   case X86::VSQRTPSZ128mb:
9574   case X86::VSQRTPSZ128mbk:
9575   case X86::VSQRTPSZ128mbkz:
9576   case X86::VSQRTPSZ128mk:
9577   case X86::VSQRTPSZ128mkz:
9578   case X86::VSQRTPSZ128r:
9579   case X86::VSQRTPSZ128rk:
9580   case X86::VSQRTPSZ128rkz:
9581   case X86::VSQRTPSZ256m:
9582   case X86::VSQRTPSZ256mb:
9583   case X86::VSQRTPSZ256mbk:
9584   case X86::VSQRTPSZ256mbkz:
9585   case X86::VSQRTPSZ256mk:
9586   case X86::VSQRTPSZ256mkz:
9587   case X86::VSQRTPSZ256r:
9588   case X86::VSQRTPSZ256rk:
9589   case X86::VSQRTPSZ256rkz:
9590   case X86::VSQRTPSZm:
9591   case X86::VSQRTPSZmb:
9592   case X86::VSQRTPSZmbk:
9593   case X86::VSQRTPSZmbkz:
9594   case X86::VSQRTPSZmk:
9595   case X86::VSQRTPSZmkz:
9596   case X86::VSQRTPSZr:
9597   case X86::VSQRTPSZrb:
9598   case X86::VSQRTPSZrbk:
9599   case X86::VSQRTPSZrbkz:
9600   case X86::VSQRTPSZrk:
9601   case X86::VSQRTPSZrkz:
9602   case X86::VSQRTSDZm:
9603   case X86::VSQRTSDZm_Int:
9604   case X86::VSQRTSDZmk_Int:
9605   case X86::VSQRTSDZmkz_Int:
9606   case X86::VSQRTSDZr:
9607   case X86::VSQRTSDZr_Int:
9608   case X86::VSQRTSDZrk_Int:
9609   case X86::VSQRTSDZrkz_Int:
9610   case X86::VSQRTSDZrb_Int:
9611   case X86::VSQRTSDZrbk_Int:
9612   case X86::VSQRTSDZrbkz_Int:
9613   case X86::VSQRTSSZm:
9614   case X86::VSQRTSSZm_Int:
9615   case X86::VSQRTSSZmk_Int:
9616   case X86::VSQRTSSZmkz_Int:
9617   case X86::VSQRTSSZr:
9618   case X86::VSQRTSSZr_Int:
9619   case X86::VSQRTSSZrk_Int:
9620   case X86::VSQRTSSZrkz_Int:
9621   case X86::VSQRTSSZrb_Int:
9622   case X86::VSQRTSSZrbk_Int:
9623   case X86::VSQRTSSZrbkz_Int:
9624 
9625   case X86::VGATHERDPDYrm:
9626   case X86::VGATHERDPDZ128rm:
9627   case X86::VGATHERDPDZ256rm:
9628   case X86::VGATHERDPDZrm:
9629   case X86::VGATHERDPDrm:
9630   case X86::VGATHERDPSYrm:
9631   case X86::VGATHERDPSZ128rm:
9632   case X86::VGATHERDPSZ256rm:
9633   case X86::VGATHERDPSZrm:
9634   case X86::VGATHERDPSrm:
9635   case X86::VGATHERPF0DPDm:
9636   case X86::VGATHERPF0DPSm:
9637   case X86::VGATHERPF0QPDm:
9638   case X86::VGATHERPF0QPSm:
9639   case X86::VGATHERPF1DPDm:
9640   case X86::VGATHERPF1DPSm:
9641   case X86::VGATHERPF1QPDm:
9642   case X86::VGATHERPF1QPSm:
9643   case X86::VGATHERQPDYrm:
9644   case X86::VGATHERQPDZ128rm:
9645   case X86::VGATHERQPDZ256rm:
9646   case X86::VGATHERQPDZrm:
9647   case X86::VGATHERQPDrm:
9648   case X86::VGATHERQPSYrm:
9649   case X86::VGATHERQPSZ128rm:
9650   case X86::VGATHERQPSZ256rm:
9651   case X86::VGATHERQPSZrm:
9652   case X86::VGATHERQPSrm:
9653   case X86::VPGATHERDDYrm:
9654   case X86::VPGATHERDDZ128rm:
9655   case X86::VPGATHERDDZ256rm:
9656   case X86::VPGATHERDDZrm:
9657   case X86::VPGATHERDDrm:
9658   case X86::VPGATHERDQYrm:
9659   case X86::VPGATHERDQZ128rm:
9660   case X86::VPGATHERDQZ256rm:
9661   case X86::VPGATHERDQZrm:
9662   case X86::VPGATHERDQrm:
9663   case X86::VPGATHERQDYrm:
9664   case X86::VPGATHERQDZ128rm:
9665   case X86::VPGATHERQDZ256rm:
9666   case X86::VPGATHERQDZrm:
9667   case X86::VPGATHERQDrm:
9668   case X86::VPGATHERQQYrm:
9669   case X86::VPGATHERQQZ128rm:
9670   case X86::VPGATHERQQZ256rm:
9671   case X86::VPGATHERQQZrm:
9672   case X86::VPGATHERQQrm:
9673   case X86::VSCATTERDPDZ128mr:
9674   case X86::VSCATTERDPDZ256mr:
9675   case X86::VSCATTERDPDZmr:
9676   case X86::VSCATTERDPSZ128mr:
9677   case X86::VSCATTERDPSZ256mr:
9678   case X86::VSCATTERDPSZmr:
9679   case X86::VSCATTERPF0DPDm:
9680   case X86::VSCATTERPF0DPSm:
9681   case X86::VSCATTERPF0QPDm:
9682   case X86::VSCATTERPF0QPSm:
9683   case X86::VSCATTERPF1DPDm:
9684   case X86::VSCATTERPF1DPSm:
9685   case X86::VSCATTERPF1QPDm:
9686   case X86::VSCATTERPF1QPSm:
9687   case X86::VSCATTERQPDZ128mr:
9688   case X86::VSCATTERQPDZ256mr:
9689   case X86::VSCATTERQPDZmr:
9690   case X86::VSCATTERQPSZ128mr:
9691   case X86::VSCATTERQPSZ256mr:
9692   case X86::VSCATTERQPSZmr:
9693   case X86::VPSCATTERDDZ128mr:
9694   case X86::VPSCATTERDDZ256mr:
9695   case X86::VPSCATTERDDZmr:
9696   case X86::VPSCATTERDQZ128mr:
9697   case X86::VPSCATTERDQZ256mr:
9698   case X86::VPSCATTERDQZmr:
9699   case X86::VPSCATTERQDZ128mr:
9700   case X86::VPSCATTERQDZ256mr:
9701   case X86::VPSCATTERQDZmr:
9702   case X86::VPSCATTERQQZ128mr:
9703   case X86::VPSCATTERQQZ256mr:
9704   case X86::VPSCATTERQQZmr:
9705     return true;
9706   }
9707 }
9708 
9709 bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
9710                                          const MachineRegisterInfo *MRI,
9711                                          const MachineInstr &DefMI,
9712                                          unsigned DefIdx,
9713                                          const MachineInstr &UseMI,
9714                                          unsigned UseIdx) const {
9715   return isHighLatencyDef(DefMI.getOpcode());
9716 }
9717 
9718 bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
9719                                            const MachineBasicBlock *MBB) const {
9720   assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
9721          Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
9722 
9723   // Integer binary math/logic instructions have a third source operand:
9724   // the EFLAGS register. That operand must be both defined here and never
9725   // used; i.e., it must be dead. If the EFLAGS operand is live, then we
9726   // cannot change anything because rearranging the operands could affect other
9727   // instructions that depend on the exact status flags (zero, sign, etc.)
9728   // that are set by using these particular operands with this operation.
9729   const MachineOperand *FlagDef =
9730       Inst.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
9731   assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
9732   if (FlagDef && !FlagDef->isDead())
9733     return false;
9734 
9735   return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
9736 }
9737 
9738 // TODO: There are many more machine instruction opcodes to match:
9739 //       1. Other data types (integer, vectors)
9740 //       2. Other math / logic operations (xor, or)
9741 //       3. Other forms of the same operation (intrinsics and other variants)
9742 bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
9743                                                bool Invert) const {
9744   if (Invert)
9745     return false;
9746   switch (Inst.getOpcode()) {
9747   CASE_ND(ADD8rr)
9748   CASE_ND(ADD16rr)
9749   CASE_ND(ADD32rr)
9750   CASE_ND(ADD64rr)
9751   CASE_ND(AND8rr)
9752   CASE_ND(AND16rr)
9753   CASE_ND(AND32rr)
9754   CASE_ND(AND64rr)
9755   CASE_ND(OR8rr)
9756   CASE_ND(OR16rr)
9757   CASE_ND(OR32rr)
9758   CASE_ND(OR64rr)
9759   CASE_ND(XOR8rr)
9760   CASE_ND(XOR16rr)
9761   CASE_ND(XOR32rr)
9762   CASE_ND(XOR64rr)
9763   CASE_ND(IMUL16rr)
9764   CASE_ND(IMUL32rr)
9765   CASE_ND(IMUL64rr)
9766   case X86::PANDrr:
9767   case X86::PORrr:
9768   case X86::PXORrr:
9769   case X86::ANDPDrr:
9770   case X86::ANDPSrr:
9771   case X86::ORPDrr:
9772   case X86::ORPSrr:
9773   case X86::XORPDrr:
9774   case X86::XORPSrr:
9775   case X86::PADDBrr:
9776   case X86::PADDWrr:
9777   case X86::PADDDrr:
9778   case X86::PADDQrr:
9779   case X86::PMULLWrr:
9780   case X86::PMULLDrr:
9781   case X86::PMAXSBrr:
9782   case X86::PMAXSDrr:
9783   case X86::PMAXSWrr:
9784   case X86::PMAXUBrr:
9785   case X86::PMAXUDrr:
9786   case X86::PMAXUWrr:
9787   case X86::PMINSBrr:
9788   case X86::PMINSDrr:
9789   case X86::PMINSWrr:
9790   case X86::PMINUBrr:
9791   case X86::PMINUDrr:
9792   case X86::PMINUWrr:
9793   case X86::VPANDrr:
9794   case X86::VPANDYrr:
9795   case X86::VPANDDZ128rr:
9796   case X86::VPANDDZ256rr:
9797   case X86::VPANDDZrr:
9798   case X86::VPANDQZ128rr:
9799   case X86::VPANDQZ256rr:
9800   case X86::VPANDQZrr:
9801   case X86::VPORrr:
9802   case X86::VPORYrr:
9803   case X86::VPORDZ128rr:
9804   case X86::VPORDZ256rr:
9805   case X86::VPORDZrr:
9806   case X86::VPORQZ128rr:
9807   case X86::VPORQZ256rr:
9808   case X86::VPORQZrr:
9809   case X86::VPXORrr:
9810   case X86::VPXORYrr:
9811   case X86::VPXORDZ128rr:
9812   case X86::VPXORDZ256rr:
9813   case X86::VPXORDZrr:
9814   case X86::VPXORQZ128rr:
9815   case X86::VPXORQZ256rr:
9816   case X86::VPXORQZrr:
9817   case X86::VANDPDrr:
9818   case X86::VANDPSrr:
9819   case X86::VANDPDYrr:
9820   case X86::VANDPSYrr:
9821   case X86::VANDPDZ128rr:
9822   case X86::VANDPSZ128rr:
9823   case X86::VANDPDZ256rr:
9824   case X86::VANDPSZ256rr:
9825   case X86::VANDPDZrr:
9826   case X86::VANDPSZrr:
9827   case X86::VORPDrr:
9828   case X86::VORPSrr:
9829   case X86::VORPDYrr:
9830   case X86::VORPSYrr:
9831   case X86::VORPDZ128rr:
9832   case X86::VORPSZ128rr:
9833   case X86::VORPDZ256rr:
9834   case X86::VORPSZ256rr:
9835   case X86::VORPDZrr:
9836   case X86::VORPSZrr:
9837   case X86::VXORPDrr:
9838   case X86::VXORPSrr:
9839   case X86::VXORPDYrr:
9840   case X86::VXORPSYrr:
9841   case X86::VXORPDZ128rr:
9842   case X86::VXORPSZ128rr:
9843   case X86::VXORPDZ256rr:
9844   case X86::VXORPSZ256rr:
9845   case X86::VXORPDZrr:
9846   case X86::VXORPSZrr:
9847   case X86::KADDBkk:
9848   case X86::KADDWkk:
9849   case X86::KADDDkk:
9850   case X86::KADDQkk:
9851   case X86::KANDBkk:
9852   case X86::KANDWkk:
9853   case X86::KANDDkk:
9854   case X86::KANDQkk:
9855   case X86::KORBkk:
9856   case X86::KORWkk:
9857   case X86::KORDkk:
9858   case X86::KORQkk:
9859   case X86::KXORBkk:
9860   case X86::KXORWkk:
9861   case X86::KXORDkk:
9862   case X86::KXORQkk:
9863   case X86::VPADDBrr:
9864   case X86::VPADDWrr:
9865   case X86::VPADDDrr:
9866   case X86::VPADDQrr:
9867   case X86::VPADDBYrr:
9868   case X86::VPADDWYrr:
9869   case X86::VPADDDYrr:
9870   case X86::VPADDQYrr:
9871   case X86::VPADDBZ128rr:
9872   case X86::VPADDWZ128rr:
9873   case X86::VPADDDZ128rr:
9874   case X86::VPADDQZ128rr:
9875   case X86::VPADDBZ256rr:
9876   case X86::VPADDWZ256rr:
9877   case X86::VPADDDZ256rr:
9878   case X86::VPADDQZ256rr:
9879   case X86::VPADDBZrr:
9880   case X86::VPADDWZrr:
9881   case X86::VPADDDZrr:
9882   case X86::VPADDQZrr:
9883   case X86::VPMULLWrr:
9884   case X86::VPMULLWYrr:
9885   case X86::VPMULLWZ128rr:
9886   case X86::VPMULLWZ256rr:
9887   case X86::VPMULLWZrr:
9888   case X86::VPMULLDrr:
9889   case X86::VPMULLDYrr:
9890   case X86::VPMULLDZ128rr:
9891   case X86::VPMULLDZ256rr:
9892   case X86::VPMULLDZrr:
9893   case X86::VPMULLQZ128rr:
9894   case X86::VPMULLQZ256rr:
9895   case X86::VPMULLQZrr:
9896   case X86::VPMAXSBrr:
9897   case X86::VPMAXSBYrr:
9898   case X86::VPMAXSBZ128rr:
9899   case X86::VPMAXSBZ256rr:
9900   case X86::VPMAXSBZrr:
9901   case X86::VPMAXSDrr:
9902   case X86::VPMAXSDYrr:
9903   case X86::VPMAXSDZ128rr:
9904   case X86::VPMAXSDZ256rr:
9905   case X86::VPMAXSDZrr:
9906   case X86::VPMAXSQZ128rr:
9907   case X86::VPMAXSQZ256rr:
9908   case X86::VPMAXSQZrr:
9909   case X86::VPMAXSWrr:
9910   case X86::VPMAXSWYrr:
9911   case X86::VPMAXSWZ128rr:
9912   case X86::VPMAXSWZ256rr:
9913   case X86::VPMAXSWZrr:
9914   case X86::VPMAXUBrr:
9915   case X86::VPMAXUBYrr:
9916   case X86::VPMAXUBZ128rr:
9917   case X86::VPMAXUBZ256rr:
9918   case X86::VPMAXUBZrr:
9919   case X86::VPMAXUDrr:
9920   case X86::VPMAXUDYrr:
9921   case X86::VPMAXUDZ128rr:
9922   case X86::VPMAXUDZ256rr:
9923   case X86::VPMAXUDZrr:
9924   case X86::VPMAXUQZ128rr:
9925   case X86::VPMAXUQZ256rr:
9926   case X86::VPMAXUQZrr:
9927   case X86::VPMAXUWrr:
9928   case X86::VPMAXUWYrr:
9929   case X86::VPMAXUWZ128rr:
9930   case X86::VPMAXUWZ256rr:
9931   case X86::VPMAXUWZrr:
9932   case X86::VPMINSBrr:
9933   case X86::VPMINSBYrr:
9934   case X86::VPMINSBZ128rr:
9935   case X86::VPMINSBZ256rr:
9936   case X86::VPMINSBZrr:
9937   case X86::VPMINSDrr:
9938   case X86::VPMINSDYrr:
9939   case X86::VPMINSDZ128rr:
9940   case X86::VPMINSDZ256rr:
9941   case X86::VPMINSDZrr:
9942   case X86::VPMINSQZ128rr:
9943   case X86::VPMINSQZ256rr:
9944   case X86::VPMINSQZrr:
9945   case X86::VPMINSWrr:
9946   case X86::VPMINSWYrr:
9947   case X86::VPMINSWZ128rr:
9948   case X86::VPMINSWZ256rr:
9949   case X86::VPMINSWZrr:
9950   case X86::VPMINUBrr:
9951   case X86::VPMINUBYrr:
9952   case X86::VPMINUBZ128rr:
9953   case X86::VPMINUBZ256rr:
9954   case X86::VPMINUBZrr:
9955   case X86::VPMINUDrr:
9956   case X86::VPMINUDYrr:
9957   case X86::VPMINUDZ128rr:
9958   case X86::VPMINUDZ256rr:
9959   case X86::VPMINUDZrr:
9960   case X86::VPMINUQZ128rr:
9961   case X86::VPMINUQZ256rr:
9962   case X86::VPMINUQZrr:
9963   case X86::VPMINUWrr:
9964   case X86::VPMINUWYrr:
9965   case X86::VPMINUWZ128rr:
9966   case X86::VPMINUWZ256rr:
9967   case X86::VPMINUWZrr:
9968   // Normal min/max instructions are not commutative because of NaN and signed
9969   // zero semantics, but these are. Thus, there's no need to check for global
9970   // relaxed math; the instructions themselves have the properties we need.
9971   case X86::MAXCPDrr:
9972   case X86::MAXCPSrr:
9973   case X86::MAXCSDrr:
9974   case X86::MAXCSSrr:
9975   case X86::MINCPDrr:
9976   case X86::MINCPSrr:
9977   case X86::MINCSDrr:
9978   case X86::MINCSSrr:
9979   case X86::VMAXCPDrr:
9980   case X86::VMAXCPSrr:
9981   case X86::VMAXCPDYrr:
9982   case X86::VMAXCPSYrr:
9983   case X86::VMAXCPDZ128rr:
9984   case X86::VMAXCPSZ128rr:
9985   case X86::VMAXCPDZ256rr:
9986   case X86::VMAXCPSZ256rr:
9987   case X86::VMAXCPDZrr:
9988   case X86::VMAXCPSZrr:
9989   case X86::VMAXCSDrr:
9990   case X86::VMAXCSSrr:
9991   case X86::VMAXCSDZrr:
9992   case X86::VMAXCSSZrr:
9993   case X86::VMINCPDrr:
9994   case X86::VMINCPSrr:
9995   case X86::VMINCPDYrr:
9996   case X86::VMINCPSYrr:
9997   case X86::VMINCPDZ128rr:
9998   case X86::VMINCPSZ128rr:
9999   case X86::VMINCPDZ256rr:
10000   case X86::VMINCPSZ256rr:
10001   case X86::VMINCPDZrr:
10002   case X86::VMINCPSZrr:
10003   case X86::VMINCSDrr:
10004   case X86::VMINCSSrr:
10005   case X86::VMINCSDZrr:
10006   case X86::VMINCSSZrr:
10007   case X86::VMAXCPHZ128rr:
10008   case X86::VMAXCPHZ256rr:
10009   case X86::VMAXCPHZrr:
10010   case X86::VMAXCSHZrr:
10011   case X86::VMINCPHZ128rr:
10012   case X86::VMINCPHZ256rr:
10013   case X86::VMINCPHZrr:
10014   case X86::VMINCSHZrr:
10015     return true;
10016   case X86::ADDPDrr:
10017   case X86::ADDPSrr:
10018   case X86::ADDSDrr:
10019   case X86::ADDSSrr:
10020   case X86::MULPDrr:
10021   case X86::MULPSrr:
10022   case X86::MULSDrr:
10023   case X86::MULSSrr:
10024   case X86::VADDPDrr:
10025   case X86::VADDPSrr:
10026   case X86::VADDPDYrr:
10027   case X86::VADDPSYrr:
10028   case X86::VADDPDZ128rr:
10029   case X86::VADDPSZ128rr:
10030   case X86::VADDPDZ256rr:
10031   case X86::VADDPSZ256rr:
10032   case X86::VADDPDZrr:
10033   case X86::VADDPSZrr:
10034   case X86::VADDSDrr:
10035   case X86::VADDSSrr:
10036   case X86::VADDSDZrr:
10037   case X86::VADDSSZrr:
10038   case X86::VMULPDrr:
10039   case X86::VMULPSrr:
10040   case X86::VMULPDYrr:
10041   case X86::VMULPSYrr:
10042   case X86::VMULPDZ128rr:
10043   case X86::VMULPSZ128rr:
10044   case X86::VMULPDZ256rr:
10045   case X86::VMULPSZ256rr:
10046   case X86::VMULPDZrr:
10047   case X86::VMULPSZrr:
10048   case X86::VMULSDrr:
10049   case X86::VMULSSrr:
10050   case X86::VMULSDZrr:
10051   case X86::VMULSSZrr:
10052   case X86::VADDPHZ128rr:
10053   case X86::VADDPHZ256rr:
10054   case X86::VADDPHZrr:
10055   case X86::VADDSHZrr:
10056   case X86::VMULPHZ128rr:
10057   case X86::VMULPHZ256rr:
10058   case X86::VMULPHZrr:
10059   case X86::VMULSHZrr:
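    // FP adds and multiplies are only reassociable when this instruction was
    // emitted with both the 'reassoc' and 'nsz' fast-math flags.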
10060     return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
10061            Inst.getFlag(MachineInstr::MIFlag::FmNsz);
10062   default:
10063     return false;
10064   }
10065 }
10066 
10067 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
10068 /// register then, if possible, describe the value in terms of the source
10069 /// register.
10070 static std::optional<ParamLoadedValue>
10071 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
10072                          const TargetRegisterInfo *TRI) {
10073   Register DestReg = MI.getOperand(0).getReg();
10074   Register SrcReg = MI.getOperand(1).getReg();
10075 
10076   auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
10077 
10078   // If the described register is the destination, just return the source.
10079   if (DestReg == DescribedReg)
10080     return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
10081 
10082   // If the described register is a sub-register of the destination register,
10083   // then pick out the source register's corresponding sub-register.
10084   if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
10085     Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
10086     return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
10087   }
10088 
10089   // The remaining case to consider is when the described register is a
10090   // super-register of the destination register. MOV8rr and MOV16rr does not
10091   // write to any of the other bytes in the register, meaning that we'd have to
10092   // describe the value using a combination of the source register and the
10093   // non-overlapping bits in the described register, which is not currently
10094   // possible.
10095   if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
10096       !TRI->isSuperRegister(DestReg, DescribedReg))
10097     return std::nullopt;
10098 
10099   assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
10100   return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
10101 }
10102 
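/// Describe, where possible, the value that \p Reg holds after \p MI
/// executes, as a machine operand plus a DIExpression; this backs call-site
/// parameter debug info. Opcodes not handled here defer to the generic
/// TargetInstrInfo implementation.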
std::optional<ParamLoadedValue>
X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
  const MachineOperand *Op = nullptr;
  DIExpression *Expr = nullptr;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  switch (MI.getOpcode()) {
  case X86::LEA32r:
  case X86::LEA64r:
  case X86::LEA64_32r: {
    // We may need to describe a 64-bit parameter with a 32-bit LEA.
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return std::nullopt;

    // Operand 4 could be a global address. For now we do not support this
    // situation.
    if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
      return std::nullopt;

    const MachineOperand &Op1 = MI.getOperand(1);
    const MachineOperand &Op2 = MI.getOperand(3);
    assert(Op2.isReg() &&
           (Op2.getReg() == X86::NoRegister || Op2.getReg().isPhysical()));

    // Omit situations where the base or index register overlaps the
    // destination register, e.g.:
    //   %rsi = lea %rsi, 4, ...
    if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
        Op2.getReg() == MI.getOperand(0).getReg())
      return std::nullopt;
    else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
              TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
             (Op2.getReg() != X86::NoRegister &&
              TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
      return std::nullopt;

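    // The code below builds a DWARF expression for base + index * scale +
    // displacement. As a rough sketch (using SysV x86-64 DWARF register
    // numbers), for
    //   $rax = LEA64r $rdi, 4, $rsi, 16, $noreg
    // the loaded value is the base register $rdi with the expression
    //   DW_OP_breg4 (RSI) 0, DW_OP_constu 4, DW_OP_mul, DW_OP_plus,
    //   DW_OP_plus_uconst 16.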
    int64_t Coef = MI.getOperand(2).getImm();
    int64_t Offset = MI.getOperand(4).getImm();
    SmallVector<uint64_t, 8> Ops;

    if (Op1.isReg() && Op1.getReg() != X86::NoRegister)
      Op = &Op1;
    else if (Op1.isFI())
      Op = &Op1;

    if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
      Ops.push_back(dwarf::DW_OP_constu);
      Ops.push_back(Coef + 1);
      Ops.push_back(dwarf::DW_OP_mul);
    } else {
      if (Op && Op2.getReg() != X86::NoRegister) {
        int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
        if (dwarfReg < 0)
          return std::nullopt;
        else if (dwarfReg < 32) {
          Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
          Ops.push_back(0);
        } else {
          Ops.push_back(dwarf::DW_OP_bregx);
          Ops.push_back(dwarfReg);
          Ops.push_back(0);
        }
      } else if (!Op) {
        assert(Op2.getReg() != X86::NoRegister);
        Op = &Op2;
      }

      if (Coef > 1) {
        assert(Op2.getReg() != X86::NoRegister);
        Ops.push_back(dwarf::DW_OP_constu);
        Ops.push_back(Coef);
        Ops.push_back(dwarf::DW_OP_mul);
      }

      if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
          Op2.getReg() != X86::NoRegister) {
        Ops.push_back(dwarf::DW_OP_plus);
      }
    }

    DIExpression::appendOffset(Ops, Offset);
    Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);

    return ParamLoadedValue(*Op, Expr);
  }
  case X86::MOV8ri:
  case X86::MOV16ri:
    // TODO: Handle MOV8ri and MOV16ri.
    return std::nullopt;
  case X86::MOV32ri:
  case X86::MOV64ri:
  case X86::MOV64ri32:
    // MOV32ri may be used for producing zero-extended 32-bit immediates in
    // 64-bit parameters, so we need to consider super-registers.
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return std::nullopt;
    return ParamLoadedValue(MI.getOperand(1), Expr);
  case X86::MOV8rr:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
    return describeMOVrrLoadedValue(MI, Reg, TRI);
  case X86::XOR32rr: {
    // 64-bit parameters are zero-materialized using XOR32rr, so also consider
    // super-registers.
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return std::nullopt;
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
    return std::nullopt;
  }
  case X86::MOVSX64rr32: {
    // We may need to describe the lower 32 bits of the MOVSX; for example, in
    // cases like this:
    //
    //  $ebx = [...]
    //  $rdi = MOVSX64rr32 $ebx
    //  $esi = MOV32rr $edi
    if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
      return std::nullopt;

    Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});

    // If the described register is the destination register we need to
    // sign-extend the source register from 32 bits. The other case we handle
    // is when the described register is the 32-bit sub-register of the
    // destination register, in which case we just need to return the source
    // register.
    if (Reg == MI.getOperand(0).getReg())
      Expr = DIExpression::appendExt(Expr, 32, 64, true);
    else
      assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
             "Unhandled sub-register case for MOVSX64rr32");

    return ParamLoadedValue(MI.getOperand(1), Expr);
  }
  default:
    assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
    return TargetInstrInfo::describeLoadedValue(MI, Reg);
  }
}

/// This is an architecture-specific helper function of reassociateOps.
/// Set special operand attributes for new instructions after reassociation.
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
                                         MachineInstr &OldMI2,
                                         MachineInstr &NewMI1,
                                         MachineInstr &NewMI2) const {
  // Integer instructions may define an implicit EFLAGS dest register operand.
  MachineOperand *OldFlagDef1 =
      OldMI1.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
  MachineOperand *OldFlagDef2 =
      OldMI2.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);

  assert(!OldFlagDef1 == !OldFlagDef2 &&
         "Unexpected instruction type for reassociation");

  if (!OldFlagDef1 || !OldFlagDef2)
    return;

  assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
         "Must have dead EFLAGS operand in reassociable instruction");

  MachineOperand *NewFlagDef1 =
      NewMI1.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
  MachineOperand *NewFlagDef2 =
      NewMI2.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);

  assert(NewFlagDef1 && NewFlagDef2 &&
         "Unexpected operand in reassociable instruction");

  // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
  // of this pass or other passes. The EFLAGS operands must be dead in these new
  // instructions because the EFLAGS operands in the original instructions must
  // be dead in order for reassociation to occur.
  NewFlagDef1->setIsDead();
  NewFlagDef2->setIsDead();
}

std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF, 0u);
}

ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace X86II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
      {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
      {MO_GOT, "x86-got"},
      {MO_GOTOFF, "x86-gotoff"},
      {MO_GOTPCREL, "x86-gotpcrel"},
      {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
      {MO_PLT, "x86-plt"},
      {MO_TLSGD, "x86-tlsgd"},
      {MO_TLSLD, "x86-tlsld"},
      {MO_TLSLDM, "x86-tlsldm"},
      {MO_GOTTPOFF, "x86-gottpoff"},
      {MO_INDNTPOFF, "x86-indntpoff"},
      {MO_TPOFF, "x86-tpoff"},
      {MO_DTPOFF, "x86-dtpoff"},
      {MO_NTPOFF, "x86-ntpoff"},
      {MO_GOTNTPOFF, "x86-gotntpoff"},
      {MO_DLLIMPORT, "x86-dllimport"},
      {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
      {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
      {MO_TLVP, "x86-tlvp"},
      {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
      {MO_SECREL, "x86-secrel"},
      {MO_COFFSTUB, "x86-coffstub"}};
  return ArrayRef(TargetFlags);
}

namespace {
/// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
struct CGBR : public MachineFunctionPass {
  static char ID;
  CGBR() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
    const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();

    // Only emit a global base reg in PIC mode.
    if (!TM->isPositionIndependent())
      return false;

    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    Register GlobalBaseReg = X86FI->getGlobalBaseReg();

    // If we didn't need a GlobalBaseReg, don't insert code.
    if (GlobalBaseReg == 0)
      return false;

    // Insert the set of GlobalBaseReg into the first MBB of the function.
    MachineBasicBlock &FirstMBB = MF.front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    const X86InstrInfo *TII = STI.getInstrInfo();

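    // With GOT-style PIC the PC is first materialized into a scratch virtual
    // register, and the global base register is then computed below as PC
    // plus the offset of _GLOBAL_OFFSET_TABLE_; otherwise the global base
    // register holds the PC value directly.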
    Register PC;
    if (STI.isPICStyleGOT())
      PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
    else
      PC = GlobalBaseReg;

    if (STI.is64Bit()) {
      if (TM->getCodeModel() == CodeModel::Large) {
        // In the large code model, we are aiming for this code, though the
        // register allocation may vary:
        //   leaq .LN$pb(%rip), %rax
        //   movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
        //   addq %rcx, %rax
        // RAX now holds the address of _GLOBAL_OFFSET_TABLE_.
        Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
        Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
            .addReg(X86::RIP)
            .addImm(0)
            .addReg(0)
            .addSym(MF.getPICBaseSymbol())
            .addReg(0);
        std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
            .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                               X86II::MO_PIC_BASE_OFFSET);
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
            .addReg(PBReg, RegState::Kill)
            .addReg(GOTReg, RegState::Kill);
      } else {
        // In other code models, use a RIP-relative LEA to materialize the
        // GOT.
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
            .addReg(X86::RIP)
            .addImm(0)
            .addReg(0)
            .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
            .addReg(0);
      }
    } else {
      // The operand of MovePCtoStack is completely ignored by the asm
      // printer; it is only used in JIT code emission as a displacement to
      // the PC.
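      // MOVPC32r is a pseudo that, roughly, expands to a call to the next
      // instruction followed by a pop of the return address, leaving the PC
      // in the destination register:
      //   calll .L0$pb
      // .L0$pb:
      //   popl %reg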
      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

      // If we're using vanilla 'GOT' PIC style, we should use relative
      // addressing not to the PC but to the _GLOBAL_OFFSET_TABLE_ external
      // symbol.
      if (STI.isPICStyleGOT()) {
        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
        // %some_register
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
            .addReg(PC)
            .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                               X86II::MO_GOT_ABSOLUTE_ADDRESS);
      }
    }

    return true;
  }

  StringRef getPassName() const override {
    return "X86 PIC Global Base Reg Initialization";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
} // namespace

char CGBR::ID = 0;
FunctionPass *llvm::createX86GlobalBaseRegPass() { return new CGBR(); }

namespace {
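/// Local-dynamic TLS access clean-up pass. Each TLS_base_addr pseudo expands
/// to a call to __tls_get_addr, so when a function makes several
/// local-dynamic TLS accesses, the first call's result is cached in a virtual
/// register and the dominated repeats are rewritten into plain copies.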
struct LDTLSCleanup : public MachineFunctionPass {
  static char ID;
  LDTLSCleanup() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))
      return false;

    X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
    if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
      // No point folding accesses if there aren't at least two.
      return false;
    }

    MachineDominatorTree *DT =
        &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
    return VisitNode(DT->getRootNode(), Register());
  }

  // Visit the dominator subtree rooted at Node in pre-order.
  // If TLSBaseAddrReg is valid, then use it to replace any
  // TLS_base_addr instructions. Otherwise, create the register
  // when the first such instruction is seen, and then use it
  // as we encounter more instructions.
  bool VisitNode(MachineDomTreeNode *Node, Register TLSBaseAddrReg) {
    MachineBasicBlock *BB = Node->getBlock();
    bool Changed = false;

    // Traverse the current block.
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
         ++I) {
      switch (I->getOpcode()) {
      case X86::TLS_base_addr32:
      case X86::TLS_base_addr64:
        if (TLSBaseAddrReg)
          I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
        else
          I = SetRegister(*I, &TLSBaseAddrReg);
        Changed = true;
        break;
      default:
        break;
      }
    }

    // Visit the children of this block in the dominator tree.
    for (auto &I : *Node) {
      Changed |= VisitNode(I, TLSBaseAddrReg);
    }

    return Changed;
  }

  // Replace the TLS_base_addr instruction I with a copy from
  // TLSBaseAddrReg, returning the new instruction.
  MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
                                       Register TLSBaseAddrReg) {
    MachineFunction *MF = I.getParent()->getParent();
    const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
    const bool is64Bit = STI.is64Bit();
    const X86InstrInfo *TII = STI.getInstrInfo();

    // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
    MachineInstr *Copy =
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
            .addReg(TLSBaseAddrReg);

    // Erase the TLS_base_addr instruction.
    I.eraseFromParent();

    return Copy;
  }

  // Create a virtual register in *TLSBaseAddrReg, and populate it by
  // inserting a copy instruction after I. Returns the new instruction.
  MachineInstr *SetRegister(MachineInstr &I, Register *TLSBaseAddrReg) {
    MachineFunction *MF = I.getParent()->getParent();
    const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
    const bool is64Bit = STI.is64Bit();
    const X86InstrInfo *TII = STI.getInstrInfo();

    // Create a virtual register for the TLS base address.
    MachineRegisterInfo &RegInfo = MF->getRegInfo();
    *TLSBaseAddrReg = RegInfo.createVirtualRegister(
        is64Bit ? &X86::GR64RegClass : &X86::GR32RegClass);

    // Insert a copy from RAX/EAX to TLSBaseAddrReg.
    MachineInstr *Next = I.getNextNode();
    MachineInstr *Copy = BuildMI(*I.getParent(), Next, I.getDebugLoc(),
                                 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
                             .addReg(is64Bit ? X86::RAX : X86::EAX);

    return Copy;
  }

  StringRef getPassName() const override {
    return "Local Dynamic TLS Access Clean-up";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
} // namespace

char LDTLSCleanup::ID = 0;
FunctionPass *llvm::createCleanupLocalDynamicTLSPass() {
  return new LDTLSCleanup();
}

/// Constants defining how certain sequences should be outlined.
///
/// \p MachineOutlinerDefault implies that the function is called with a call
/// instruction, and a return must be emitted for the outlined function frame.
///
/// That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION       I1
/// I3                                  I2
///                                     I3
///                                     ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION       I1
/// ret                                I2
///                                    ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass { MachineOutlinerDefault, MachineOutlinerTailCall };

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
X86InstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  unsigned SequenceSize = 0;
  for (auto &MI : RepeatedSequenceLocs[0]) {
    // FIXME: x86 doesn't implement getInstSizeInBytes, so
    // we can't tell the cost. Just assume each instruction
    // is one byte.
    if (MI.isDebugInstr() || MI.isKill())
      continue;
    SequenceSize += 1;
  }

  // Check whether CFI instructions are present and, if so, count how many
  // appear in the candidate sequence.
  unsigned CFICount = 0;
  for (auto &I : RepeatedSequenceLocs[0]) {
    if (I.isCFIInstruction())
      CFICount++;
  }

  // We compare the number of found CFI instructions to the number of CFI
  // instructions in the parent function for each candidate. We must check this
  // since if we outline one of the CFI instructions in a function, we have to
  // outline them all for correctness. If we do not, the address offsets will
  // be incorrect between the two sections of the program.
  for (outliner::Candidate &C : RepeatedSequenceLocs) {
    std::vector<MCCFIInstruction> CFIInstructions =
        C.getMF()->getFrameInstructions();

    if (CFICount > 0 && CFICount != CFIInstructions.size())
      return std::nullopt;
  }

  // FIXME: Use real size in bytes for call and ret instructions.
  if (RepeatedSequenceLocs[0].back().isTerminator()) {
    for (outliner::Candidate &C : RepeatedSequenceLocs)
      C.setCallInfo(MachineOutlinerTailCall, 1);

    return std::make_unique<outliner::OutlinedFunction>(
        RepeatedSequenceLocs, SequenceSize,
        0,                      // Number of bytes to emit frame.
        MachineOutlinerTailCall // Type of frame.
    );
  }

  if (CFICount > 0)
    return std::nullopt;

  for (outliner::Candidate &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, 1);

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, 1, MachineOutlinerDefault);
}

bool X86InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Does the function use a red zone? If it does, then we can't risk messing
  // with the stack.
  if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
    // It could have a red zone. If it does, then we don't want to touch it.
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    if (!X86FI || X86FI->getUsesRedZone())
      return false;
  }

  // If we *don't* want to outline from things that could potentially be
  // deduped then return false.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // This function is viable for outlining, so return true.
  return true;
}

outliner::InstrType
X86InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                   MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // Is this a terminator for a basic block?
  if (MI.isTerminator())
    // TargetInstrInfo::getOutliningType has already filtered out anything
    // that would break this, so we can allow it here.
    return outliner::InstrType::Legal;

  // Don't outline anything that modifies or reads from the stack pointer.
  //
  // FIXME: There are instructions which are being manually built without
  // explicit uses/defs so we also have to check the MCInstrDesc. We should be
  // able to remove the extra checks once those are fixed up. For example,
  // sometimes we might get something like %rax = POP64r 1. This won't be
  // caught by modifiesRegister or readsRegister even though the instruction
  // really ought to be formed so that modifiesRegister/readsRegister would
  // catch it.
  if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
    return outliner::InstrType::Illegal;

  // Outlined calls change the instruction pointer, so don't read from it.
  if (MI.readsRegister(X86::RIP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
    return outliner::InstrType::Illegal;

  // Don't outline CFI instructions.
  if (MI.isCFIInstruction())
    return outliner::InstrType::Illegal;

  return outliner::InstrType::Legal;
}

void X86InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // If we're a tail call, we already have a return, so don't do anything.
  if (OF.FrameConstructionID == MachineOutlinerTailCall)
    return;

  // We're a normal call, so our sequence doesn't have a return instruction.
  // Add it in.
  MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RET64));
  MBB.insert(MBB.end(), retq);
}

MachineBasicBlock::iterator X86InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  // Is it a tail call?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // Yes, just insert a JMP.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
  } else {
    // No, insert a call.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
  }

  return It;
}

void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Iter,
                                      DebugLoc &DL,
                                      bool AllowSideEffects) const {
  const MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = getRegisterInfo();

  if (ST.hasMMX() && X86::VR64RegClass.contains(Reg))
    // FIXME: Should we ignore MMX registers?
    return;

  if (TRI.isGeneralPurposeRegister(MF, Reg)) {
    // Convert register to the 32-bit version. Both 'movl' and 'xorl' clear the
    // upper bits of a 64-bit register automagically.
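    // For example, clearing $r9 emits "xorl %r9d, %r9d", or "movl $0, %r9d"
    // when EFLAGS must be preserved.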
    Reg = getX86SubSuperRegister(Reg, 32);

    if (!AllowSideEffects)
      // XOR affects flags, so use a MOV instead.
      BuildMI(MBB, Iter, DL, get(X86::MOV32ri), Reg).addImm(0);
    else
      BuildMI(MBB, Iter, DL, get(X86::XOR32rr), Reg)
          .addReg(Reg, RegState::Undef)
          .addReg(Reg, RegState::Undef);
  } else if (X86::VR128RegClass.contains(Reg)) {
    // XMM#
    if (!ST.hasSSE1())
      return;

    BuildMI(MBB, Iter, DL, get(X86::V_SET0), Reg);
  } else if (X86::VR256RegClass.contains(Reg)) {
    // YMM#
    if (!ST.hasAVX())
      return;

    BuildMI(MBB, Iter, DL, get(X86::AVX_SET0), Reg);
  } else if (X86::VR512RegClass.contains(Reg)) {
    // ZMM#
    if (!ST.hasAVX512())
      return;

    BuildMI(MBB, Iter, DL, get(X86::AVX512_512_SET0), Reg);
  } else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
             X86::VK4RegClass.contains(Reg) || X86::VK8RegClass.contains(Reg) ||
             X86::VK16RegClass.contains(Reg)) {
    if (!ST.hasVLX())
      return;

    unsigned Op = ST.hasBWI() ? X86::KSET0Q : X86::KSET0W;
    BuildMI(MBB, Iter, DL, get(Op), Reg);
  }
}

bool X86InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
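  // On targets where VPDPWSSD is not fast, record the DPWSSD pattern so the
  // machine combiner can expand it into VPMADDWD + VPADDD (see
  // genAlternativeDpCodeSequence below).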
  unsigned Opc = Root.getOpcode();
  switch (Opc) {
  case X86::VPDPWSSDrr:
  case X86::VPDPWSSDrm:
  case X86::VPDPWSSDYrr:
  case X86::VPDPWSSDYrm: {
    if (!Subtarget.hasFastDPWSSD()) {
      Patterns.push_back(X86MachineCombinerPattern::DPWSSD);
      return true;
    }
    break;
  }
  case X86::VPDPWSSDZ128r:
  case X86::VPDPWSSDZ128m:
  case X86::VPDPWSSDZ256r:
  case X86::VPDPWSSDZ256m:
  case X86::VPDPWSSDZr:
  case X86::VPDPWSSDZm: {
    if (Subtarget.hasBWI() && !Subtarget.hasFastDPWSSD()) {
      Patterns.push_back(X86MachineCombinerPattern::DPWSSD);
      return true;
    }
    break;
  }
  }
  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}

static void
genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             SmallVectorImpl<MachineInstr *> &DelInstrs,
                             DenseMap<Register, unsigned> &InstrIdxForVirtReg) {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();

  unsigned Opc = Root.getOpcode();
  unsigned AddOpc = 0;
  unsigned MaddOpc = 0;
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode for DPWSSD expansion");
  // vpdpwssd xmm2,xmm3,xmm1
  // -->
  // vpmaddwd xmm3,xmm3,xmm1
  // vpaddd xmm2,xmm2,xmm3
  case X86::VPDPWSSDrr:
    MaddOpc = X86::VPMADDWDrr;
    AddOpc = X86::VPADDDrr;
    break;
  case X86::VPDPWSSDrm:
    MaddOpc = X86::VPMADDWDrm;
    AddOpc = X86::VPADDDrr;
    break;
  case X86::VPDPWSSDZ128r:
    MaddOpc = X86::VPMADDWDZ128rr;
    AddOpc = X86::VPADDDZ128rr;
    break;
  case X86::VPDPWSSDZ128m:
    MaddOpc = X86::VPMADDWDZ128rm;
    AddOpc = X86::VPADDDZ128rr;
    break;
  // vpdpwssd ymm2,ymm3,ymm1
  // -->
  // vpmaddwd ymm3,ymm3,ymm1
  // vpaddd ymm2,ymm2,ymm3
  case X86::VPDPWSSDYrr:
    MaddOpc = X86::VPMADDWDYrr;
    AddOpc = X86::VPADDDYrr;
    break;
  case X86::VPDPWSSDYrm:
    MaddOpc = X86::VPMADDWDYrm;
    AddOpc = X86::VPADDDYrr;
    break;
  case X86::VPDPWSSDZ256r:
    MaddOpc = X86::VPMADDWDZ256rr;
    AddOpc = X86::VPADDDZ256rr;
    break;
  case X86::VPDPWSSDZ256m:
    MaddOpc = X86::VPMADDWDZ256rm;
    AddOpc = X86::VPADDDZ256rr;
    break;
  // vpdpwssd zmm2,zmm3,zmm1
  // -->
  // vpmaddwd zmm3,zmm3,zmm1
  // vpaddd zmm2,zmm2,zmm3
  case X86::VPDPWSSDZr:
    MaddOpc = X86::VPMADDWDZrr;
    AddOpc = X86::VPADDDZrr;
    break;
  case X86::VPDPWSSDZm:
    MaddOpc = X86::VPMADDWDZrm;
    AddOpc = X86::VPADDDZrr;
    break;
  }
  // Create vpmaddwd.
  const TargetRegisterClass *RC =
      RegInfo.getRegClass(Root.getOperand(0).getReg());
  Register NewReg = RegInfo.createVirtualRegister(RC);
  MachineInstr *Madd = Root.getMF()->CloneMachineInstr(&Root);
  Madd->setDesc(TII.get(MaddOpc));
  Madd->untieRegOperand(1);
  Madd->removeOperand(1);
  Madd->getOperand(0).setReg(NewReg);
  InstrIdxForVirtReg.insert(std::make_pair(NewReg, 0));
  // Create vpaddd.
  Register DstReg = Root.getOperand(0).getReg();
  bool IsKill = Root.getOperand(1).isKill();
  MachineInstr *Add =
      BuildMI(*MF, MIMetadata(Root), TII.get(AddOpc), DstReg)
          .addReg(Root.getOperand(1).getReg(), getKillRegState(IsKill))
          .addReg(Madd->getOperand(0).getReg(), getKillRegState(true));
  InsInstrs.push_back(Madd);
  InsInstrs.push_back(Add);
  DelInstrs.push_back(&Root);
}

void X86InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case X86MachineCombinerPattern::DPWSSD:
    genAlternativeDpCodeSequence(Root, *this, InsInstrs, DelInstrs,
                                 InstrIdxForVirtReg);
    return;
  }
}

// See also: X86DAGToDAGISel::SelectInlineAsmMemoryOperand().
void X86InstrInfo::getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
                                         int FI) const {
  X86AddressMode M;
  M.BaseType = X86AddressMode::FrameIndexBase;
  M.Base.FrameIndex = FI;
  M.getFullAddress(Ops);
}

#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"