xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp (revision a3266ba2697a383d2ede56803320d941866c7e76)
1 //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the X86 implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "X86InstrInfo.h"
14 #include "X86.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrFoldTables.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/Sequence.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/MC/MCAsmInfo.h"
35 #include "llvm/MC/MCExpr.h"
36 #include "llvm/MC/MCInst.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetOptions.h"
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "x86-instr-info"
46 
47 #define GET_INSTRINFO_CTOR_DTOR
48 #include "X86GenInstrInfo.inc"
49 
50 static cl::opt<bool>
51     NoFusing("disable-spill-fusing",
52              cl::desc("Disable fusing of spill code into instructions"),
53              cl::Hidden);
54 static cl::opt<bool>
55 PrintFailedFusing("print-failed-fuse-candidates",
56                   cl::desc("Print instructions that the allocator wants to"
57                            " fuse, but the X86 backend currently can't"),
58                   cl::Hidden);
59 static cl::opt<bool>
60 ReMatPICStubLoad("remat-pic-stub-load",
61                  cl::desc("Re-materialize load from stub in PIC mode"),
62                  cl::init(false), cl::Hidden);
63 static cl::opt<unsigned>
64 PartialRegUpdateClearance("partial-reg-update-clearance",
65                           cl::desc("Clearance between two register writes "
66                                    "for inserting XOR to avoid partial "
67                                    "register update"),
68                           cl::init(64), cl::Hidden);
69 static cl::opt<unsigned>
70 UndefRegClearance("undef-reg-clearance",
71                   cl::desc("How many idle instructions we would like before "
72                            "certain undef register reads"),
73                   cl::init(128), cl::Hidden);
74 
75 
76 // Pin the vtable to this file.
77 void X86InstrInfo::anchor() {}
78 
79 X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
80     : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
81                                                : X86::ADJCALLSTACKDOWN32),
82                       (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
83                                                : X86::ADJCALLSTACKUP32),
84                       X86::CATCHRET,
85                       (STI.is64Bit() ? X86::RETQ : X86::RETL)),
86       Subtarget(STI), RI(STI.getTargetTriple()) {
87 }
88 
89 bool
90 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
91                                     Register &SrcReg, Register &DstReg,
92                                     unsigned &SubIdx) const {
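  // For example (illustrative, not from the original source): given
  //   %1:gr32 = MOVZX32rr8 %0:gr8
  // this returns SrcReg = %0, DstReg = %1 and SubIdx = X86::sub_8bit, i.e. the
  // coalescer may treat %1.sub_8bit as a copy of %0.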
93   switch (MI.getOpcode()) {
94   default: break;
95   case X86::MOVSX16rr8:
96   case X86::MOVZX16rr8:
97   case X86::MOVSX32rr8:
98   case X86::MOVZX32rr8:
99   case X86::MOVSX64rr8:
100     if (!Subtarget.is64Bit())
101       // In 32-bit mode it is not always legal to reference the low 8 bits
102       // of the larger register (SIL/DIL/BPL/SPL require a REX prefix).
103       return false;
104     LLVM_FALLTHROUGH;
105   case X86::MOVSX32rr16:
106   case X86::MOVZX32rr16:
107   case X86::MOVSX64rr16:
108   case X86::MOVSX64rr32: {
109     if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
110       // Be conservative.
111       return false;
112     SrcReg = MI.getOperand(1).getReg();
113     DstReg = MI.getOperand(0).getReg();
114     switch (MI.getOpcode()) {
115     default: llvm_unreachable("Unreachable!");
116     case X86::MOVSX16rr8:
117     case X86::MOVZX16rr8:
118     case X86::MOVSX32rr8:
119     case X86::MOVZX32rr8:
120     case X86::MOVSX64rr8:
121       SubIdx = X86::sub_8bit;
122       break;
123     case X86::MOVSX32rr16:
124     case X86::MOVZX32rr16:
125     case X86::MOVSX64rr16:
126       SubIdx = X86::sub_16bit;
127       break;
128     case X86::MOVSX64rr32:
129       SubIdx = X86::sub_32bit;
130       break;
131     }
132     return true;
133   }
134   }
135   return false;
136 }
137 
138 bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {
139   switch (MI.getOpcode()) {
140   default:
141     // By default, assume that the instruction is not data invariant.
142     return false;
143 
144     // Some target-independent operations that trivially lower to data-invariant
145     // instructions.
146   case TargetOpcode::COPY:
147   case TargetOpcode::INSERT_SUBREG:
148   case TargetOpcode::SUBREG_TO_REG:
149     return true;
150 
151   // On x86 it is believed that imul is constant time w.r.t. its operands.
152   // However, these instructions set flags and are perhaps the most
153   // surprisingly constant-time operations, so we call them out separately.
154   case X86::IMUL16rr:
155   case X86::IMUL16rri8:
156   case X86::IMUL16rri:
157   case X86::IMUL32rr:
158   case X86::IMUL32rri8:
159   case X86::IMUL32rri:
160   case X86::IMUL64rr:
161   case X86::IMUL64rri32:
162   case X86::IMUL64rri8:
163 
164   // Bit scanning and counting instructions that are somewhat surprisingly
165   // constant time as they scan across bits and do other fairly complex
166   // operations like popcnt, but are believed to be constant time on x86.
167   // However, these set flags.
168   case X86::BSF16rr:
169   case X86::BSF32rr:
170   case X86::BSF64rr:
171   case X86::BSR16rr:
172   case X86::BSR32rr:
173   case X86::BSR64rr:
174   case X86::LZCNT16rr:
175   case X86::LZCNT32rr:
176   case X86::LZCNT64rr:
177   case X86::POPCNT16rr:
178   case X86::POPCNT32rr:
179   case X86::POPCNT64rr:
180   case X86::TZCNT16rr:
181   case X86::TZCNT32rr:
182   case X86::TZCNT64rr:
183 
184   // Bit manipulation instructions are effectively combinations of basic
185   // arithmetic ops, and should still execute in constant time. These also
186   // set flags.
187   case X86::BLCFILL32rr:
188   case X86::BLCFILL64rr:
189   case X86::BLCI32rr:
190   case X86::BLCI64rr:
191   case X86::BLCIC32rr:
192   case X86::BLCIC64rr:
193   case X86::BLCMSK32rr:
194   case X86::BLCMSK64rr:
195   case X86::BLCS32rr:
196   case X86::BLCS64rr:
197   case X86::BLSFILL32rr:
198   case X86::BLSFILL64rr:
199   case X86::BLSI32rr:
200   case X86::BLSI64rr:
201   case X86::BLSIC32rr:
202   case X86::BLSIC64rr:
203   case X86::BLSMSK32rr:
204   case X86::BLSMSK64rr:
205   case X86::BLSR32rr:
206   case X86::BLSR64rr:
207   case X86::TZMSK32rr:
208   case X86::TZMSK64rr:
209 
210   // Bit extracting and clearing instructions should execute in constant time,
211   // and set flags.
212   case X86::BEXTR32rr:
213   case X86::BEXTR64rr:
214   case X86::BEXTRI32ri:
215   case X86::BEXTRI64ri:
216   case X86::BZHI32rr:
217   case X86::BZHI64rr:
218 
219   // Shift and rotate.
220   case X86::ROL8r1:
221   case X86::ROL16r1:
222   case X86::ROL32r1:
223   case X86::ROL64r1:
224   case X86::ROL8rCL:
225   case X86::ROL16rCL:
226   case X86::ROL32rCL:
227   case X86::ROL64rCL:
228   case X86::ROL8ri:
229   case X86::ROL16ri:
230   case X86::ROL32ri:
231   case X86::ROL64ri:
232   case X86::ROR8r1:
233   case X86::ROR16r1:
234   case X86::ROR32r1:
235   case X86::ROR64r1:
236   case X86::ROR8rCL:
237   case X86::ROR16rCL:
238   case X86::ROR32rCL:
239   case X86::ROR64rCL:
240   case X86::ROR8ri:
241   case X86::ROR16ri:
242   case X86::ROR32ri:
243   case X86::ROR64ri:
244   case X86::SAR8r1:
245   case X86::SAR16r1:
246   case X86::SAR32r1:
247   case X86::SAR64r1:
248   case X86::SAR8rCL:
249   case X86::SAR16rCL:
250   case X86::SAR32rCL:
251   case X86::SAR64rCL:
252   case X86::SAR8ri:
253   case X86::SAR16ri:
254   case X86::SAR32ri:
255   case X86::SAR64ri:
256   case X86::SHL8r1:
257   case X86::SHL16r1:
258   case X86::SHL32r1:
259   case X86::SHL64r1:
260   case X86::SHL8rCL:
261   case X86::SHL16rCL:
262   case X86::SHL32rCL:
263   case X86::SHL64rCL:
264   case X86::SHL8ri:
265   case X86::SHL16ri:
266   case X86::SHL32ri:
267   case X86::SHL64ri:
268   case X86::SHR8r1:
269   case X86::SHR16r1:
270   case X86::SHR32r1:
271   case X86::SHR64r1:
272   case X86::SHR8rCL:
273   case X86::SHR16rCL:
274   case X86::SHR32rCL:
275   case X86::SHR64rCL:
276   case X86::SHR8ri:
277   case X86::SHR16ri:
278   case X86::SHR32ri:
279   case X86::SHR64ri:
280   case X86::SHLD16rrCL:
281   case X86::SHLD32rrCL:
282   case X86::SHLD64rrCL:
283   case X86::SHLD16rri8:
284   case X86::SHLD32rri8:
285   case X86::SHLD64rri8:
286   case X86::SHRD16rrCL:
287   case X86::SHRD32rrCL:
288   case X86::SHRD64rrCL:
289   case X86::SHRD16rri8:
290   case X86::SHRD32rri8:
291   case X86::SHRD64rri8:
292 
293   // Basic arithmetic is constant time on the input but does set flags.
294   case X86::ADC8rr:
295   case X86::ADC8ri:
296   case X86::ADC16rr:
297   case X86::ADC16ri:
298   case X86::ADC16ri8:
299   case X86::ADC32rr:
300   case X86::ADC32ri:
301   case X86::ADC32ri8:
302   case X86::ADC64rr:
303   case X86::ADC64ri8:
304   case X86::ADC64ri32:
305   case X86::ADD8rr:
306   case X86::ADD8ri:
307   case X86::ADD16rr:
308   case X86::ADD16ri:
309   case X86::ADD16ri8:
310   case X86::ADD32rr:
311   case X86::ADD32ri:
312   case X86::ADD32ri8:
313   case X86::ADD64rr:
314   case X86::ADD64ri8:
315   case X86::ADD64ri32:
316   case X86::AND8rr:
317   case X86::AND8ri:
318   case X86::AND16rr:
319   case X86::AND16ri:
320   case X86::AND16ri8:
321   case X86::AND32rr:
322   case X86::AND32ri:
323   case X86::AND32ri8:
324   case X86::AND64rr:
325   case X86::AND64ri8:
326   case X86::AND64ri32:
327   case X86::OR8rr:
328   case X86::OR8ri:
329   case X86::OR16rr:
330   case X86::OR16ri:
331   case X86::OR16ri8:
332   case X86::OR32rr:
333   case X86::OR32ri:
334   case X86::OR32ri8:
335   case X86::OR64rr:
336   case X86::OR64ri8:
337   case X86::OR64ri32:
338   case X86::SBB8rr:
339   case X86::SBB8ri:
340   case X86::SBB16rr:
341   case X86::SBB16ri:
342   case X86::SBB16ri8:
343   case X86::SBB32rr:
344   case X86::SBB32ri:
345   case X86::SBB32ri8:
346   case X86::SBB64rr:
347   case X86::SBB64ri8:
348   case X86::SBB64ri32:
349   case X86::SUB8rr:
350   case X86::SUB8ri:
351   case X86::SUB16rr:
352   case X86::SUB16ri:
353   case X86::SUB16ri8:
354   case X86::SUB32rr:
355   case X86::SUB32ri:
356   case X86::SUB32ri8:
357   case X86::SUB64rr:
358   case X86::SUB64ri8:
359   case X86::SUB64ri32:
360   case X86::XOR8rr:
361   case X86::XOR8ri:
362   case X86::XOR16rr:
363   case X86::XOR16ri:
364   case X86::XOR16ri8:
365   case X86::XOR32rr:
366   case X86::XOR32ri:
367   case X86::XOR32ri8:
368   case X86::XOR64rr:
369   case X86::XOR64ri8:
370   case X86::XOR64ri32:
371   // Arithmetic with just 32-bit and 64-bit variants and no immediates.
372   case X86::ADCX32rr:
373   case X86::ADCX64rr:
374   case X86::ADOX32rr:
375   case X86::ADOX64rr:
376   case X86::ANDN32rr:
377   case X86::ANDN64rr:
378   // Unary arithmetic operations.
379   case X86::DEC8r:
380   case X86::DEC16r:
381   case X86::DEC32r:
382   case X86::DEC64r:
383   case X86::INC8r:
384   case X86::INC16r:
385   case X86::INC32r:
386   case X86::INC64r:
387   case X86::NEG8r:
388   case X86::NEG16r:
389   case X86::NEG32r:
390   case X86::NEG64r:
391 
392   // Unlike other arithmetic, NOT doesn't set EFLAGS.
393   case X86::NOT8r:
394   case X86::NOT16r:
395   case X86::NOT32r:
396   case X86::NOT64r:
397 
398   // Various move instructions used to zero or sign extend things. Note that we
399   // intentionally don't support the _NOREX variants as we can't handle that
400   // register constraint anyway.
401   case X86::MOVSX16rr8:
402   case X86::MOVSX32rr8:
403   case X86::MOVSX32rr16:
404   case X86::MOVSX64rr8:
405   case X86::MOVSX64rr16:
406   case X86::MOVSX64rr32:
407   case X86::MOVZX16rr8:
408   case X86::MOVZX32rr8:
409   case X86::MOVZX32rr16:
410   case X86::MOVZX64rr8:
411   case X86::MOVZX64rr16:
412   case X86::MOV32rr:
413 
414   // Arithmetic instructions that are both constant time and don't set flags.
415   case X86::RORX32ri:
416   case X86::RORX64ri:
417   case X86::SARX32rr:
418   case X86::SARX64rr:
419   case X86::SHLX32rr:
420   case X86::SHLX64rr:
421   case X86::SHRX32rr:
422   case X86::SHRX64rr:
423 
424   // LEA doesn't actually access memory, and its arithmetic is constant time.
425   case X86::LEA16r:
426   case X86::LEA32r:
427   case X86::LEA64_32r:
428   case X86::LEA64r:
429     return true;
430   }
431 }
432 
433 bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {
434   switch (MI.getOpcode()) {
435   default:
436     // By default, assume that the load will immediately leak.
437     return false;
438 
439   // On x86 it is believed that imul is constant time w.r.t. the loaded data.
440   // However, these instructions set flags and are perhaps the most
441   // surprisingly constant-time operations, so we call them out separately.
442   case X86::IMUL16rm:
443   case X86::IMUL16rmi8:
444   case X86::IMUL16rmi:
445   case X86::IMUL32rm:
446   case X86::IMUL32rmi8:
447   case X86::IMUL32rmi:
448   case X86::IMUL64rm:
449   case X86::IMUL64rmi32:
450   case X86::IMUL64rmi8:
451 
452   // Bit scanning and counting instructions that are somewhat surprisingly
453   // constant time as they scan across bits and do other fairly complex
454   // operations like popcnt, but are believed to be constant time on x86.
455   // However, these set flags.
456   case X86::BSF16rm:
457   case X86::BSF32rm:
458   case X86::BSF64rm:
459   case X86::BSR16rm:
460   case X86::BSR32rm:
461   case X86::BSR64rm:
462   case X86::LZCNT16rm:
463   case X86::LZCNT32rm:
464   case X86::LZCNT64rm:
465   case X86::POPCNT16rm:
466   case X86::POPCNT32rm:
467   case X86::POPCNT64rm:
468   case X86::TZCNT16rm:
469   case X86::TZCNT32rm:
470   case X86::TZCNT64rm:
471 
472   // Bit manipulation instructions are effectively combinations of basic
473   // arithmetic ops, and should still execute in constant time. These also
474   // set flags.
475   case X86::BLCFILL32rm:
476   case X86::BLCFILL64rm:
477   case X86::BLCI32rm:
478   case X86::BLCI64rm:
479   case X86::BLCIC32rm:
480   case X86::BLCIC64rm:
481   case X86::BLCMSK32rm:
482   case X86::BLCMSK64rm:
483   case X86::BLCS32rm:
484   case X86::BLCS64rm:
485   case X86::BLSFILL32rm:
486   case X86::BLSFILL64rm:
487   case X86::BLSI32rm:
488   case X86::BLSI64rm:
489   case X86::BLSIC32rm:
490   case X86::BLSIC64rm:
491   case X86::BLSMSK32rm:
492   case X86::BLSMSK64rm:
493   case X86::BLSR32rm:
494   case X86::BLSR64rm:
495   case X86::TZMSK32rm:
496   case X86::TZMSK64rm:
497 
498   // Bit extracting and clearing instructions should execute in constant time,
499   // and set flags.
500   case X86::BEXTR32rm:
501   case X86::BEXTR64rm:
502   case X86::BEXTRI32mi:
503   case X86::BEXTRI64mi:
504   case X86::BZHI32rm:
505   case X86::BZHI64rm:
506 
507   // Basic arithmetic is constant time on the input but does set flags.
508   case X86::ADC8rm:
509   case X86::ADC16rm:
510   case X86::ADC32rm:
511   case X86::ADC64rm:
512   case X86::ADCX32rm:
513   case X86::ADCX64rm:
514   case X86::ADD8rm:
515   case X86::ADD16rm:
516   case X86::ADD32rm:
517   case X86::ADD64rm:
518   case X86::ADOX32rm:
519   case X86::ADOX64rm:
520   case X86::AND8rm:
521   case X86::AND16rm:
522   case X86::AND32rm:
523   case X86::AND64rm:
524   case X86::ANDN32rm:
525   case X86::ANDN64rm:
526   case X86::OR8rm:
527   case X86::OR16rm:
528   case X86::OR32rm:
529   case X86::OR64rm:
530   case X86::SBB8rm:
531   case X86::SBB16rm:
532   case X86::SBB32rm:
533   case X86::SBB64rm:
534   case X86::SUB8rm:
535   case X86::SUB16rm:
536   case X86::SUB32rm:
537   case X86::SUB64rm:
538   case X86::XOR8rm:
539   case X86::XOR16rm:
540   case X86::XOR32rm:
541   case X86::XOR64rm:
542 
543   // Integer multiply w/o affecting flags is still believed to be constant
544   // time on x86. Called out separately as this is among the most surprising
545   // instructions to exhibit that behavior.
546   case X86::MULX32rm:
547   case X86::MULX64rm:
548 
549   // Arithmetic instructions that are both constant time and don't set flags.
550   case X86::RORX32mi:
551   case X86::RORX64mi:
552   case X86::SARX32rm:
553   case X86::SARX64rm:
554   case X86::SHLX32rm:
555   case X86::SHLX64rm:
556   case X86::SHRX32rm:
557   case X86::SHRX64rm:
558 
559   // Conversions are believed to be constant time and don't set flags.
560   case X86::CVTTSD2SI64rm:
561   case X86::VCVTTSD2SI64rm:
562   case X86::VCVTTSD2SI64Zrm:
563   case X86::CVTTSD2SIrm:
564   case X86::VCVTTSD2SIrm:
565   case X86::VCVTTSD2SIZrm:
566   case X86::CVTTSS2SI64rm:
567   case X86::VCVTTSS2SI64rm:
568   case X86::VCVTTSS2SI64Zrm:
569   case X86::CVTTSS2SIrm:
570   case X86::VCVTTSS2SIrm:
571   case X86::VCVTTSS2SIZrm:
572   case X86::CVTSI2SDrm:
573   case X86::VCVTSI2SDrm:
574   case X86::VCVTSI2SDZrm:
575   case X86::CVTSI2SSrm:
576   case X86::VCVTSI2SSrm:
577   case X86::VCVTSI2SSZrm:
578   case X86::CVTSI642SDrm:
579   case X86::VCVTSI642SDrm:
580   case X86::VCVTSI642SDZrm:
581   case X86::CVTSI642SSrm:
582   case X86::VCVTSI642SSrm:
583   case X86::VCVTSI642SSZrm:
584   case X86::CVTSS2SDrm:
585   case X86::VCVTSS2SDrm:
586   case X86::VCVTSS2SDZrm:
587   case X86::CVTSD2SSrm:
588   case X86::VCVTSD2SSrm:
589   case X86::VCVTSD2SSZrm:
590   // AVX512 added unsigned integer conversions.
591   case X86::VCVTTSD2USI64Zrm:
592   case X86::VCVTTSD2USIZrm:
593   case X86::VCVTTSS2USI64Zrm:
594   case X86::VCVTTSS2USIZrm:
595   case X86::VCVTUSI2SDZrm:
596   case X86::VCVTUSI642SDZrm:
597   case X86::VCVTUSI2SSZrm:
598   case X86::VCVTUSI642SSZrm:
599 
600   // Loads to register don't set flags.
601   case X86::MOV8rm:
602   case X86::MOV8rm_NOREX:
603   case X86::MOV16rm:
604   case X86::MOV32rm:
605   case X86::MOV64rm:
606   case X86::MOVSX16rm8:
607   case X86::MOVSX32rm16:
608   case X86::MOVSX32rm8:
609   case X86::MOVSX32rm8_NOREX:
610   case X86::MOVSX64rm16:
611   case X86::MOVSX64rm32:
612   case X86::MOVSX64rm8:
613   case X86::MOVZX16rm8:
614   case X86::MOVZX32rm16:
615   case X86::MOVZX32rm8:
616   case X86::MOVZX32rm8_NOREX:
617   case X86::MOVZX64rm16:
618   case X86::MOVZX64rm8:
619     return true;
620   }
621 }
622 
623 int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
624   const MachineFunction *MF = MI.getParent()->getParent();
625   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
626 
627   if (isFrameInstr(MI)) {
628     int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
629     SPAdj -= getFrameAdjustment(MI);
630     if (!isFrameSetup(MI))
631       SPAdj = -SPAdj;
632     return SPAdj;
633   }
634 
635   // To know whether a call adjusts the stack, we need information
636   // that is bound to the following ADJCALLSTACKUP pseudo.
637   // Look for the next ADJCALLSTACKUP that follows the call.
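  // For example (illustrative): for a callee that pops its own arguments
  // (e.g. a stdcall callee popping 8 bytes), the matching ADJCALLSTACKUP
  // carries that amount as its second operand and this returns -8; for a
  // plain cdecl call it returns 0.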
638   if (MI.isCall()) {
639     const MachineBasicBlock *MBB = MI.getParent();
640     auto I = ++MachineBasicBlock::const_iterator(MI);
641     for (auto E = MBB->end(); I != E; ++I) {
642       if (I->getOpcode() == getCallFrameDestroyOpcode() ||
643           I->isCall())
644         break;
645     }
646 
647     // If we could not find a frame destroy opcode, then it has already
648     // been simplified, so we don't care.
649     if (I->getOpcode() != getCallFrameDestroyOpcode())
650       return 0;
651 
652     return -(I->getOperand(1).getImm());
653   }
654 
655   // Currently we handle only the PUSHes we can reasonably expect to see
656   // in call sequences.
657   switch (MI.getOpcode()) {
658   default:
659     return 0;
660   case X86::PUSH32i8:
661   case X86::PUSH32r:
662   case X86::PUSH32rmm:
663   case X86::PUSH32rmr:
664   case X86::PUSHi32:
665     return 4;
666   case X86::PUSH64i8:
667   case X86::PUSH64r:
668   case X86::PUSH64rmm:
669   case X86::PUSH64rmr:
670   case X86::PUSH64i32:
671     return 8;
672   }
673 }
674 
675 /// Return true and set FrameIndex if the specified operand and the
676 /// following operands form a reference to the stack frame.
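/// Concretely (illustrative), this matches an address of the form
/// "[frame-index + 0]": the base is a frame index, the scale is 1, there is
/// no index register, and the displacement is 0.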
677 bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
678                                   int &FrameIndex) const {
679   if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
680       MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
681       MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
682       MI.getOperand(Op + X86::AddrDisp).isImm() &&
683       MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
684       MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
685       MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
686     FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
687     return true;
688   }
689   return false;
690 }
691 
692 static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
693   switch (Opcode) {
694   default:
695     return false;
696   case X86::MOV8rm:
697   case X86::KMOVBkm:
698     MemBytes = 1;
699     return true;
700   case X86::MOV16rm:
701   case X86::KMOVWkm:
702     MemBytes = 2;
703     return true;
704   case X86::MOV32rm:
705   case X86::MOVSSrm:
706   case X86::MOVSSrm_alt:
707   case X86::VMOVSSrm:
708   case X86::VMOVSSrm_alt:
709   case X86::VMOVSSZrm:
710   case X86::VMOVSSZrm_alt:
711   case X86::KMOVDkm:
712     MemBytes = 4;
713     return true;
714   case X86::MOV64rm:
715   case X86::LD_Fp64m:
716   case X86::MOVSDrm:
717   case X86::MOVSDrm_alt:
718   case X86::VMOVSDrm:
719   case X86::VMOVSDrm_alt:
720   case X86::VMOVSDZrm:
721   case X86::VMOVSDZrm_alt:
722   case X86::MMX_MOVD64rm:
723   case X86::MMX_MOVQ64rm:
724   case X86::KMOVQkm:
725     MemBytes = 8;
726     return true;
727   case X86::MOVAPSrm:
728   case X86::MOVUPSrm:
729   case X86::MOVAPDrm:
730   case X86::MOVUPDrm:
731   case X86::MOVDQArm:
732   case X86::MOVDQUrm:
733   case X86::VMOVAPSrm:
734   case X86::VMOVUPSrm:
735   case X86::VMOVAPDrm:
736   case X86::VMOVUPDrm:
737   case X86::VMOVDQArm:
738   case X86::VMOVDQUrm:
739   case X86::VMOVAPSZ128rm:
740   case X86::VMOVUPSZ128rm:
741   case X86::VMOVAPSZ128rm_NOVLX:
742   case X86::VMOVUPSZ128rm_NOVLX:
743   case X86::VMOVAPDZ128rm:
744   case X86::VMOVUPDZ128rm:
745   case X86::VMOVDQU8Z128rm:
746   case X86::VMOVDQU16Z128rm:
747   case X86::VMOVDQA32Z128rm:
748   case X86::VMOVDQU32Z128rm:
749   case X86::VMOVDQA64Z128rm:
750   case X86::VMOVDQU64Z128rm:
751     MemBytes = 16;
752     return true;
753   case X86::VMOVAPSYrm:
754   case X86::VMOVUPSYrm:
755   case X86::VMOVAPDYrm:
756   case X86::VMOVUPDYrm:
757   case X86::VMOVDQAYrm:
758   case X86::VMOVDQUYrm:
759   case X86::VMOVAPSZ256rm:
760   case X86::VMOVUPSZ256rm:
761   case X86::VMOVAPSZ256rm_NOVLX:
762   case X86::VMOVUPSZ256rm_NOVLX:
763   case X86::VMOVAPDZ256rm:
764   case X86::VMOVUPDZ256rm:
765   case X86::VMOVDQU8Z256rm:
766   case X86::VMOVDQU16Z256rm:
767   case X86::VMOVDQA32Z256rm:
768   case X86::VMOVDQU32Z256rm:
769   case X86::VMOVDQA64Z256rm:
770   case X86::VMOVDQU64Z256rm:
771     MemBytes = 32;
772     return true;
773   case X86::VMOVAPSZrm:
774   case X86::VMOVUPSZrm:
775   case X86::VMOVAPDZrm:
776   case X86::VMOVUPDZrm:
777   case X86::VMOVDQU8Zrm:
778   case X86::VMOVDQU16Zrm:
779   case X86::VMOVDQA32Zrm:
780   case X86::VMOVDQU32Zrm:
781   case X86::VMOVDQA64Zrm:
782   case X86::VMOVDQU64Zrm:
783     MemBytes = 64;
784     return true;
785   }
786 }
787 
788 static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
789   switch (Opcode) {
790   default:
791     return false;
792   case X86::MOV8mr:
793   case X86::KMOVBmk:
794     MemBytes = 1;
795     return true;
796   case X86::MOV16mr:
797   case X86::KMOVWmk:
798     MemBytes = 2;
799     return true;
800   case X86::MOV32mr:
801   case X86::MOVSSmr:
802   case X86::VMOVSSmr:
803   case X86::VMOVSSZmr:
804   case X86::KMOVDmk:
805     MemBytes = 4;
806     return true;
807   case X86::MOV64mr:
808   case X86::ST_FpP64m:
809   case X86::MOVSDmr:
810   case X86::VMOVSDmr:
811   case X86::VMOVSDZmr:
812   case X86::MMX_MOVD64mr:
813   case X86::MMX_MOVQ64mr:
814   case X86::MMX_MOVNTQmr:
815   case X86::KMOVQmk:
816     MemBytes = 8;
817     return true;
818   case X86::MOVAPSmr:
819   case X86::MOVUPSmr:
820   case X86::MOVAPDmr:
821   case X86::MOVUPDmr:
822   case X86::MOVDQAmr:
823   case X86::MOVDQUmr:
824   case X86::VMOVAPSmr:
825   case X86::VMOVUPSmr:
826   case X86::VMOVAPDmr:
827   case X86::VMOVUPDmr:
828   case X86::VMOVDQAmr:
829   case X86::VMOVDQUmr:
830   case X86::VMOVUPSZ128mr:
831   case X86::VMOVAPSZ128mr:
832   case X86::VMOVUPSZ128mr_NOVLX:
833   case X86::VMOVAPSZ128mr_NOVLX:
834   case X86::VMOVUPDZ128mr:
835   case X86::VMOVAPDZ128mr:
836   case X86::VMOVDQA32Z128mr:
837   case X86::VMOVDQU32Z128mr:
838   case X86::VMOVDQA64Z128mr:
839   case X86::VMOVDQU64Z128mr:
840   case X86::VMOVDQU8Z128mr:
841   case X86::VMOVDQU16Z128mr:
842     MemBytes = 16;
843     return true;
844   case X86::VMOVUPSYmr:
845   case X86::VMOVAPSYmr:
846   case X86::VMOVUPDYmr:
847   case X86::VMOVAPDYmr:
848   case X86::VMOVDQUYmr:
849   case X86::VMOVDQAYmr:
850   case X86::VMOVUPSZ256mr:
851   case X86::VMOVAPSZ256mr:
852   case X86::VMOVUPSZ256mr_NOVLX:
853   case X86::VMOVAPSZ256mr_NOVLX:
854   case X86::VMOVUPDZ256mr:
855   case X86::VMOVAPDZ256mr:
856   case X86::VMOVDQU8Z256mr:
857   case X86::VMOVDQU16Z256mr:
858   case X86::VMOVDQA32Z256mr:
859   case X86::VMOVDQU32Z256mr:
860   case X86::VMOVDQA64Z256mr:
861   case X86::VMOVDQU64Z256mr:
862     MemBytes = 32;
863     return true;
864   case X86::VMOVUPSZmr:
865   case X86::VMOVAPSZmr:
866   case X86::VMOVUPDZmr:
867   case X86::VMOVAPDZmr:
868   case X86::VMOVDQU8Zmr:
869   case X86::VMOVDQU16Zmr:
870   case X86::VMOVDQA32Zmr:
871   case X86::VMOVDQU32Zmr:
872   case X86::VMOVDQA64Zmr:
873   case X86::VMOVDQU64Zmr:
874     MemBytes = 64;
875     return true;
876   }
877   return false;
878 }
879 
880 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
881                                            int &FrameIndex) const {
882   unsigned Dummy;
883   return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
884 }
885 
886 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
887                                            int &FrameIndex,
888                                            unsigned &MemBytes) const {
889   if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
890     if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
891       return MI.getOperand(0).getReg();
892   return 0;
893 }
894 
895 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
896                                                  int &FrameIndex) const {
897   unsigned Dummy;
898   if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
899     unsigned Reg;
900     if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
901       return Reg;
902     // Check for post-frame index elimination operations
903     SmallVector<const MachineMemOperand *, 1> Accesses;
904     if (hasLoadFromStackSlot(MI, Accesses)) {
905       FrameIndex =
906           cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
907               ->getFrameIndex();
908       return 1;
909     }
910   }
911   return 0;
912 }
913 
914 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
915                                           int &FrameIndex) const {
916   unsigned Dummy;
917   return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
918 }
919 
920 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
921                                           int &FrameIndex,
922                                           unsigned &MemBytes) const {
923   if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
924     if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
925         isFrameOperand(MI, 0, FrameIndex))
926       return MI.getOperand(X86::AddrNumOperands).getReg();
927   return 0;
928 }
929 
930 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
931                                                 int &FrameIndex) const {
932   unsigned Dummy;
933   if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
934     unsigned Reg;
935     if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
936       return Reg;
937     // Check for post-frame index elimination operations
938     SmallVector<const MachineMemOperand *, 1> Accesses;
939     if (hasStoreToStackSlot(MI, Accesses)) {
940       FrameIndex =
941           cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
942               ->getFrameIndex();
943       return 1;
944     }
945   }
946   return 0;
947 }
948 
949 /// Return true if the register is a PIC base, i.e. defined by X86::MOVPC32r.
950 static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
951   // Don't waste compile time scanning use-def chains of physregs.
952   if (!BaseReg.isVirtual())
953     return false;
954   bool isPICBase = false;
955   for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
956          E = MRI.def_instr_end(); I != E; ++I) {
957     MachineInstr *DefMI = &*I;
958     if (DefMI->getOpcode() != X86::MOVPC32r)
959       return false;
960     assert(!isPICBase && "More than one PIC base?");
961     isPICBase = true;
962   }
963   return isPICBase;
964 }
965 
966 bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
967                                                      AAResults *AA) const {
968   switch (MI.getOpcode()) {
969   default:
970     // This function should only be called for opcodes with the ReMaterializable
971     // flag set.
972     llvm_unreachable("Unknown rematerializable operation!");
973     break;
974 
975   case X86::LOAD_STACK_GUARD:
976   case X86::AVX1_SETALLONES:
977   case X86::AVX2_SETALLONES:
978   case X86::AVX512_128_SET0:
979   case X86::AVX512_256_SET0:
980   case X86::AVX512_512_SET0:
981   case X86::AVX512_512_SETALLONES:
982   case X86::AVX512_FsFLD0SD:
983   case X86::AVX512_FsFLD0SS:
984   case X86::AVX512_FsFLD0F128:
985   case X86::AVX_SET0:
986   case X86::FsFLD0SD:
987   case X86::FsFLD0SS:
988   case X86::FsFLD0F128:
989   case X86::KSET0D:
990   case X86::KSET0Q:
991   case X86::KSET0W:
992   case X86::KSET1D:
993   case X86::KSET1Q:
994   case X86::KSET1W:
995   case X86::MMX_SET0:
996   case X86::MOV32ImmSExti8:
997   case X86::MOV32r0:
998   case X86::MOV32r1:
999   case X86::MOV32r_1:
1000   case X86::MOV32ri64:
1001   case X86::MOV64ImmSExti8:
1002   case X86::V_SET0:
1003   case X86::V_SETALLONES:
1004   case X86::MOV16ri:
1005   case X86::MOV32ri:
1006   case X86::MOV64ri:
1007   case X86::MOV64ri32:
1008   case X86::MOV8ri:
1009     return true;
1010 
1011   case X86::MOV8rm:
1012   case X86::MOV8rm_NOREX:
1013   case X86::MOV16rm:
1014   case X86::MOV32rm:
1015   case X86::MOV64rm:
1016   case X86::MOVSSrm:
1017   case X86::MOVSSrm_alt:
1018   case X86::MOVSDrm:
1019   case X86::MOVSDrm_alt:
1020   case X86::MOVAPSrm:
1021   case X86::MOVUPSrm:
1022   case X86::MOVAPDrm:
1023   case X86::MOVUPDrm:
1024   case X86::MOVDQArm:
1025   case X86::MOVDQUrm:
1026   case X86::VMOVSSrm:
1027   case X86::VMOVSSrm_alt:
1028   case X86::VMOVSDrm:
1029   case X86::VMOVSDrm_alt:
1030   case X86::VMOVAPSrm:
1031   case X86::VMOVUPSrm:
1032   case X86::VMOVAPDrm:
1033   case X86::VMOVUPDrm:
1034   case X86::VMOVDQArm:
1035   case X86::VMOVDQUrm:
1036   case X86::VMOVAPSYrm:
1037   case X86::VMOVUPSYrm:
1038   case X86::VMOVAPDYrm:
1039   case X86::VMOVUPDYrm:
1040   case X86::VMOVDQAYrm:
1041   case X86::VMOVDQUYrm:
1042   case X86::MMX_MOVD64rm:
1043   case X86::MMX_MOVQ64rm:
1044   // AVX-512
1045   case X86::VMOVSSZrm:
1046   case X86::VMOVSSZrm_alt:
1047   case X86::VMOVSDZrm:
1048   case X86::VMOVSDZrm_alt:
1049   case X86::VMOVAPDZ128rm:
1050   case X86::VMOVAPDZ256rm:
1051   case X86::VMOVAPDZrm:
1052   case X86::VMOVAPSZ128rm:
1053   case X86::VMOVAPSZ256rm:
1054   case X86::VMOVAPSZ128rm_NOVLX:
1055   case X86::VMOVAPSZ256rm_NOVLX:
1056   case X86::VMOVAPSZrm:
1057   case X86::VMOVDQA32Z128rm:
1058   case X86::VMOVDQA32Z256rm:
1059   case X86::VMOVDQA32Zrm:
1060   case X86::VMOVDQA64Z128rm:
1061   case X86::VMOVDQA64Z256rm:
1062   case X86::VMOVDQA64Zrm:
1063   case X86::VMOVDQU16Z128rm:
1064   case X86::VMOVDQU16Z256rm:
1065   case X86::VMOVDQU16Zrm:
1066   case X86::VMOVDQU32Z128rm:
1067   case X86::VMOVDQU32Z256rm:
1068   case X86::VMOVDQU32Zrm:
1069   case X86::VMOVDQU64Z128rm:
1070   case X86::VMOVDQU64Z256rm:
1071   case X86::VMOVDQU64Zrm:
1072   case X86::VMOVDQU8Z128rm:
1073   case X86::VMOVDQU8Z256rm:
1074   case X86::VMOVDQU8Zrm:
1075   case X86::VMOVUPDZ128rm:
1076   case X86::VMOVUPDZ256rm:
1077   case X86::VMOVUPDZrm:
1078   case X86::VMOVUPSZ128rm:
1079   case X86::VMOVUPSZ256rm:
1080   case X86::VMOVUPSZ128rm_NOVLX:
1081   case X86::VMOVUPSZ256rm_NOVLX:
1082   case X86::VMOVUPSZrm: {
1083     // Loads from constant pools are trivially rematerializable.
1084     if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
1085         MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1086         MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1087         MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1088         MI.isDereferenceableInvariantLoad(AA)) {
1089       Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1090       if (BaseReg == 0 || BaseReg == X86::RIP)
1091         return true;
1092       // Allow re-materialization of PIC load.
1093       if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
1094         return false;
1095       const MachineFunction &MF = *MI.getParent()->getParent();
1096       const MachineRegisterInfo &MRI = MF.getRegInfo();
1097       return regIsPICBase(BaseReg, MRI);
1098     }
1099     return false;
1100   }
1101 
1102   case X86::LEA32r:
1103   case X86::LEA64r: {
1104     if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1105         MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1106         MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1107         !MI.getOperand(1 + X86::AddrDisp).isReg()) {
1108       // lea fi#, lea GV, etc. are all rematerializable.
1109       if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
1110         return true;
1111       Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1112       if (BaseReg == 0)
1113         return true;
1114       // Allow re-materialization of lea PICBase + x.
1115       const MachineFunction &MF = *MI.getParent()->getParent();
1116       const MachineRegisterInfo &MRI = MF.getRegInfo();
1117       return regIsPICBase(BaseReg, MRI);
1118     }
1119     return false;
1120   }
1121   }
1122 }
1123 
1124 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
1125                                  MachineBasicBlock::iterator I,
1126                                  Register DestReg, unsigned SubIdx,
1127                                  const MachineInstr &Orig,
1128                                  const TargetRegisterInfo &TRI) const {
1129   bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
1130   if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
1131                             MachineBasicBlock::LQR_Dead) {
1132     // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
1133     // effects.
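    // (Illustrative note: MOV32r0 is normally expanded to "xor reg, reg",
    // which clobbers EFLAGS, so with EFLAGS live we use "mov $0, reg".)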
1134     int Value;
1135     switch (Orig.getOpcode()) {
1136     case X86::MOV32r0:  Value = 0; break;
1137     case X86::MOV32r1:  Value = 1; break;
1138     case X86::MOV32r_1: Value = -1; break;
1139     default:
1140       llvm_unreachable("Unexpected instruction!");
1141     }
1142 
1143     const DebugLoc &DL = Orig.getDebugLoc();
1144     BuildMI(MBB, I, DL, get(X86::MOV32ri))
1145         .add(Orig.getOperand(0))
1146         .addImm(Value);
1147   } else {
1148     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1149     MBB.insert(I, MI);
1150   }
1151 
1152   MachineInstr &NewMI = *std::prev(I);
1153   NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1154 }
1155 
1156 /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
1157 bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
1158   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1159     MachineOperand &MO = MI.getOperand(i);
1160     if (MO.isReg() && MO.isDef() &&
1161         MO.getReg() == X86::EFLAGS && !MO.isDead()) {
1162       return true;
1163     }
1164   }
1165   return false;
1166 }
1167 
1168 /// Return MI's shift-count operand truncated to the bits the hardware uses.
1169 inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
1170                                               unsigned ShiftAmtOperandIdx) {
1171   // The shift count is six bits with the REX.W prefix and five bits without.
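  // For example (illustrative): a 32-bit shift by an immediate of 35 behaves
  // like a shift by 3 (35 & 31), while a 64-bit (REX.W) shift keeps 35 as-is.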
1172   unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
1173   unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
1174   return Imm & ShiftCountMask;
1175 }
1176 
1177 /// Check whether the given shift count can be represented by the scale
1178 /// factor of a LEA instruction.
1179 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
1180   // Left shift instructions can be transformed into load-effective-address
1181   // instructions if we can encode them appropriately.
1182   // A LEA instruction utilizes a SIB byte to encode its scale factor.
1183   // The SIB.scale field is two bits wide which means that we can encode any
1184   // shift amount less than 4.
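  // For example (illustrative): a left shift by 3 can use SIB scale 8, e.g.
  //   shll $3, %eax   ==>   leal (,%rax,8), %eax
  // whereas a shift by 4 would need scale 16, which SIB cannot encode.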
1185   return ShAmt < 4 && ShAmt > 0;
1186 }
1187 
1188 bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
1189                                   unsigned Opc, bool AllowSP, Register &NewSrc,
1190                                   bool &isKill, MachineOperand &ImplicitOp,
1191                                   LiveVariables *LV) const {
1192   MachineFunction &MF = *MI.getParent()->getParent();
1193   const TargetRegisterClass *RC;
1194   if (AllowSP) {
1195     RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1196   } else {
1197     RC = Opc != X86::LEA32r ?
1198       &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1199   }
1200   Register SrcReg = Src.getReg();
1201 
1202   // For both LEA64 and LEA32 the register already has essentially the right
1203   // type (32-bit or 64-bit); we may just need to forbid SP.
1204   if (Opc != X86::LEA64_32r) {
1205     NewSrc = SrcReg;
1206     isKill = Src.isKill();
1207     assert(!Src.isUndef() && "Undef op doesn't need optimization");
1208 
1209     if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
1210       return false;
1211 
1212     return true;
1213   }
1214 
1215   // This is LEA64_32r: the incoming registers are 32-bit, but one way or
1216   // another we need to feed 64-bit registers to the final MI.
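  // For example (illustrative): a physical $ecx source is referenced as $rcx
  // on the LEA64_32r, with the original $ecx attached as an implicit use; a
  // virtual 32-bit register is instead copied into the low 32 bits of a fresh
  // 64-bit vreg below.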
1217   if (SrcReg.isPhysical()) {
1218     ImplicitOp = Src;
1219     ImplicitOp.setImplicit();
1220 
1221     NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
1222     isKill = Src.isKill();
1223     assert(!Src.isUndef() && "Undef op doesn't need optimization");
1224   } else {
1225     // The virtual register is of the wrong class; create a temporary 64-bit
1226     // vreg to feed into the LEA.
1227     NewSrc = MF.getRegInfo().createVirtualRegister(RC);
1228     MachineInstr *Copy =
1229         BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1230             .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
1231             .add(Src);
1232 
1233     // Which is obviously going to be dead after we're done with it.
1234     isKill = true;
1235 
1236     if (LV)
1237       LV->replaceKillInstruction(SrcReg, MI, *Copy);
1238   }
1239 
1240   // We've set all the parameters without issue.
1241   return true;
1242 }
1243 
1244 MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
1245     unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
1246     LiveVariables *LV, bool Is8BitOp) const {
1247   // We handle 8-bit adds and various 16-bit opcodes in the switch below.
1248   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
1249   assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1250               *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
1251          "Unexpected type for LEA transform");
1252 
1253   // TODO: For a 32-bit target, we need to adjust the LEA variables with
1254   // something like this:
1255   //   Opcode = X86::LEA32r;
1256   //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1257   //   OutRegLEA =
1258   //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
1259   //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
1260   if (!Subtarget.is64Bit())
1261     return nullptr;
1262 
1263   unsigned Opcode = X86::LEA64_32r;
1264   Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1265   Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1266 
1267   // Build and insert an implicit UNDEF value. This is OK because we will be
1268   // shifting and then extracting the lower 8/16 bits.
1269   // This has the potential to cause a partial register stall, e.g.:
1270   //   movw    (%rbp,%rcx,2), %dx
1271   //   leal    -65(%rdx), %esi
1272   // But testing has shown this *does* help performance in 64-bit mode (at
1273   // least on modern x86 machines).
1274   MachineBasicBlock::iterator MBBI = MI.getIterator();
1275   Register Dest = MI.getOperand(0).getReg();
1276   Register Src = MI.getOperand(1).getReg();
1277   bool IsDead = MI.getOperand(0).isDead();
1278   bool IsKill = MI.getOperand(1).isKill();
1279   unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1280   assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
1281   BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
1282   MachineInstr *InsMI =
1283       BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1284           .addReg(InRegLEA, RegState::Define, SubReg)
1285           .addReg(Src, getKillRegState(IsKill));
1286 
1287   MachineInstrBuilder MIB =
1288       BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
1289   switch (MIOpc) {
1290   default: llvm_unreachable("Unreachable!");
1291   case X86::SHL8ri:
1292   case X86::SHL16ri: {
1293     unsigned ShAmt = MI.getOperand(2).getImm();
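    // X86 memory operands are (base, scale, index, disp, segment); encode the
    // shift as scale = 1 << ShAmt applied to InRegLEA, so e.g. a shift by 2
    // becomes an LEA with scale 4 (illustrative note).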
1294     MIB.addReg(0).addImm(1ULL << ShAmt)
1295        .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
1296     break;
1297   }
1298   case X86::INC8r:
1299   case X86::INC16r:
1300     addRegOffset(MIB, InRegLEA, true, 1);
1301     break;
1302   case X86::DEC8r:
1303   case X86::DEC16r:
1304     addRegOffset(MIB, InRegLEA, true, -1);
1305     break;
1306   case X86::ADD8ri:
1307   case X86::ADD8ri_DB:
1308   case X86::ADD16ri:
1309   case X86::ADD16ri8:
1310   case X86::ADD16ri_DB:
1311   case X86::ADD16ri8_DB:
1312     addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
1313     break;
1314   case X86::ADD8rr:
1315   case X86::ADD8rr_DB:
1316   case X86::ADD16rr:
1317   case X86::ADD16rr_DB: {
1318     Register Src2 = MI.getOperand(2).getReg();
1319     bool IsKill2 = MI.getOperand(2).isKill();
1320     assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
1321     unsigned InRegLEA2 = 0;
1322     MachineInstr *InsMI2 = nullptr;
1323     if (Src == Src2) {
1324       // ADD8rr/ADD16rr of a register with itself (e.g. killed %reg1028,
1325       // %reg1028): a single sub-register insert suffices; use InRegLEA twice.
1326       addRegReg(MIB, InRegLEA, true, InRegLEA, false);
1327     } else {
1328       if (Subtarget.is64Bit())
1329         InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1330       else
1331         InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1332       // Build and insert into an implicit UNDEF value. This is OK because
1332       // Build and insert an implicit UNDEF value. This is OK because we
1333       // will be shifting and then extracting the lower 8/16 bits.
1335       InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
1336                    .addReg(InRegLEA2, RegState::Define, SubReg)
1337                    .addReg(Src2, getKillRegState(IsKill2));
1338       addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
1339     }
1340     if (LV && IsKill2 && InsMI2)
1341       LV->replaceKillInstruction(Src2, MI, *InsMI2);
1342     break;
1343   }
1344   }
1345 
1346   MachineInstr *NewMI = MIB;
1347   MachineInstr *ExtMI =
1348       BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1349           .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
1350           .addReg(OutRegLEA, RegState::Kill, SubReg);
1351 
1352   if (LV) {
1353     // Update live variables.
1354     LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
1355     LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
1356     if (IsKill)
1357       LV->replaceKillInstruction(Src, MI, *InsMI);
1358     if (IsDead)
1359       LV->replaceKillInstruction(Dest, MI, *ExtMI);
1360   }
1361 
1362   return ExtMI;
1363 }
1364 
1365 /// This method must be implemented by targets that
1366 /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
1367 /// may be able to convert a two-address instruction into a true
1368 /// three-address instruction on demand.  This allows the X86 target (for
1369 /// example) to convert ADD and SHL instructions into LEA instructions if they
1370 /// would require register copies due to two-addressness.
1371 ///
1372 /// This method returns a null pointer if the transformation cannot be
1373 /// performed, otherwise it returns the new instruction.
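/// For example (illustrative): "addl %esi, %edi" is two-address, so its result
/// must end up in %edi; "leal (%rdi,%rsi), %ecx" computes the same sum but may
/// place it in any register, which can save a copy.
///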
1374 ///
1375 MachineInstr *
1376 X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
1377                                     MachineInstr &MI, LiveVariables *LV) const {
1378   // The following opcodes also set the condition code register(s). Only
1379   // convert them to an equivalent LEA if the condition code register defs
1380   // are dead!
1381   if (hasLiveCondCodeDef(MI))
1382     return nullptr;
1383 
1384   MachineFunction &MF = *MI.getParent()->getParent();
1385   // All input instructions are two-address instructions. Get the known operands.
1386   const MachineOperand &Dest = MI.getOperand(0);
1387   const MachineOperand &Src = MI.getOperand(1);
1388 
1389   // Ideally, operations with undef should be folded before we get here, but we
1390   // can't guarantee it. Bail out because optimizing undefs is a waste of time.
1391   // Without this, we have to forward undef state to new register operands to
1392   // avoid machine verifier errors.
1393   if (Src.isUndef())
1394     return nullptr;
1395   if (MI.getNumOperands() > 2)
1396     if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
1397       return nullptr;
1398 
1399   MachineInstr *NewMI = nullptr;
1400   bool Is64Bit = Subtarget.is64Bit();
1401 
1402   bool Is8BitOp = false;
1403   unsigned MIOpc = MI.getOpcode();
1404   switch (MIOpc) {
1405   default: llvm_unreachable("Unreachable!");
1406   case X86::SHL64ri: {
1407     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1408     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1409     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1410 
1411     // LEA can't handle RSP.
1412     if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
1413                                         Src.getReg(), &X86::GR64_NOSPRegClass))
1414       return nullptr;
1415 
1416     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
1417                 .add(Dest)
1418                 .addReg(0)
1419                 .addImm(1ULL << ShAmt)
1420                 .add(Src)
1421                 .addImm(0)
1422                 .addReg(0);
1423     break;
1424   }
1425   case X86::SHL32ri: {
1426     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1427     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1428     if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1429 
1430     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1431 
1432     // LEA can't handle ESP.
1433     bool isKill;
1434     Register SrcReg;
1435     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1436     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
1437                         SrcReg, isKill, ImplicitOp, LV))
1438       return nullptr;
1439 
1440     MachineInstrBuilder MIB =
1441         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1442             .add(Dest)
1443             .addReg(0)
1444             .addImm(1ULL << ShAmt)
1445             .addReg(SrcReg, getKillRegState(isKill))
1446             .addImm(0)
1447             .addReg(0);
1448     if (ImplicitOp.getReg() != 0)
1449       MIB.add(ImplicitOp);
1450     NewMI = MIB;
1451 
1452     break;
1453   }
1454   case X86::SHL8ri:
1455     Is8BitOp = true;
1456     LLVM_FALLTHROUGH;
1457   case X86::SHL16ri: {
1458     assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1459     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1460     if (!isTruncatedShiftCountForLEA(ShAmt))
1461       return nullptr;
1462     return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1463   }
1464   case X86::INC64r:
1465   case X86::INC32r: {
1466     assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1467     unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
1468         (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1469     bool isKill;
1470     Register SrcReg;
1471     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1472     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1473                         ImplicitOp, LV))
1474       return nullptr;
1475 
1476     MachineInstrBuilder MIB =
1477         BuildMI(MF, MI.getDebugLoc(), get(Opc))
1478             .add(Dest)
1479             .addReg(SrcReg, getKillRegState(isKill));
1480     if (ImplicitOp.getReg() != 0)
1481       MIB.add(ImplicitOp);
1482 
1483     NewMI = addOffset(MIB, 1);
1484     break;
1485   }
1486   case X86::DEC64r:
1487   case X86::DEC32r: {
1488     assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1489     unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1490         : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1491 
1492     bool isKill;
1493     Register SrcReg;
1494     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1495     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1496                         ImplicitOp, LV))
1497       return nullptr;
1498 
1499     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1500                                   .add(Dest)
1501                                   .addReg(SrcReg, getKillRegState(isKill));
1502     if (ImplicitOp.getReg() != 0)
1503       MIB.add(ImplicitOp);
1504 
1505     NewMI = addOffset(MIB, -1);
1506 
1507     break;
1508   }
1509   case X86::DEC8r:
1510   case X86::INC8r:
1511     Is8BitOp = true;
1512     LLVM_FALLTHROUGH;
1513   case X86::DEC16r:
1514   case X86::INC16r:
1515     return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1516   case X86::ADD64rr:
1517   case X86::ADD64rr_DB:
1518   case X86::ADD32rr:
1519   case X86::ADD32rr_DB: {
1520     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1521     unsigned Opc;
1522     if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
1523       Opc = X86::LEA64r;
1524     else
1525       Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1526 
1527     bool isKill;
1528     Register SrcReg;
1529     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1530     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1531                         SrcReg, isKill, ImplicitOp, LV))
1532       return nullptr;
1533 
1534     const MachineOperand &Src2 = MI.getOperand(2);
1535     bool isKill2;
1536     Register SrcReg2;
1537     MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1538     if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
1539                         SrcReg2, isKill2, ImplicitOp2, LV))
1540       return nullptr;
1541 
1542     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1543     if (ImplicitOp.getReg() != 0)
1544       MIB.add(ImplicitOp);
1545     if (ImplicitOp2.getReg() != 0)
1546       MIB.add(ImplicitOp2);
1547 
1548     NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
1549     if (LV && Src2.isKill())
1550       LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
1551     break;
1552   }
1553   case X86::ADD8rr:
1554   case X86::ADD8rr_DB:
1555     Is8BitOp = true;
1556     LLVM_FALLTHROUGH;
1557   case X86::ADD16rr:
1558   case X86::ADD16rr_DB:
1559     return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1560   case X86::ADD64ri32:
1561   case X86::ADD64ri8:
1562   case X86::ADD64ri32_DB:
1563   case X86::ADD64ri8_DB:
1564     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1565     NewMI = addOffset(
1566         BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1567         MI.getOperand(2));
1568     break;
1569   case X86::ADD32ri:
1570   case X86::ADD32ri8:
1571   case X86::ADD32ri_DB:
1572   case X86::ADD32ri8_DB: {
1573     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1574     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1575 
1576     bool isKill;
1577     Register SrcReg;
1578     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1579     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1580                         SrcReg, isKill, ImplicitOp, LV))
1581       return nullptr;
1582 
1583     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1584                                   .add(Dest)
1585                                   .addReg(SrcReg, getKillRegState(isKill));
1586     if (ImplicitOp.getReg() != 0)
1587       MIB.add(ImplicitOp);
1588 
1589     NewMI = addOffset(MIB, MI.getOperand(2));
1590     break;
1591   }
1592   case X86::ADD8ri:
1593   case X86::ADD8ri_DB:
1594     Is8BitOp = true;
1595     LLVM_FALLTHROUGH;
1596   case X86::ADD16ri:
1597   case X86::ADD16ri8:
1598   case X86::ADD16ri_DB:
1599   case X86::ADD16ri8_DB:
1600     return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1601   case X86::SUB8ri:
1602   case X86::SUB16ri8:
1603   case X86::SUB16ri:
1604     /// FIXME: Support these similar to ADD8ri/ADD16ri*.
1605     return nullptr;
1606   case X86::SUB32ri8:
1607   case X86::SUB32ri: {
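    // Fold the subtraction into an LEA displacement (illustrative: on a 64-bit
    // target "subl $5, %eax" becomes "leal -5(%rax), %eax" when EFLAGS are
    // dead); this only works if -Imm still fits in a signed 32-bit displacement.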
1608     if (!MI.getOperand(2).isImm())
1609       return nullptr;
1610     int64_t Imm = MI.getOperand(2).getImm();
1611     if (!isInt<32>(-Imm))
1612       return nullptr;
1613 
1614     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1615     unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1616 
1617     bool isKill;
1618     Register SrcReg;
1619     MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1620     if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1621                         SrcReg, isKill, ImplicitOp, LV))
1622       return nullptr;
1623 
1624     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1625                                   .add(Dest)
1626                                   .addReg(SrcReg, getKillRegState(isKill));
1627     if (ImplicitOp.getReg() != 0)
1628       MIB.add(ImplicitOp);
1629 
1630     NewMI = addOffset(MIB, -Imm);
1631     break;
1632   }
1633 
1634   case X86::SUB64ri8:
1635   case X86::SUB64ri32: {
1636     if (!MI.getOperand(2).isImm())
1637       return nullptr;
1638     int64_t Imm = MI.getOperand(2).getImm();
1639     if (!isInt<32>(-Imm))
1640       return nullptr;
1641 
1642     assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1643 
1644     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
1645                                       get(X86::LEA64r)).add(Dest).add(Src);
1646     NewMI = addOffset(MIB, -Imm);
1647     break;
1648   }
1649 
1650   case X86::VMOVDQU8Z128rmk:
1651   case X86::VMOVDQU8Z256rmk:
1652   case X86::VMOVDQU8Zrmk:
1653   case X86::VMOVDQU16Z128rmk:
1654   case X86::VMOVDQU16Z256rmk:
1655   case X86::VMOVDQU16Zrmk:
1656   case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
1657   case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
1658   case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
1659   case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
1660   case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
1661   case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
1662   case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
1663   case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
1664   case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
1665   case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
1666   case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
1667   case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk:
1668   case X86::VBROADCASTSDZ256rmk:
1669   case X86::VBROADCASTSDZrmk:
1670   case X86::VBROADCASTSSZ128rmk:
1671   case X86::VBROADCASTSSZ256rmk:
1672   case X86::VBROADCASTSSZrmk:
1673   case X86::VPBROADCASTDZ128rmk:
1674   case X86::VPBROADCASTDZ256rmk:
1675   case X86::VPBROADCASTDZrmk:
1676   case X86::VPBROADCASTQZ128rmk:
1677   case X86::VPBROADCASTQZ256rmk:
1678   case X86::VPBROADCASTQZrmk: {
1679     unsigned Opc;
1680     switch (MIOpc) {
1681     default: llvm_unreachable("Unreachable!");
1682     case X86::VMOVDQU8Z128rmk:     Opc = X86::VPBLENDMBZ128rmk; break;
1683     case X86::VMOVDQU8Z256rmk:     Opc = X86::VPBLENDMBZ256rmk; break;
1684     case X86::VMOVDQU8Zrmk:        Opc = X86::VPBLENDMBZrmk;    break;
1685     case X86::VMOVDQU16Z128rmk:    Opc = X86::VPBLENDMWZ128rmk; break;
1686     case X86::VMOVDQU16Z256rmk:    Opc = X86::VPBLENDMWZ256rmk; break;
1687     case X86::VMOVDQU16Zrmk:       Opc = X86::VPBLENDMWZrmk;    break;
1688     case X86::VMOVDQU32Z128rmk:    Opc = X86::VPBLENDMDZ128rmk; break;
1689     case X86::VMOVDQU32Z256rmk:    Opc = X86::VPBLENDMDZ256rmk; break;
1690     case X86::VMOVDQU32Zrmk:       Opc = X86::VPBLENDMDZrmk;    break;
1691     case X86::VMOVDQU64Z128rmk:    Opc = X86::VPBLENDMQZ128rmk; break;
1692     case X86::VMOVDQU64Z256rmk:    Opc = X86::VPBLENDMQZ256rmk; break;
1693     case X86::VMOVDQU64Zrmk:       Opc = X86::VPBLENDMQZrmk;    break;
1694     case X86::VMOVUPDZ128rmk:      Opc = X86::VBLENDMPDZ128rmk; break;
1695     case X86::VMOVUPDZ256rmk:      Opc = X86::VBLENDMPDZ256rmk; break;
1696     case X86::VMOVUPDZrmk:         Opc = X86::VBLENDMPDZrmk;    break;
1697     case X86::VMOVUPSZ128rmk:      Opc = X86::VBLENDMPSZ128rmk; break;
1698     case X86::VMOVUPSZ256rmk:      Opc = X86::VBLENDMPSZ256rmk; break;
1699     case X86::VMOVUPSZrmk:         Opc = X86::VBLENDMPSZrmk;    break;
1700     case X86::VMOVDQA32Z128rmk:    Opc = X86::VPBLENDMDZ128rmk; break;
1701     case X86::VMOVDQA32Z256rmk:    Opc = X86::VPBLENDMDZ256rmk; break;
1702     case X86::VMOVDQA32Zrmk:       Opc = X86::VPBLENDMDZrmk;    break;
1703     case X86::VMOVDQA64Z128rmk:    Opc = X86::VPBLENDMQZ128rmk; break;
1704     case X86::VMOVDQA64Z256rmk:    Opc = X86::VPBLENDMQZ256rmk; break;
1705     case X86::VMOVDQA64Zrmk:       Opc = X86::VPBLENDMQZrmk;    break;
1706     case X86::VMOVAPDZ128rmk:      Opc = X86::VBLENDMPDZ128rmk; break;
1707     case X86::VMOVAPDZ256rmk:      Opc = X86::VBLENDMPDZ256rmk; break;
1708     case X86::VMOVAPDZrmk:         Opc = X86::VBLENDMPDZrmk;    break;
1709     case X86::VMOVAPSZ128rmk:      Opc = X86::VBLENDMPSZ128rmk; break;
1710     case X86::VMOVAPSZ256rmk:      Opc = X86::VBLENDMPSZ256rmk; break;
1711     case X86::VMOVAPSZrmk:         Opc = X86::VBLENDMPSZrmk;    break;
1712     case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break;
1713     case X86::VBROADCASTSDZrmk:    Opc = X86::VBLENDMPDZrmbk;    break;
1714     case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break;
1715     case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break;
1716     case X86::VBROADCASTSSZrmk:    Opc = X86::VBLENDMPSZrmbk;    break;
1717     case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break;
1718     case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break;
1719     case X86::VPBROADCASTDZrmk:    Opc = X86::VPBLENDMDZrmbk;    break;
1720     case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break;
1721     case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break;
1722     case X86::VPBROADCASTQZrmk:    Opc = X86::VPBLENDMQZrmbk;    break;
1723     }
1724 
1725     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1726               .add(Dest)
1727               .add(MI.getOperand(2))
1728               .add(Src)
1729               .add(MI.getOperand(3))
1730               .add(MI.getOperand(4))
1731               .add(MI.getOperand(5))
1732               .add(MI.getOperand(6))
1733               .add(MI.getOperand(7));
1734     break;
1735   }
1736 
1737   case X86::VMOVDQU8Z128rrk:
1738   case X86::VMOVDQU8Z256rrk:
1739   case X86::VMOVDQU8Zrrk:
1740   case X86::VMOVDQU16Z128rrk:
1741   case X86::VMOVDQU16Z256rrk:
1742   case X86::VMOVDQU16Zrrk:
1743   case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
1744   case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
1745   case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
1746   case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
1747   case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
1748   case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
1749   case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
1750   case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
1751   case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
1752   case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
1753   case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
1754   case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
1755     unsigned Opc;
1756     switch (MIOpc) {
1757     default: llvm_unreachable("Unreachable!");
1758     case X86::VMOVDQU8Z128rrk:  Opc = X86::VPBLENDMBZ128rrk; break;
1759     case X86::VMOVDQU8Z256rrk:  Opc = X86::VPBLENDMBZ256rrk; break;
1760     case X86::VMOVDQU8Zrrk:     Opc = X86::VPBLENDMBZrrk;    break;
1761     case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
1762     case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
1763     case X86::VMOVDQU16Zrrk:    Opc = X86::VPBLENDMWZrrk;    break;
1764     case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1765     case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1766     case X86::VMOVDQU32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
1767     case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1768     case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1769     case X86::VMOVDQU64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
1770     case X86::VMOVUPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
1771     case X86::VMOVUPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
1772     case X86::VMOVUPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
1773     case X86::VMOVUPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
1774     case X86::VMOVUPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
1775     case X86::VMOVUPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
1776     case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1777     case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1778     case X86::VMOVDQA32Zrrk:    Opc = X86::VPBLENDMDZrrk;    break;
1779     case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1780     case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1781     case X86::VMOVDQA64Zrrk:    Opc = X86::VPBLENDMQZrrk;    break;
1782     case X86::VMOVAPDZ128rrk:   Opc = X86::VBLENDMPDZ128rrk; break;
1783     case X86::VMOVAPDZ256rrk:   Opc = X86::VBLENDMPDZ256rrk; break;
1784     case X86::VMOVAPDZrrk:      Opc = X86::VBLENDMPDZrrk;    break;
1785     case X86::VMOVAPSZ128rrk:   Opc = X86::VBLENDMPSZ128rrk; break;
1786     case X86::VMOVAPSZ256rrk:   Opc = X86::VBLENDMPSZ256rrk; break;
1787     case X86::VMOVAPSZrrk:      Opc = X86::VBLENDMPSZrrk;    break;
1788     }
1789 
1790     NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1791               .add(Dest)
1792               .add(MI.getOperand(2))
1793               .add(Src)
1794               .add(MI.getOperand(3));
1795     break;
1796   }
1797   }
1798 
1799   if (!NewMI) return nullptr;
1800 
1801   if (LV) {  // Update live variables
1802     if (Src.isKill())
1803       LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
1804     if (Dest.isDead())
1805       LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
1806   }
1807 
1808   MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
1809   return NewMI;
1810 }
1811 
1812 /// This determines which of the three possible cases of a three-source
1813 /// commute the source operand indexes correspond to, taking into account any
1814 /// mask operands. Commuting with a passthru operand is never allowed, and an
1815 /// unknown combination of indexes is a programming error.
1816 /// Case 0 - Possible to commute the first and second operands.
1817 /// Case 1 - Possible to commute the first and third operands.
1818 /// Case 2 - Possible to commute the second and third operands.
1819 static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
1820                                        unsigned SrcOpIdx2) {
1821   // Put the lowest index in SrcOpIdx1 to simplify the checks below.
1822   if (SrcOpIdx1 > SrcOpIdx2)
1823     std::swap(SrcOpIdx1, SrcOpIdx2);
1824 
1825   unsigned Op1 = 1, Op2 = 2, Op3 = 3;
1826   if (X86II::isKMasked(TSFlags)) {
1827     Op2++;
1828     Op3++;
1829   }
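  // For illustration: an unmasked VPTERNLOGDZrri keeps its vector sources at
  // operand indices 1, 2 and 3, so commuting operands 2 and 3 is Case 2; for
  // the merge-masked VPTERNLOGDZrrik the k-mask at index 2 pushes the last two
  // sources to indices 3 and 4, and commuting operands 3 and 4 is still Case 2.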
1830 
1831   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
1832     return 0;
1833   if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
1834     return 1;
1835   if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
1836     return 2;
1837   llvm_unreachable("Unknown three src commute case.");
1838 }
1839 
1840 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
1841     const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
1842     const X86InstrFMA3Group &FMA3Group) const {
1843 
1844   unsigned Opc = MI.getOpcode();
1845 
1846   // TODO: Commuting the 1st operand of FMA*_Int requires additional analysis:
1847   // the commute is legal only if all users of the FMA*_Int instruction read
1848   // just the lowest element of its result. Such an analysis is not implemented
1849   // yet, so commuting operand 1 of the intrinsic forms is simply rejected
1850   // (see the assert below). Once such an analysis becomes available, this is
1851   // the right place to call it.
1852   assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
1853          "Intrinsic instructions can't commute operand 1");
1854 
1855   // Determine which case this commute is or if it can't be done.
1856   unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1857                                          SrcOpIdx2);
1858   assert(Case < 3 && "Unexpected case number!");
1859 
1860   // Define the FMA forms mapping array that helps to map input FMA form
1861   // to output FMA form to preserve the operation semantics after
1862   // commuting the operands.
1863   const unsigned Form132Index = 0;
1864   const unsigned Form213Index = 1;
1865   const unsigned Form231Index = 2;
1866   static const unsigned FormMapping[][3] = {
1867     // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
1868     // FMA132 A, C, b; ==> FMA231 C, A, b;
1869     // FMA213 B, A, c; ==> FMA213 A, B, c;
1870     // FMA231 C, A, b; ==> FMA132 A, C, b;
1871     { Form231Index, Form213Index, Form132Index },
1872     // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
1873     // FMA132 A, c, B; ==> FMA132 B, c, A;
1874     // FMA213 B, a, C; ==> FMA231 C, a, B;
1875     // FMA231 C, a, B; ==> FMA213 B, a, C;
1876     { Form132Index, Form231Index, Form213Index },
1877     // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
1878     // FMA132 a, C, B; ==> FMA213 a, B, C;
1879     // FMA213 b, A, C; ==> FMA132 b, C, A;
1880     // FMA231 c, A, B; ==> FMA231 c, B, A;
1881     { Form213Index, Form132Index, Form231Index }
1882   };
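  // For illustration: commuting operands 2 and 3 of VFMADD213PSr
  // (dst = src2 * dst + src3) is Case 2, and FormMapping[2][Form213Index]
  // selects the 132 form, i.e. VFMADD132PSr (dst = dst * src3 + src2), which
  // computes the same value once the two swapped sources are taken into
  // account.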
1883 
1884   unsigned FMAForms[3];
1885   FMAForms[0] = FMA3Group.get132Opcode();
1886   FMAForms[1] = FMA3Group.get213Opcode();
1887   FMAForms[2] = FMA3Group.get231Opcode();
1888   unsigned FormIndex;
1889   for (FormIndex = 0; FormIndex < 3; FormIndex++)
1890     if (Opc == FMAForms[FormIndex])
1891       break;
1892 
1893   // Everything is ready, just adjust the FMA opcode and return it.
1894   FormIndex = FormMapping[Case][FormIndex];
1895   return FMAForms[FormIndex];
1896 }
1897 
1898 static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
1899                              unsigned SrcOpIdx2) {
1900   // Determine which case this commute is or if it can't be done.
1901   unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1902                                          SrcOpIdx2);
1903   assert(Case < 3 && "Unexpected case value!");
1904 
1905   // For each case we need to swap two pairs of bits in the final immediate.
1906   static const uint8_t SwapMasks[3][4] = {
1907     { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
1908     { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
1909     { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
1910   };
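  // Worked example, assuming the usual imm8 truth-table encoding where bit
  // ((op1 << 2) | (op2 << 1) | op3) supplies the result bit: commuting
  // operands 1 and 2 of a ternlog with Imm = 0xCA ("op1 ? op2 : op3") is
  // Case 0. Clearing the swap bits gives 0xCA & ~0x3C = 0xC2, and since only
  // bit 3 of the two pairs was set, bit 5 is set instead, yielding 0xE2
  // ("op2 ? op1 : op3").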
1911 
1912   uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
1913   // Clear out the bits we are swapping.
1914   uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
1915                            SwapMasks[Case][2] | SwapMasks[Case][3]);
1916   // If the immediate had a bit of the pair set, then set the opposite bit.
1917   if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
1918   if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
1919   if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
1920   if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
1921   MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
1922 }
1923 
1924 // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
1925 // commuted.
1926 static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
1927 #define VPERM_CASES(Suffix) \
1928   case X86::VPERMI2##Suffix##128rr:    case X86::VPERMT2##Suffix##128rr:    \
1929   case X86::VPERMI2##Suffix##256rr:    case X86::VPERMT2##Suffix##256rr:    \
1930   case X86::VPERMI2##Suffix##rr:       case X86::VPERMT2##Suffix##rr:       \
1931   case X86::VPERMI2##Suffix##128rm:    case X86::VPERMT2##Suffix##128rm:    \
1932   case X86::VPERMI2##Suffix##256rm:    case X86::VPERMT2##Suffix##256rm:    \
1933   case X86::VPERMI2##Suffix##rm:       case X86::VPERMT2##Suffix##rm:       \
1934   case X86::VPERMI2##Suffix##128rrkz:  case X86::VPERMT2##Suffix##128rrkz:  \
1935   case X86::VPERMI2##Suffix##256rrkz:  case X86::VPERMT2##Suffix##256rrkz:  \
1936   case X86::VPERMI2##Suffix##rrkz:     case X86::VPERMT2##Suffix##rrkz:     \
1937   case X86::VPERMI2##Suffix##128rmkz:  case X86::VPERMT2##Suffix##128rmkz:  \
1938   case X86::VPERMI2##Suffix##256rmkz:  case X86::VPERMT2##Suffix##256rmkz:  \
1939   case X86::VPERMI2##Suffix##rmkz:     case X86::VPERMT2##Suffix##rmkz:
1940 
1941 #define VPERM_CASES_BROADCAST(Suffix) \
1942   VPERM_CASES(Suffix) \
1943   case X86::VPERMI2##Suffix##128rmb:   case X86::VPERMT2##Suffix##128rmb:   \
1944   case X86::VPERMI2##Suffix##256rmb:   case X86::VPERMT2##Suffix##256rmb:   \
1945   case X86::VPERMI2##Suffix##rmb:      case X86::VPERMT2##Suffix##rmb:      \
1946   case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
1947   case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
1948   case X86::VPERMI2##Suffix##rmbkz:    case X86::VPERMT2##Suffix##rmbkz:
1949 
1950   switch (Opcode) {
1951   default: return false;
1952   VPERM_CASES(B)
1953   VPERM_CASES_BROADCAST(D)
1954   VPERM_CASES_BROADCAST(PD)
1955   VPERM_CASES_BROADCAST(PS)
1956   VPERM_CASES_BROADCAST(Q)
1957   VPERM_CASES(W)
1958     return true;
1959   }
1960 #undef VPERM_CASES_BROADCAST
1961 #undef VPERM_CASES
1962 }
1963 
1964 // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
1965 // from the I opcode to the T opcode and vice versa.
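// Roughly speaking, VPERMI2* takes its indices from the destination operand
// and its table data from the two sources, while VPERMT2* takes the indices
// from the first source and uses the destination as half of the table; for
// example, exchanging the first two register operands of a VPERMI2D therefore
// yields the equivalent VPERMT2D.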
1966 static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
1967 #define VPERM_CASES(Orig, New) \
1968   case X86::Orig##128rr:    return X86::New##128rr;   \
1969   case X86::Orig##128rrkz:  return X86::New##128rrkz; \
1970   case X86::Orig##128rm:    return X86::New##128rm;   \
1971   case X86::Orig##128rmkz:  return X86::New##128rmkz; \
1972   case X86::Orig##256rr:    return X86::New##256rr;   \
1973   case X86::Orig##256rrkz:  return X86::New##256rrkz; \
1974   case X86::Orig##256rm:    return X86::New##256rm;   \
1975   case X86::Orig##256rmkz:  return X86::New##256rmkz; \
1976   case X86::Orig##rr:       return X86::New##rr;      \
1977   case X86::Orig##rrkz:     return X86::New##rrkz;    \
1978   case X86::Orig##rm:       return X86::New##rm;      \
1979   case X86::Orig##rmkz:     return X86::New##rmkz;
1980 
1981 #define VPERM_CASES_BROADCAST(Orig, New) \
1982   VPERM_CASES(Orig, New) \
1983   case X86::Orig##128rmb:   return X86::New##128rmb;   \
1984   case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
1985   case X86::Orig##256rmb:   return X86::New##256rmb;   \
1986   case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
1987   case X86::Orig##rmb:      return X86::New##rmb;      \
1988   case X86::Orig##rmbkz:    return X86::New##rmbkz;
1989 
1990   switch (Opcode) {
1991   VPERM_CASES(VPERMI2B, VPERMT2B)
1992   VPERM_CASES_BROADCAST(VPERMI2D,  VPERMT2D)
1993   VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
1994   VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
1995   VPERM_CASES_BROADCAST(VPERMI2Q,  VPERMT2Q)
1996   VPERM_CASES(VPERMI2W, VPERMT2W)
1997   VPERM_CASES(VPERMT2B, VPERMI2B)
1998   VPERM_CASES_BROADCAST(VPERMT2D,  VPERMI2D)
1999   VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
2000   VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
2001   VPERM_CASES_BROADCAST(VPERMT2Q,  VPERMI2Q)
2002   VPERM_CASES(VPERMT2W, VPERMI2W)
2003   }
2004 
2005   llvm_unreachable("Unreachable!");
2006 #undef VPERM_CASES_BROADCAST
2007 #undef VPERM_CASES
2008 }
2009 
2010 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2011                                                    unsigned OpIdx1,
2012                                                    unsigned OpIdx2) const {
2013   auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2014     if (NewMI)
2015       return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2016     return MI;
2017   };
2018 
2019   switch (MI.getOpcode()) {
2020   case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
2021   case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
2022   case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
2023   case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
2024   case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
2025   case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
2026     unsigned Opc;
2027     unsigned Size;
2028     switch (MI.getOpcode()) {
2029     default: llvm_unreachable("Unreachable!");
2030     case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
2031     case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
2032     case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
2033     case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
2034     case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
2035     case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
2036     }
2037     unsigned Amt = MI.getOperand(3).getImm();
2038     auto &WorkingMI = cloneIfNew(MI);
2039     WorkingMI.setDesc(get(Opc));
2040     WorkingMI.getOperand(3).setImm(Size - Amt);
2041     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2042                                                    OpIdx1, OpIdx2);
2043   }
2044   case X86::PFSUBrr:
2045   case X86::PFSUBRrr: {
2046     // PFSUB  x, y: x = x - y
2047     // PFSUBR x, y: x = y - x
2048     unsigned Opc =
2049         (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
2050     auto &WorkingMI = cloneIfNew(MI);
2051     WorkingMI.setDesc(get(Opc));
2052     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2053                                                    OpIdx1, OpIdx2);
2054   }
2055   case X86::BLENDPDrri:
2056   case X86::BLENDPSrri:
2057   case X86::VBLENDPDrri:
2058   case X86::VBLENDPSrri:
2059     // If we're optimizing for size, try to use MOVSD/MOVSS.
2060     if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
2061       unsigned Mask, Opc;
2062       switch (MI.getOpcode()) {
2063       default: llvm_unreachable("Unreachable!");
2064       case X86::BLENDPDrri:  Opc = X86::MOVSDrr;  Mask = 0x03; break;
2065       case X86::BLENDPSrri:  Opc = X86::MOVSSrr;  Mask = 0x0F; break;
2066       case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
2067       case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
2068       }
2069       if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
2070         auto &WorkingMI = cloneIfNew(MI);
2071         WorkingMI.setDesc(get(Opc));
2072         WorkingMI.RemoveOperand(3);
2073         return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
2074                                                        /*NewMI=*/false,
2075                                                        OpIdx1, OpIdx2);
2076       }
2077     }
2078     LLVM_FALLTHROUGH;
2079   case X86::PBLENDWrri:
2080   case X86::VBLENDPDYrri:
2081   case X86::VBLENDPSYrri:
2082   case X86::VPBLENDDrri:
2083   case X86::VPBLENDWrri:
2084   case X86::VPBLENDDYrri:
2085   case X86::VPBLENDWYrri:{
2086     int8_t Mask;
2087     switch (MI.getOpcode()) {
2088     default: llvm_unreachable("Unreachable!");
2089     case X86::BLENDPDrri:    Mask = (int8_t)0x03; break;
2090     case X86::BLENDPSrri:    Mask = (int8_t)0x0F; break;
2091     case X86::PBLENDWrri:    Mask = (int8_t)0xFF; break;
2092     case X86::VBLENDPDrri:   Mask = (int8_t)0x03; break;
2093     case X86::VBLENDPSrri:   Mask = (int8_t)0x0F; break;
2094     case X86::VBLENDPDYrri:  Mask = (int8_t)0x0F; break;
2095     case X86::VBLENDPSYrri:  Mask = (int8_t)0xFF; break;
2096     case X86::VPBLENDDrri:   Mask = (int8_t)0x0F; break;
2097     case X86::VPBLENDWrri:   Mask = (int8_t)0xFF; break;
2098     case X86::VPBLENDDYrri:  Mask = (int8_t)0xFF; break;
2099     case X86::VPBLENDWYrri:  Mask = (int8_t)0xFF; break;
2100     }
2101     // Only the least significant bits of Imm are used.
2102     // Using int8_t to ensure it will be sign extended to the int64_t that
2103     // setImm takes in order to match isel behavior.
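    // For illustration: a VBLENDPSrri with Imm = 0x05 takes lanes 0 and 2 from
    // the second source and lanes 1 and 3 from the first; once the sources are
    // swapped, the same selection requires 0x0F ^ 0x05 = 0x0A.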
2104     int8_t Imm = MI.getOperand(3).getImm() & Mask;
2105     auto &WorkingMI = cloneIfNew(MI);
2106     WorkingMI.getOperand(3).setImm(Mask ^ Imm);
2107     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2108                                                    OpIdx1, OpIdx2);
2109   }
2110   case X86::INSERTPSrr:
2111   case X86::VINSERTPSrr:
2112   case X86::VINSERTPSZrr: {
2113     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2114     unsigned ZMask = Imm & 15;
2115     unsigned DstIdx = (Imm >> 4) & 3;
2116     unsigned SrcIdx = (Imm >> 6) & 3;
2117 
2118     // We can commute insertps if we zero two of the elements, the insertion is
2119     // "inline" (SrcIdx == DstIdx) and the inserted element is not itself zeroed.
2120     if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2121         countPopulation(ZMask) == 2) {
2122       unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
2123       assert(AltIdx < 4 && "Illegal insertion index");
2124       unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2125       auto &WorkingMI = cloneIfNew(MI);
2126       WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
2127       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2128                                                      OpIdx1, OpIdx2);
2129     }
2130     return nullptr;
2131   }
2132   case X86::MOVSDrr:
2133   case X86::MOVSSrr:
2134   case X86::VMOVSDrr:
2135   case X86::VMOVSSrr:{
2136     // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
2137     if (Subtarget.hasSSE41()) {
2138       unsigned Mask, Opc;
2139       switch (MI.getOpcode()) {
2140       default: llvm_unreachable("Unreachable!");
2141       case X86::MOVSDrr:  Opc = X86::BLENDPDrri;  Mask = 0x02; break;
2142       case X86::MOVSSrr:  Opc = X86::BLENDPSrri;  Mask = 0x0E; break;
2143       case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
2144       case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
2145       }
2146 
2147       auto &WorkingMI = cloneIfNew(MI);
2148       WorkingMI.setDesc(get(Opc));
2149       WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
2150       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2151                                                      OpIdx1, OpIdx2);
2152     }
2153 
2154     // Convert to SHUFPD.
2155     assert(MI.getOpcode() == X86::MOVSDrr &&
2156            "Can only commute MOVSDrr without SSE4.1");
2157 
2158     auto &WorkingMI = cloneIfNew(MI);
2159     WorkingMI.setDesc(get(X86::SHUFPDrri));
2160     WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
2161     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2162                                                    OpIdx1, OpIdx2);
2163   }
2164   case X86::SHUFPDrri: {
2165     // Commute to MOVSD.
2166     assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2167     auto &WorkingMI = cloneIfNew(MI);
2168     WorkingMI.setDesc(get(X86::MOVSDrr));
2169     WorkingMI.RemoveOperand(3);
2170     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2171                                                    OpIdx1, OpIdx2);
2172   }
2173   case X86::PCLMULQDQrr:
2174   case X86::VPCLMULQDQrr:
2175   case X86::VPCLMULQDQYrr:
2176   case X86::VPCLMULQDQZrr:
2177   case X86::VPCLMULQDQZ128rr:
2178   case X86::VPCLMULQDQZ256rr: {
2179     // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
2180     // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
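    // For illustration: Imm = 0x01 multiplies SRC1[127:64] by SRC2[63:0];
    // once the sources are swapped the same product needs Imm = 0x10
    // (SRC1[63:0] times SRC2[127:64]), which is what the bit shuffle below
    // produces.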
2181     unsigned Imm = MI.getOperand(3).getImm();
2182     unsigned Src1Hi = Imm & 0x01;
2183     unsigned Src2Hi = Imm & 0x10;
2184     auto &WorkingMI = cloneIfNew(MI);
2185     WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
2186     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2187                                                    OpIdx1, OpIdx2);
2188   }
2189   case X86::VPCMPBZ128rri:  case X86::VPCMPUBZ128rri:
2190   case X86::VPCMPBZ256rri:  case X86::VPCMPUBZ256rri:
2191   case X86::VPCMPBZrri:     case X86::VPCMPUBZrri:
2192   case X86::VPCMPDZ128rri:  case X86::VPCMPUDZ128rri:
2193   case X86::VPCMPDZ256rri:  case X86::VPCMPUDZ256rri:
2194   case X86::VPCMPDZrri:     case X86::VPCMPUDZrri:
2195   case X86::VPCMPQZ128rri:  case X86::VPCMPUQZ128rri:
2196   case X86::VPCMPQZ256rri:  case X86::VPCMPUQZ256rri:
2197   case X86::VPCMPQZrri:     case X86::VPCMPUQZrri:
2198   case X86::VPCMPWZ128rri:  case X86::VPCMPUWZ128rri:
2199   case X86::VPCMPWZ256rri:  case X86::VPCMPUWZ256rri:
2200   case X86::VPCMPWZrri:     case X86::VPCMPUWZrri:
2201   case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
2202   case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
2203   case X86::VPCMPBZrrik:    case X86::VPCMPUBZrrik:
2204   case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
2205   case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
2206   case X86::VPCMPDZrrik:    case X86::VPCMPUDZrrik:
2207   case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
2208   case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
2209   case X86::VPCMPQZrrik:    case X86::VPCMPUQZrrik:
2210   case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
2211   case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
2212   case X86::VPCMPWZrrik:    case X86::VPCMPUWZrrik: {
2213     // Flip comparison mode immediate (if necessary).
2214     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
2215     Imm = X86::getSwappedVPCMPImm(Imm);
2216     auto &WorkingMI = cloneIfNew(MI);
2217     WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
2218     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2219                                                    OpIdx1, OpIdx2);
2220   }
2221   case X86::VPCOMBri: case X86::VPCOMUBri:
2222   case X86::VPCOMDri: case X86::VPCOMUDri:
2223   case X86::VPCOMQri: case X86::VPCOMUQri:
2224   case X86::VPCOMWri: case X86::VPCOMUWri: {
2225     // Flip comparison mode immediate (if necessary).
2226     unsigned Imm = MI.getOperand(3).getImm() & 0x7;
2227     Imm = X86::getSwappedVPCOMImm(Imm);
2228     auto &WorkingMI = cloneIfNew(MI);
2229     WorkingMI.getOperand(3).setImm(Imm);
2230     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2231                                                    OpIdx1, OpIdx2);
2232   }
2233   case X86::VCMPSDZrr:
2234   case X86::VCMPSSZrr:
2235   case X86::VCMPPDZrri:
2236   case X86::VCMPPSZrri:
2237   case X86::VCMPPDZ128rri:
2238   case X86::VCMPPSZ128rri:
2239   case X86::VCMPPDZ256rri:
2240   case X86::VCMPPSZ256rri:
2241   case X86::VCMPPDZrrik:
2242   case X86::VCMPPSZrrik:
2243   case X86::VCMPPDZ128rrik:
2244   case X86::VCMPPSZ128rrik:
2245   case X86::VCMPPDZ256rrik:
2246   case X86::VCMPPSZ256rrik: {
2247     unsigned Imm =
2248                 MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f;
2249     Imm = X86::getSwappedVCMPImm(Imm);
2250     auto &WorkingMI = cloneIfNew(MI);
2251     WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm);
2252     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2253                                                    OpIdx1, OpIdx2);
2254   }
2255   case X86::VPERM2F128rr:
2256   case X86::VPERM2I128rr: {
2257     // Flip permute source immediate.
2258     // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
2259     // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
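    // For illustration: Imm = 0x20 selects Op0.lo for the low half and Op1.lo
    // for the high half; with the sources swapped, the same result is encoded
    // as 0x20 ^ 0x22 = 0x02.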
2260     int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
2261     auto &WorkingMI = cloneIfNew(MI);
2262     WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
2263     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2264                                                    OpIdx1, OpIdx2);
2265   }
2266   case X86::MOVHLPSrr:
2267   case X86::UNPCKHPDrr:
2268   case X86::VMOVHLPSrr:
2269   case X86::VUNPCKHPDrr:
2270   case X86::VMOVHLPSZrr:
2271   case X86::VUNPCKHPDZ128rr: {
2272     assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2273 
2274     unsigned Opc = MI.getOpcode();
2275     switch (Opc) {
2276     default: llvm_unreachable("Unreachable!");
2277     case X86::MOVHLPSrr:       Opc = X86::UNPCKHPDrr;      break;
2278     case X86::UNPCKHPDrr:      Opc = X86::MOVHLPSrr;       break;
2279     case X86::VMOVHLPSrr:      Opc = X86::VUNPCKHPDrr;     break;
2280     case X86::VUNPCKHPDrr:     Opc = X86::VMOVHLPSrr;      break;
2281     case X86::VMOVHLPSZrr:     Opc = X86::VUNPCKHPDZ128rr; break;
2282     case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr;     break;
2283     }
2284     auto &WorkingMI = cloneIfNew(MI);
2285     WorkingMI.setDesc(get(Opc));
2286     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2287                                                    OpIdx1, OpIdx2);
2288   }
2289   case X86::CMOV16rr:  case X86::CMOV32rr:  case X86::CMOV64rr: {
2290     auto &WorkingMI = cloneIfNew(MI);
2291     unsigned OpNo = MI.getDesc().getNumOperands() - 1;
2292     X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
2293     WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
2294     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2295                                                    OpIdx1, OpIdx2);
2296   }
2297   case X86::VPTERNLOGDZrri:      case X86::VPTERNLOGDZrmi:
2298   case X86::VPTERNLOGDZ128rri:   case X86::VPTERNLOGDZ128rmi:
2299   case X86::VPTERNLOGDZ256rri:   case X86::VPTERNLOGDZ256rmi:
2300   case X86::VPTERNLOGQZrri:      case X86::VPTERNLOGQZrmi:
2301   case X86::VPTERNLOGQZ128rri:   case X86::VPTERNLOGQZ128rmi:
2302   case X86::VPTERNLOGQZ256rri:   case X86::VPTERNLOGQZ256rmi:
2303   case X86::VPTERNLOGDZrrik:
2304   case X86::VPTERNLOGDZ128rrik:
2305   case X86::VPTERNLOGDZ256rrik:
2306   case X86::VPTERNLOGQZrrik:
2307   case X86::VPTERNLOGQZ128rrik:
2308   case X86::VPTERNLOGQZ256rrik:
2309   case X86::VPTERNLOGDZrrikz:    case X86::VPTERNLOGDZrmikz:
2310   case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2311   case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2312   case X86::VPTERNLOGQZrrikz:    case X86::VPTERNLOGQZrmikz:
2313   case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2314   case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2315   case X86::VPTERNLOGDZ128rmbi:
2316   case X86::VPTERNLOGDZ256rmbi:
2317   case X86::VPTERNLOGDZrmbi:
2318   case X86::VPTERNLOGQZ128rmbi:
2319   case X86::VPTERNLOGQZ256rmbi:
2320   case X86::VPTERNLOGQZrmbi:
2321   case X86::VPTERNLOGDZ128rmbikz:
2322   case X86::VPTERNLOGDZ256rmbikz:
2323   case X86::VPTERNLOGDZrmbikz:
2324   case X86::VPTERNLOGQZ128rmbikz:
2325   case X86::VPTERNLOGQZ256rmbikz:
2326   case X86::VPTERNLOGQZrmbikz: {
2327     auto &WorkingMI = cloneIfNew(MI);
2328     commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
2329     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2330                                                    OpIdx1, OpIdx2);
2331   }
2332   default: {
2333     if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
2334       unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
2335       auto &WorkingMI = cloneIfNew(MI);
2336       WorkingMI.setDesc(get(Opc));
2337       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2338                                                      OpIdx1, OpIdx2);
2339     }
2340 
2341     const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2342                                                       MI.getDesc().TSFlags);
2343     if (FMA3Group) {
2344       unsigned Opc =
2345         getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
2346       auto &WorkingMI = cloneIfNew(MI);
2347       WorkingMI.setDesc(get(Opc));
2348       return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2349                                                      OpIdx1, OpIdx2);
2350     }
2351 
2352     return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2353   }
2354   }
2355 }
2356 
2357 bool
2358 X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
2359                                             unsigned &SrcOpIdx1,
2360                                             unsigned &SrcOpIdx2,
2361                                             bool IsIntrinsic) const {
2362   uint64_t TSFlags = MI.getDesc().TSFlags;
2363 
2364   unsigned FirstCommutableVecOp = 1;
2365   unsigned LastCommutableVecOp = 3;
2366   unsigned KMaskOp = -1U;
2367   if (X86II::isKMasked(TSFlags)) {
2368     // For k-zero-masked operations it is OK to commute the first vector
2369     // operand, unless this is an intrinsic instruction.
2370     // For regular k-masked operations we make a conservative choice, because
2371     // the elements of the first vector operand for which the corresponding
2372     // bit in the k-mask operand is set to 0 are copied unchanged to the
2373     // result of the instruction.
2374     // TODO/FIXME: The commute may still be legal if the k-mask operand is
2375     // known to be either all ones or all zeroes.
2376     // It is also OK to commute the 1st operand if all users of MI read only
2377     // the elements enabled by the k-mask operand. For example,
2378     //   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
2379     //                                                     : v1[i];
2380     //   VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
2381     //                                  // OK to commute v1 in FMADD213PSZrk.
2382 
2383     // The k-mask operand has index = 2 for masked and zero-masked operations.
2384     KMaskOp = 2;
2385 
2386     // The operand with index = 1 is used as a source for those elements for
2387     // which the corresponding bit in the k-mask is set to 0.
2388     if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
2389       FirstCommutableVecOp = 3;
2390 
2391     LastCommutableVecOp++;
2392   } else if (IsIntrinsic) {
2393     // Commuting the first operand of an intrinsic instruction isn't possible
2394     // unless we can prove that only the lowest element of the result is used.
2395     FirstCommutableVecOp = 2;
2396   }
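  // For illustration: a merge-masked VFMADD213PSZrk has operands
  // (dst, src1, k, src2, src3), so only operands 3 and 4 are considered
  // commutable here, while the zero-masked VFMADD213PSZrkz additionally
  // allows operand 1.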
2397 
2398   if (isMem(MI, LastCommutableVecOp))
2399     LastCommutableVecOp--;
2400 
2401   // Only operands in [FirstCommutableVecOp, LastCommutableVecOp] are commutable.
2402   // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
2403   // that the operand is not specified/fixed.
2404   if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2405       (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2406        SrcOpIdx1 == KMaskOp))
2407     return false;
2408   if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2409       (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2410        SrcOpIdx2 == KMaskOp))
2411     return false;
2412 
2413   // Look for two different register operands assumed to be commutable
2414   // regardless of the FMA opcode. The FMA opcode is adjusted later.
2415   if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2416       SrcOpIdx2 == CommuteAnyOperandIndex) {
2417     unsigned CommutableOpIdx2 = SrcOpIdx2;
2418 
2419     // At least one of the operands to be commuted is not specified, so this
2420     // method is free to choose appropriate commutable operands.
2421     if (SrcOpIdx1 == SrcOpIdx2)
2422       // Neither operand is fixed. By default set one of the commutable
2423       // operands to the last register operand of the instruction.
2424       CommutableOpIdx2 = LastCommutableVecOp;
2425     else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2426       // Only one of the operands is not fixed.
2427       CommutableOpIdx2 = SrcOpIdx1;
2428 
2429     // CommutableOpIdx2 is well defined now. Let's choose another commutable
2430     // operand and assign its index to CommutableOpIdx1.
2431     Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
2432 
2433     unsigned CommutableOpIdx1;
2434     for (CommutableOpIdx1 = LastCommutableVecOp;
2435          CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2436       // Just ignore and skip the k-mask operand.
2437       if (CommutableOpIdx1 == KMaskOp)
2438         continue;
2439 
2440       // The commuted operands must have different registers.
2441       // Otherwise, the commute transformation does not change anything and
2442       // is therefore useless.
2443       if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
2444         break;
2445     }
2446 
2447     // No appropriate commutable operands were found.
2448     if (CommutableOpIdx1 < FirstCommutableVecOp)
2449       return false;
2450 
2451     // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
2452     // to return those values.
2453     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2454                               CommutableOpIdx1, CommutableOpIdx2))
2455       return false;
2456   }
2457 
2458   return true;
2459 }
2460 
2461 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2462                                          unsigned &SrcOpIdx1,
2463                                          unsigned &SrcOpIdx2) const {
2464   const MCInstrDesc &Desc = MI.getDesc();
2465   if (!Desc.isCommutable())
2466     return false;
2467 
2468   switch (MI.getOpcode()) {
2469   case X86::CMPSDrr:
2470   case X86::CMPSSrr:
2471   case X86::CMPPDrri:
2472   case X86::CMPPSrri:
2473   case X86::VCMPSDrr:
2474   case X86::VCMPSSrr:
2475   case X86::VCMPPDrri:
2476   case X86::VCMPPSrri:
2477   case X86::VCMPPDYrri:
2478   case X86::VCMPPSYrri:
2479   case X86::VCMPSDZrr:
2480   case X86::VCMPSSZrr:
2481   case X86::VCMPPDZrri:
2482   case X86::VCMPPSZrri:
2483   case X86::VCMPPDZ128rri:
2484   case X86::VCMPPSZ128rri:
2485   case X86::VCMPPDZ256rri:
2486   case X86::VCMPPSZ256rri:
2487   case X86::VCMPPDZrrik:
2488   case X86::VCMPPSZrrik:
2489   case X86::VCMPPDZ128rrik:
2490   case X86::VCMPPSZ128rrik:
2491   case X86::VCMPPDZ256rrik:
2492   case X86::VCMPPSZ256rrik: {
2493     unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2494 
2495     // Float comparison can be safely commuted for
2496     // Ordered/Unordered/Equal/NotEqual tests
2497     unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2498     switch (Imm) {
2499     default:
2500       // EVEX versions can be commuted.
2501       if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2502         break;
2503       return false;
2504     case 0x00: // EQUAL
2505     case 0x03: // UNORDERED
2506     case 0x04: // NOT EQUAL
2507     case 0x07: // ORDERED
2508       break;
2509     }
2510 
2511     // The indices of the commutable operands are 1 and 2 (or 2 and 3
2512     // when masked).
2513     // Assign them to the returned operand indices here.
2514     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2515                                 2 + OpOffset);
2516   }
2517   case X86::MOVSSrr:
2518     // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2519     // form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr are always commutable
2520     // since AVX implies SSE4.1.
2521     if (Subtarget.hasSSE41())
2522       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2523     return false;
2524   case X86::SHUFPDrri:
2525     // We can commute this to MOVSD.
2526     if (MI.getOperand(3).getImm() == 0x02)
2527       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2528     return false;
2529   case X86::MOVHLPSrr:
2530   case X86::UNPCKHPDrr:
2531   case X86::VMOVHLPSrr:
2532   case X86::VUNPCKHPDrr:
2533   case X86::VMOVHLPSZrr:
2534   case X86::VUNPCKHPDZ128rr:
2535     if (Subtarget.hasSSE2())
2536       return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2537     return false;
2538   case X86::VPTERNLOGDZrri:      case X86::VPTERNLOGDZrmi:
2539   case X86::VPTERNLOGDZ128rri:   case X86::VPTERNLOGDZ128rmi:
2540   case X86::VPTERNLOGDZ256rri:   case X86::VPTERNLOGDZ256rmi:
2541   case X86::VPTERNLOGQZrri:      case X86::VPTERNLOGQZrmi:
2542   case X86::VPTERNLOGQZ128rri:   case X86::VPTERNLOGQZ128rmi:
2543   case X86::VPTERNLOGQZ256rri:   case X86::VPTERNLOGQZ256rmi:
2544   case X86::VPTERNLOGDZrrik:
2545   case X86::VPTERNLOGDZ128rrik:
2546   case X86::VPTERNLOGDZ256rrik:
2547   case X86::VPTERNLOGQZrrik:
2548   case X86::VPTERNLOGQZ128rrik:
2549   case X86::VPTERNLOGQZ256rrik:
2550   case X86::VPTERNLOGDZrrikz:    case X86::VPTERNLOGDZrmikz:
2551   case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2552   case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2553   case X86::VPTERNLOGQZrrikz:    case X86::VPTERNLOGQZrmikz:
2554   case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2555   case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2556   case X86::VPTERNLOGDZ128rmbi:
2557   case X86::VPTERNLOGDZ256rmbi:
2558   case X86::VPTERNLOGDZrmbi:
2559   case X86::VPTERNLOGQZ128rmbi:
2560   case X86::VPTERNLOGQZ256rmbi:
2561   case X86::VPTERNLOGQZrmbi:
2562   case X86::VPTERNLOGDZ128rmbikz:
2563   case X86::VPTERNLOGDZ256rmbikz:
2564   case X86::VPTERNLOGDZrmbikz:
2565   case X86::VPTERNLOGQZ128rmbikz:
2566   case X86::VPTERNLOGQZ256rmbikz:
2567   case X86::VPTERNLOGQZrmbikz:
2568     return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2569   case X86::VPDPWSSDYrr:
2570   case X86::VPDPWSSDrr:
2571   case X86::VPDPWSSDSYrr:
2572   case X86::VPDPWSSDSrr:
2573   case X86::VPDPWSSDZ128r:
2574   case X86::VPDPWSSDZ128rk:
2575   case X86::VPDPWSSDZ128rkz:
2576   case X86::VPDPWSSDZ256r:
2577   case X86::VPDPWSSDZ256rk:
2578   case X86::VPDPWSSDZ256rkz:
2579   case X86::VPDPWSSDZr:
2580   case X86::VPDPWSSDZrk:
2581   case X86::VPDPWSSDZrkz:
2582   case X86::VPDPWSSDSZ128r:
2583   case X86::VPDPWSSDSZ128rk:
2584   case X86::VPDPWSSDSZ128rkz:
2585   case X86::VPDPWSSDSZ256r:
2586   case X86::VPDPWSSDSZ256rk:
2587   case X86::VPDPWSSDSZ256rkz:
2588   case X86::VPDPWSSDSZr:
2589   case X86::VPDPWSSDSZrk:
2590   case X86::VPDPWSSDSZrkz:
2591   case X86::VPMADD52HUQZ128r:
2592   case X86::VPMADD52HUQZ128rk:
2593   case X86::VPMADD52HUQZ128rkz:
2594   case X86::VPMADD52HUQZ256r:
2595   case X86::VPMADD52HUQZ256rk:
2596   case X86::VPMADD52HUQZ256rkz:
2597   case X86::VPMADD52HUQZr:
2598   case X86::VPMADD52HUQZrk:
2599   case X86::VPMADD52HUQZrkz:
2600   case X86::VPMADD52LUQZ128r:
2601   case X86::VPMADD52LUQZ128rk:
2602   case X86::VPMADD52LUQZ128rkz:
2603   case X86::VPMADD52LUQZ256r:
2604   case X86::VPMADD52LUQZ256rk:
2605   case X86::VPMADD52LUQZ256rkz:
2606   case X86::VPMADD52LUQZr:
2607   case X86::VPMADD52LUQZrk:
2608   case X86::VPMADD52LUQZrkz: {
2609     unsigned CommutableOpIdx1 = 2;
2610     unsigned CommutableOpIdx2 = 3;
2611     if (X86II::isKMasked(Desc.TSFlags)) {
2612       // Skip the mask register.
2613       ++CommutableOpIdx1;
2614       ++CommutableOpIdx2;
2615     }
2616     if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2617                               CommutableOpIdx1, CommutableOpIdx2))
2618       return false;
2619     if (!MI.getOperand(SrcOpIdx1).isReg() ||
2620         !MI.getOperand(SrcOpIdx2).isReg())
2621       // No idea.
2622       return false;
2623     return true;
2624   }
2625 
2626   default:
2627     const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2628                                                       MI.getDesc().TSFlags);
2629     if (FMA3Group)
2630       return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
2631                                            FMA3Group->isIntrinsic());
2632 
2633     // Handle masked instructions since we need to skip over the mask input
2634     // and the preserved input.
2635     if (X86II::isKMasked(Desc.TSFlags)) {
2636       // First assume that the first input is the mask operand and skip past it.
2637       unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
2638       unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
2639       // Check if the first input is tied. If there isn't one then we only
2640       // need to skip the mask operand which we did above.
2641       if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
2642                                              MCOI::TIED_TO) != -1)) {
2643         // If this is a zero-masking instruction with a tied operand, we need
2644         // to move the first index back to the first input, since this must be
2645         // a 3-input instruction and we want the first two non-mask inputs.
2646         // Otherwise this is a 2-input instruction with a preserved input and a
2647         // mask, so we need to move the indices to skip one more input.
2648         if (X86II::isKMergeMasked(Desc.TSFlags)) {
2649           ++CommutableOpIdx1;
2650           ++CommutableOpIdx2;
2651         } else {
2652           --CommutableOpIdx1;
2653         }
2654       }
2655 
2656       if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2657                                 CommutableOpIdx1, CommutableOpIdx2))
2658         return false;
2659 
2660       if (!MI.getOperand(SrcOpIdx1).isReg() ||
2661           !MI.getOperand(SrcOpIdx2).isReg())
2662         // No idea.
2663         return false;
2664       return true;
2665     }
2666 
2667     return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2668   }
2669   return false;
2670 }
2671 
2672 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
2673   switch (MI.getOpcode()) {
2674   default: return X86::COND_INVALID;
2675   case X86::JCC_1:
2676     return static_cast<X86::CondCode>(
2677         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2678   }
2679 }
2680 
2681 /// Return condition code of a SETCC opcode.
2682 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
2683   switch (MI.getOpcode()) {
2684   default: return X86::COND_INVALID;
2685   case X86::SETCCr: case X86::SETCCm:
2686     return static_cast<X86::CondCode>(
2687         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2688   }
2689 }
2690 
2691 /// Return condition code of a CMov opcode.
2692 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
2693   switch (MI.getOpcode()) {
2694   default: return X86::COND_INVALID;
2695   case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
2696   case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
2697     return static_cast<X86::CondCode>(
2698         MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2699   }
2700 }
2701 
2702 /// Return the inverse of the specified condition,
2703 /// e.g. turning COND_E to COND_NE.
2704 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
2705   switch (CC) {
2706   default: llvm_unreachable("Illegal condition code!");
2707   case X86::COND_E:  return X86::COND_NE;
2708   case X86::COND_NE: return X86::COND_E;
2709   case X86::COND_L:  return X86::COND_GE;
2710   case X86::COND_LE: return X86::COND_G;
2711   case X86::COND_G:  return X86::COND_LE;
2712   case X86::COND_GE: return X86::COND_L;
2713   case X86::COND_B:  return X86::COND_AE;
2714   case X86::COND_BE: return X86::COND_A;
2715   case X86::COND_A:  return X86::COND_BE;
2716   case X86::COND_AE: return X86::COND_B;
2717   case X86::COND_S:  return X86::COND_NS;
2718   case X86::COND_NS: return X86::COND_S;
2719   case X86::COND_P:  return X86::COND_NP;
2720   case X86::COND_NP: return X86::COND_P;
2721   case X86::COND_O:  return X86::COND_NO;
2722   case X86::COND_NO: return X86::COND_O;
2723   case X86::COND_NE_OR_P:  return X86::COND_E_AND_NP;
2724   case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
2725   }
2726 }
2727 
2728 /// Assuming the flags are set by MI(a,b), return the condition code if we
2729 /// modify the instruction such that the flags are set by MI(b,a).
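/// For example, "CMP a, b; JL" tests a < b; after rewriting the compare as
/// "CMP b, a" the branch must become JG.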
2730 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
2731   switch (CC) {
2732   default: return X86::COND_INVALID;
2733   case X86::COND_E:  return X86::COND_E;
2734   case X86::COND_NE: return X86::COND_NE;
2735   case X86::COND_L:  return X86::COND_G;
2736   case X86::COND_LE: return X86::COND_GE;
2737   case X86::COND_G:  return X86::COND_L;
2738   case X86::COND_GE: return X86::COND_LE;
2739   case X86::COND_B:  return X86::COND_A;
2740   case X86::COND_BE: return X86::COND_AE;
2741   case X86::COND_A:  return X86::COND_B;
2742   case X86::COND_AE: return X86::COND_BE;
2743   }
2744 }
2745 
2746 std::pair<X86::CondCode, bool>
2747 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
2748   X86::CondCode CC = X86::COND_INVALID;
2749   bool NeedSwap = false;
2750   switch (Predicate) {
2751   default: break;
2752   // Floating-point Predicates
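  // For example, FCMP_OLT maps to (COND_A, NeedSwap = true): "a < b (ordered)"
  // is tested by comparing b against a and branching on "above", which is
  // false for unordered inputs.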
2753   case CmpInst::FCMP_UEQ: CC = X86::COND_E;       break;
2754   case CmpInst::FCMP_OLT: NeedSwap = true;        LLVM_FALLTHROUGH;
2755   case CmpInst::FCMP_OGT: CC = X86::COND_A;       break;
2756   case CmpInst::FCMP_OLE: NeedSwap = true;        LLVM_FALLTHROUGH;
2757   case CmpInst::FCMP_OGE: CC = X86::COND_AE;      break;
2758   case CmpInst::FCMP_UGT: NeedSwap = true;        LLVM_FALLTHROUGH;
2759   case CmpInst::FCMP_ULT: CC = X86::COND_B;       break;
2760   case CmpInst::FCMP_UGE: NeedSwap = true;        LLVM_FALLTHROUGH;
2761   case CmpInst::FCMP_ULE: CC = X86::COND_BE;      break;
2762   case CmpInst::FCMP_ONE: CC = X86::COND_NE;      break;
2763   case CmpInst::FCMP_UNO: CC = X86::COND_P;       break;
2764   case CmpInst::FCMP_ORD: CC = X86::COND_NP;      break;
2765   case CmpInst::FCMP_OEQ:                         LLVM_FALLTHROUGH;
2766   case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
2767 
2768   // Integer Predicates
2769   case CmpInst::ICMP_EQ:  CC = X86::COND_E;       break;
2770   case CmpInst::ICMP_NE:  CC = X86::COND_NE;      break;
2771   case CmpInst::ICMP_UGT: CC = X86::COND_A;       break;
2772   case CmpInst::ICMP_UGE: CC = X86::COND_AE;      break;
2773   case CmpInst::ICMP_ULT: CC = X86::COND_B;       break;
2774   case CmpInst::ICMP_ULE: CC = X86::COND_BE;      break;
2775   case CmpInst::ICMP_SGT: CC = X86::COND_G;       break;
2776   case CmpInst::ICMP_SGE: CC = X86::COND_GE;      break;
2777   case CmpInst::ICMP_SLT: CC = X86::COND_L;       break;
2778   case CmpInst::ICMP_SLE: CC = X86::COND_LE;      break;
2779   }
2780 
2781   return std::make_pair(CC, NeedSwap);
2782 }
2783 
2784 /// Return a setcc opcode based on whether it has a memory operand.
2785 unsigned X86::getSETOpc(bool HasMemoryOperand) {
2786   return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
2787 }
2788 
2789 /// Return a cmov opcode for the given register size in bytes, and operand type.
2790 unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
2791   switch(RegBytes) {
2792   default: llvm_unreachable("Illegal register size!");
2793   case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
2794   case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
2795   case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
2796   }
2797 }
2798 
2799 /// Get the VPCMP immediate for the given condition.
2800 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
2801   switch (CC) {
2802   default: llvm_unreachable("Unexpected SETCC condition");
2803   case ISD::SETNE:  return 4;
2804   case ISD::SETEQ:  return 0;
2805   case ISD::SETULT:
2806   case ISD::SETLT: return 1;
2807   case ISD::SETUGT:
2808   case ISD::SETGT: return 6;
2809   case ISD::SETUGE:
2810   case ISD::SETGE: return 5;
2811   case ISD::SETULE:
2812   case ISD::SETLE: return 2;
2813   }
2814 }
2815 
2816 /// Get the VPCMP immediate if the operands are swapped.
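/// For example, VPCMPD with immediate 0x01 tests "a < b"; with the operands
/// swapped the same predicate becomes "b > a", i.e. NLE (0x06).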
2817 unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
2818   switch (Imm) {
2819   default: llvm_unreachable("Unreachable!");
2820   case 0x01: Imm = 0x06; break; // LT  -> NLE
2821   case 0x02: Imm = 0x05; break; // LE  -> NLT
2822   case 0x05: Imm = 0x02; break; // NLT -> LE
2823   case 0x06: Imm = 0x01; break; // NLE -> LT
2824   case 0x00: // EQ
2825   case 0x03: // FALSE
2826   case 0x04: // NE
2827   case 0x07: // TRUE
2828     break;
2829   }
2830 
2831   return Imm;
2832 }
2833 
2834 /// Get the VPCOM immediate if the operands are swapped.
2835 unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
2836   switch (Imm) {
2837   default: llvm_unreachable("Unreachable!");
2838   case 0x00: Imm = 0x02; break; // LT -> GT
2839   case 0x01: Imm = 0x03; break; // LE -> GE
2840   case 0x02: Imm = 0x00; break; // GT -> LT
2841   case 0x03: Imm = 0x01; break; // GE -> LE
2842   case 0x04: // EQ
2843   case 0x05: // NE
2844   case 0x06: // FALSE
2845   case 0x07: // TRUE
2846     break;
2847   }
2848 
2849   return Imm;
2850 }
2851 
2852 /// Get the VCMP immediate if the operands are swapped.
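/// For example, 0x01 (LT_OS) becomes 0x0E (GT_OS) and 0x11 (LT_OQ) becomes
/// 0x1E (GT_OQ); bit 4 is preserved.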
2853 unsigned X86::getSwappedVCMPImm(unsigned Imm) {
2854   // Only need the lower 2 bits to distinguish.
2855   switch (Imm & 0x3) {
2856   default: llvm_unreachable("Unreachable!");
2857   case 0x00: case 0x03:
2858     // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
2859     break;
2860   case 0x01: case 0x02:
2861     // Need to toggle bits 3:0. Bit 4 stays the same.
2862     Imm ^= 0xf;
2863     break;
2864   }
2865 
2866   return Imm;
2867 }
2868 
2869 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
2870   switch (MI.getOpcode()) {
2871   case X86::TCRETURNdi:
2872   case X86::TCRETURNri:
2873   case X86::TCRETURNmi:
2874   case X86::TCRETURNdi64:
2875   case X86::TCRETURNri64:
2876   case X86::TCRETURNmi64:
2877     return true;
2878   default:
2879     return false;
2880   }
2881 }
2882 
2883 bool X86InstrInfo::canMakeTailCallConditional(
2884     SmallVectorImpl<MachineOperand> &BranchCond,
2885     const MachineInstr &TailCall) const {
2886   if (TailCall.getOpcode() != X86::TCRETURNdi &&
2887       TailCall.getOpcode() != X86::TCRETURNdi64) {
2888     // Only direct calls can be done with a conditional branch.
2889     return false;
2890   }
2891 
2892   const MachineFunction *MF = TailCall.getParent()->getParent();
2893   if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
2894     // Conditional tail calls confuse the Win64 unwinder.
2895     return false;
2896   }
2897 
2898   assert(BranchCond.size() == 1);
2899   if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
2900     // Can't make a conditional tail call with this condition.
2901     return false;
2902   }
2903 
2904   const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
2905   if (X86FI->getTCReturnAddrDelta() != 0 ||
2906       TailCall.getOperand(1).getImm() != 0) {
2907     // A conditional tail call cannot do any stack adjustment.
2908     return false;
2909   }
2910 
2911   return true;
2912 }
2913 
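     // A rough sketch of the rewrite performed below, shown as assembly (the label
     // and callee names are placeholders):
     //
     //     jne .LBB0_1        ; branch selected by BranchCond
     //     ...
     //   .LBB0_1:
     //     jmp callee         ; TCRETURNdi
     //
     // becomes, in MBB itself,
     //
     //     jne callee         ; TCRETURNdicc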
2914 void X86InstrInfo::replaceBranchWithTailCall(
2915     MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
2916     const MachineInstr &TailCall) const {
2917   assert(canMakeTailCallConditional(BranchCond, TailCall));
2918 
2919   MachineBasicBlock::iterator I = MBB.end();
2920   while (I != MBB.begin()) {
2921     --I;
2922     if (I->isDebugInstr())
2923       continue;
2924     if (!I->isBranch())
2925       assert(0 && "Can't find the branch to replace!");
2926 
2927     X86::CondCode CC = X86::getCondFromBranch(*I);
2928     assert(BranchCond.size() == 1);
2929     if (CC != BranchCond[0].getImm())
2930       continue;
2931 
2932     break;
2933   }
2934 
2935   unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
2936                                                          : X86::TCRETURNdi64cc;
2937 
2938   auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
2939   MIB->addOperand(TailCall.getOperand(0)); // Destination.
2940   MIB.addImm(0); // Stack offset (not used).
2941   MIB->addOperand(BranchCond[0]); // Condition.
2942   MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
2943 
2944   // Add implicit uses and defs of all live regs potentially clobbered by the
2945   // call. This way they still appear live across the call.
2946   LivePhysRegs LiveRegs(getRegisterInfo());
2947   LiveRegs.addLiveOuts(MBB);
2948   SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
2949   LiveRegs.stepForward(*MIB, Clobbers);
2950   for (const auto &C : Clobbers) {
2951     MIB.addReg(C.first, RegState::Implicit);
2952     MIB.addReg(C.first, RegState::Implicit | RegState::Define);
2953   }
2954 
2955   I->eraseFromParent();
2956 }
2957 
2958 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
2959 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
2960 // fallthrough MBB cannot be identified.
2961 static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
2962                                             MachineBasicBlock *TBB) {
2963   // Look for non-EHPad successors other than TBB. If we find exactly one, it
2964   // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
2965   // and fallthrough MBB. If we find more than one, we cannot identify the
2966   // fallthrough MBB and should return nullptr.
2967   MachineBasicBlock *FallthroughBB = nullptr;
2968   for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
2969     if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
2970       continue;
2971     // Return a nullptr if we found more than one fallthrough successor.
2972     if (FallthroughBB && FallthroughBB != TBB)
2973       return nullptr;
2974     FallthroughBB = *SI;
2975   }
2976   return FallthroughBB;
2977 }
2978 
2979 bool X86InstrInfo::AnalyzeBranchImpl(
2980     MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
2981     SmallVectorImpl<MachineOperand> &Cond,
2982     SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
2983 
2984   // Start from the bottom of the block and work up, examining the
2985   // terminator instructions.
2986   MachineBasicBlock::iterator I = MBB.end();
2987   MachineBasicBlock::iterator UnCondBrIter = MBB.end();
2988   while (I != MBB.begin()) {
2989     --I;
2990     if (I->isDebugInstr())
2991       continue;
2992 
2993     // Working from the bottom, when we see a non-terminator instruction, we're
2994     // done.
2995     if (!isUnpredicatedTerminator(*I))
2996       break;
2997 
2998     // A terminator that isn't a branch can't easily be handled by this
2999     // analysis.
3000     if (!I->isBranch())
3001       return true;
3002 
3003     // Handle unconditional branches.
3004     if (I->getOpcode() == X86::JMP_1) {
3005       UnCondBrIter = I;
3006 
3007       if (!AllowModify) {
3008         TBB = I->getOperand(0).getMBB();
3009         continue;
3010       }
3011 
3012       // If the block has any instructions after a JMP, delete them.
3013       while (std::next(I) != MBB.end())
3014         std::next(I)->eraseFromParent();
3015 
3016       Cond.clear();
3017       FBB = nullptr;
3018 
3019       // Delete the JMP if it's equivalent to a fall-through.
3020       if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3021         TBB = nullptr;
3022         I->eraseFromParent();
3023         I = MBB.end();
3024         UnCondBrIter = MBB.end();
3025         continue;
3026       }
3027 
3028       // TBB is used to indicate the unconditional destination.
3029       TBB = I->getOperand(0).getMBB();
3030       continue;
3031     }
3032 
3033     // Handle conditional branches.
3034     X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3035     if (BranchCode == X86::COND_INVALID)
3036       return true;  // Can't handle indirect branch.
3037 
3038     // In practice we should never have an undef EFLAGS operand; if we do,
3039     // abort here, as we are not prepared to preserve the flag.
3040     if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
3041       return true;
3042 
3043     // Working from the bottom, handle the first conditional branch.
3044     if (Cond.empty()) {
3045       MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
3046       if (AllowModify && UnCondBrIter != MBB.end() &&
3047           MBB.isLayoutSuccessor(TargetBB)) {
3048         // If we can modify the code and it ends in something like:
3049         //
3050         //     jCC L1
3051         //     jmp L2
3052         //   L1:
3053         //     ...
3054         //   L2:
3055         //
3056         // Then we can change this to:
3057         //
3058         //     jnCC L2
3059         //   L1:
3060         //     ...
3061         //   L2:
3062         //
3063         // Which is a bit more efficient.
3064         // We conditionally jump to the fall-through block.
3065         BranchCode = GetOppositeBranchCondition(BranchCode);
3066         MachineBasicBlock::iterator OldInst = I;
3067 
3068         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
3069           .addMBB(UnCondBrIter->getOperand(0).getMBB())
3070           .addImm(BranchCode);
3071         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
3072           .addMBB(TargetBB);
3073 
3074         OldInst->eraseFromParent();
3075         UnCondBrIter->eraseFromParent();
3076 
3077         // Restart the analysis.
3078         UnCondBrIter = MBB.end();
3079         I = MBB.end();
3080         continue;
3081       }
3082 
3083       FBB = TBB;
3084       TBB = I->getOperand(0).getMBB();
3085       Cond.push_back(MachineOperand::CreateImm(BranchCode));
3086       CondBranches.push_back(&*I);
3087       continue;
3088     }
3089 
3090     // Handle subsequent conditional branches. Only handle the case where all
3091     // conditional branches branch to the same destination and their condition
3092     // opcodes fit one of the special multi-branch idioms.
3093     assert(Cond.size() == 1);
3094     assert(TBB);
3095 
3096     // If the conditions are the same, we can leave them alone.
3097     X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3098     auto NewTBB = I->getOperand(0).getMBB();
3099     if (OldBranchCode == BranchCode && TBB == NewTBB)
3100       continue;
3101 
3102     // If they differ, see if they fit one of the known patterns. Theoretically,
3103     // we could handle more patterns here, but we shouldn't expect to see them
3104     // if instruction selection has done a reasonable job.
3105     if (TBB == NewTBB &&
3106                ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3107                 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3108       BranchCode = X86::COND_NE_OR_P;
3109     } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3110                (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3111       if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3112         return true;
3113 
3114       // X86::COND_E_AND_NP usually has two different branch destinations.
3115       //
3116       // JP B1
3117       // JE B2
3118       // JMP B1
3119       // B1:
3120       // B2:
3121       //
3122       // Here this condition branches to B2 only if NP && E. It has another
3123       // equivalent form:
3124       //
3125       // JNE B1
3126       // JNP B2
3127       // JMP B1
3128       // B1:
3129       // B2:
3130       //
3131       // Similarly, it branches to B2 only if E && NP. That is why this condition
3132       // is named COND_E_AND_NP.
3133       BranchCode = X86::COND_E_AND_NP;
3134     } else
3135       return true;
3136 
3137     // Update the MachineOperand.
3138     Cond[0].setImm(BranchCode);
3139     CondBranches.push_back(&*I);
3140   }
3141 
3142   return false;
3143 }
3144 
3145 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3146                                  MachineBasicBlock *&TBB,
3147                                  MachineBasicBlock *&FBB,
3148                                  SmallVectorImpl<MachineOperand> &Cond,
3149                                  bool AllowModify) const {
3150   SmallVector<MachineInstr *, 4> CondBranches;
3151   return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3152 }
3153 
3154 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
3155                                           MachineBranchPredicate &MBP,
3156                                           bool AllowModify) const {
3157   using namespace std::placeholders;
3158 
3159   SmallVector<MachineOperand, 4> Cond;
3160   SmallVector<MachineInstr *, 4> CondBranches;
3161   if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
3162                         AllowModify))
3163     return true;
3164 
3165   if (Cond.size() != 1)
3166     return true;
3167 
3168   assert(MBP.TrueDest && "expected!");
3169 
3170   if (!MBP.FalseDest)
3171     MBP.FalseDest = MBB.getNextNode();
3172 
3173   const TargetRegisterInfo *TRI = &getRegisterInfo();
3174 
3175   MachineInstr *ConditionDef = nullptr;
3176   bool SingleUseCondition = true;
3177 
3178   for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
3179     if (I->modifiesRegister(X86::EFLAGS, TRI)) {
3180       ConditionDef = &*I;
3181       break;
3182     }
3183 
3184     if (I->readsRegister(X86::EFLAGS, TRI))
3185       SingleUseCondition = false;
3186   }
3187 
3188   if (!ConditionDef)
3189     return true;
3190 
3191   if (SingleUseCondition) {
3192     for (auto *Succ : MBB.successors())
3193       if (Succ->isLiveIn(X86::EFLAGS))
3194         SingleUseCondition = false;
3195   }
3196 
3197   MBP.ConditionDef = ConditionDef;
3198   MBP.SingleUseCondition = SingleUseCondition;
3199 
3200   // Currently we only recognize the simple pattern:
3201   //
3202   //   test %reg, %reg
3203   //   je %label
3204   //
3205   const unsigned TestOpcode =
3206       Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
3207 
3208   if (ConditionDef->getOpcode() == TestOpcode &&
3209       ConditionDef->getNumOperands() == 3 &&
3210       ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
3211       (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
3212     MBP.LHS = ConditionDef->getOperand(0);
3213     MBP.RHS = MachineOperand::CreateImm(0);
3214     MBP.Predicate = Cond[0].getImm() == X86::COND_NE
3215                         ? MachineBranchPredicate::PRED_NE
3216                         : MachineBranchPredicate::PRED_EQ;
3217     return false;
3218   }
3219 
3220   return true;
3221 }
3222 
3223 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
3224                                     int *BytesRemoved) const {
3225   assert(!BytesRemoved && "code size not handled");
3226 
3227   MachineBasicBlock::iterator I = MBB.end();
3228   unsigned Count = 0;
3229 
3230   while (I != MBB.begin()) {
3231     --I;
3232     if (I->isDebugInstr())
3233       continue;
3234     if (I->getOpcode() != X86::JMP_1 &&
3235         X86::getCondFromBranch(*I) == X86::COND_INVALID)
3236       break;
3237     // Remove the branch.
3238     I->eraseFromParent();
3239     I = MBB.end();
3240     ++Count;
3241   }
3242 
3243   return Count;
3244 }
3245 
3246 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
3247                                     MachineBasicBlock *TBB,
3248                                     MachineBasicBlock *FBB,
3249                                     ArrayRef<MachineOperand> Cond,
3250                                     const DebugLoc &DL,
3251                                     int *BytesAdded) const {
3252   // Shouldn't be a fall through.
3253   assert(TBB && "insertBranch must not be told to insert a fallthrough");
3254   assert((Cond.size() == 1 || Cond.size() == 0) &&
3255          "X86 branch conditions have one component!");
3256   assert(!BytesAdded && "code size not handled");
3257 
3258   if (Cond.empty()) {
3259     // Unconditional branch?
3260     assert(!FBB && "Unconditional branch with multiple successors!");
3261     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
3262     return 1;
3263   }
3264 
3265   // If FBB is null, it is implied to be a fall-through block.
3266   bool FallThru = FBB == nullptr;
3267 
3268   // Conditional branch.
3269   unsigned Count = 0;
3270   X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
3271   switch (CC) {
3272   case X86::COND_NE_OR_P:
3273     // Synthesize NE_OR_P with two branches.
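         //   jne TBB
         //   jp  TBB
         // Either branch being taken transfers control to TBB, i.e. NE || P.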
3274     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
3275     ++Count;
3276     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
3277     ++Count;
3278     break;
3279   case X86::COND_E_AND_NP:
3280     // Use the next block of MBB as FBB if it is null.
3281     if (FBB == nullptr) {
3282       FBB = getFallThroughMBB(&MBB, TBB);
3283       assert(FBB && "MBB cannot be the last block in function when the false "
3284                     "body is a fall-through.");
3285     }
3286     // Synthesize COND_E_AND_NP with two branches.
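         //   jne FBB
         //   jnp TBB
         // Control reaches TBB only when ZF is set (E) and PF is clear (NP).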
3287     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
3288     ++Count;
3289     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
3290     ++Count;
3291     break;
3292   default: {
3293     BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
3294     ++Count;
3295   }
3296   }
3297   if (!FallThru) {
3298     // Two-way conditional branch. Insert the second branch.
3299     BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
3300     ++Count;
3301   }
3302   return Count;
3303 }
3304 
3305 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
3306                                    ArrayRef<MachineOperand> Cond,
3307                                    Register DstReg, Register TrueReg,
3308                                    Register FalseReg, int &CondCycles,
3309                                    int &TrueCycles, int &FalseCycles) const {
3310   // Not all subtargets have cmov instructions.
3311   if (!Subtarget.hasCMov())
3312     return false;
3313   if (Cond.size() != 1)
3314     return false;
3315   // We cannot do the composite conditions, at least not in SSA form.
3316   if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
3317     return false;
3318 
3319   // Check register classes.
3320   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3321   const TargetRegisterClass *RC =
3322     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
3323   if (!RC)
3324     return false;
3325 
3326   // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
3327   if (X86::GR16RegClass.hasSubClassEq(RC) ||
3328       X86::GR32RegClass.hasSubClassEq(RC) ||
3329       X86::GR64RegClass.hasSubClassEq(RC)) {
3330     // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
3331     // Bridge. Probably Ivy Bridge as well.
3332     CondCycles = 2;
3333     TrueCycles = 2;
3334     FalseCycles = 2;
3335     return true;
3336   }
3337 
3338   // Can't do vectors.
3339   return false;
3340 }
3341 
3342 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
3343                                 MachineBasicBlock::iterator I,
3344                                 const DebugLoc &DL, Register DstReg,
3345                                 ArrayRef<MachineOperand> Cond, Register TrueReg,
3346                                 Register FalseReg) const {
3347   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3348   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3349   const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
3350   assert(Cond.size() == 1 && "Invalid Cond array");
3351   unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
3352                                     false /*HasMemoryOperand*/);
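       // Operand order: the cmov keeps the tied-in FalseReg value and overwrites it
       // with TrueReg only when Cond[0] holds, so FalseReg is added first as the
       // tied source.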
3353   BuildMI(MBB, I, DL, get(Opc), DstReg)
3354       .addReg(FalseReg)
3355       .addReg(TrueReg)
3356       .addImm(Cond[0].getImm());
3357 }
3358 
3359 /// Test if the given register is a physical h register.
3360 static bool isHReg(unsigned Reg) {
3361   return X86::GR8_ABCD_HRegClass.contains(Reg);
3362 }
3363 
3364 // Try to copy between mask/VR128/VR64 and GR64/GR32 registers.
3365 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
3366                                         const X86Subtarget &Subtarget) {
3367   bool HasAVX = Subtarget.hasAVX();
3368   bool HasAVX512 = Subtarget.hasAVX512();
3369 
3370   // SrcReg(MaskReg) -> DestReg(GR64)
3371   // SrcReg(MaskReg) -> DestReg(GR32)
3372 
3373   // All KMASK RegClasses hold the same k registers, so we can test against any one.
3374   if (X86::VK16RegClass.contains(SrcReg)) {
3375     if (X86::GR64RegClass.contains(DestReg)) {
3376       assert(Subtarget.hasBWI());
3377       return X86::KMOVQrk;
3378     }
3379     if (X86::GR32RegClass.contains(DestReg))
3380       return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
3381   }
3382 
3383   // SrcReg(GR64) -> DestReg(MaskReg)
3384   // SrcReg(GR32) -> DestReg(MaskReg)
3385 
3386   // All KMASK RegClasses hold the same k registers, so we can test against any one.
3387   if (X86::VK16RegClass.contains(DestReg)) {
3388     if (X86::GR64RegClass.contains(SrcReg)) {
3389       assert(Subtarget.hasBWI());
3390       return X86::KMOVQkr;
3391     }
3392     if (X86::GR32RegClass.contains(SrcReg))
3393       return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
3394   }
3395 
3396 
3397   // SrcReg(VR128) -> DestReg(GR64)
3398   // SrcReg(VR64)  -> DestReg(GR64)
3399   // SrcReg(GR64)  -> DestReg(VR128)
3400   // SrcReg(GR64)  -> DestReg(VR64)
3401 
3402   if (X86::GR64RegClass.contains(DestReg)) {
3403     if (X86::VR128XRegClass.contains(SrcReg))
3404       // Copy from a VR128 register to a GR64 register.
3405       return HasAVX512 ? X86::VMOVPQIto64Zrr :
3406              HasAVX    ? X86::VMOVPQIto64rr  :
3407                          X86::MOVPQIto64rr;
3408     if (X86::VR64RegClass.contains(SrcReg))
3409       // Copy from a VR64 register to a GR64 register.
3410       return X86::MMX_MOVD64from64rr;
3411   } else if (X86::GR64RegClass.contains(SrcReg)) {
3412     // Copy from a GR64 register to a VR128 register.
3413     if (X86::VR128XRegClass.contains(DestReg))
3414       return HasAVX512 ? X86::VMOV64toPQIZrr :
3415              HasAVX    ? X86::VMOV64toPQIrr  :
3416                          X86::MOV64toPQIrr;
3417     // Copy from a GR64 register to a VR64 register.
3418     if (X86::VR64RegClass.contains(DestReg))
3419       return X86::MMX_MOVD64to64rr;
3420   }
3421 
3422   // SrcReg(VR128) -> DestReg(GR32)
3423   // SrcReg(GR32)  -> DestReg(VR128)
3424 
3425   if (X86::GR32RegClass.contains(DestReg) &&
3426       X86::VR128XRegClass.contains(SrcReg))
3427     // Copy from a VR128 register to a GR32 register.
3428     return HasAVX512 ? X86::VMOVPDI2DIZrr :
3429            HasAVX    ? X86::VMOVPDI2DIrr  :
3430                        X86::MOVPDI2DIrr;
3431 
3432   if (X86::VR128XRegClass.contains(DestReg) &&
3433       X86::GR32RegClass.contains(SrcReg))
3434     // Copy from a GR32 register to a VR128 register.
3435     return HasAVX512 ? X86::VMOVDI2PDIZrr :
3436            HasAVX    ? X86::VMOVDI2PDIrr  :
3437                        X86::MOVDI2PDIrr;
3438   return 0;
3439 }
3440 
3441 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3442                                MachineBasicBlock::iterator MI,
3443                                const DebugLoc &DL, MCRegister DestReg,
3444                                MCRegister SrcReg, bool KillSrc) const {
3445   // First deal with the normal symmetric copies.
3446   bool HasAVX = Subtarget.hasAVX();
3447   bool HasVLX = Subtarget.hasVLX();
3448   unsigned Opc = 0;
3449   if (X86::GR64RegClass.contains(DestReg, SrcReg))
3450     Opc = X86::MOV64rr;
3451   else if (X86::GR32RegClass.contains(DestReg, SrcReg))
3452     Opc = X86::MOV32rr;
3453   else if (X86::GR16RegClass.contains(DestReg, SrcReg))
3454     Opc = X86::MOV16rr;
3455   else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
3456     // Copying to or from a physical H register on x86-64 requires a NOREX
3457     // move.  Otherwise use a normal move.
3458     if ((isHReg(DestReg) || isHReg(SrcReg)) &&
3459         Subtarget.is64Bit()) {
3460       Opc = X86::MOV8rr_NOREX;
3461       // Both operands must be encodable without a REX prefix.
3462       assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
3463              "8-bit H register can not be copied outside GR8_NOREX");
3464     } else
3465       Opc = X86::MOV8rr;
3466   }
3467   else if (X86::VR64RegClass.contains(DestReg, SrcReg))
3468     Opc = X86::MMX_MOVQ64rr;
3469   else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
3470     if (HasVLX)
3471       Opc = X86::VMOVAPSZ128rr;
3472     else if (X86::VR128RegClass.contains(DestReg, SrcReg))
3473       Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
3474     else {
3475       // If this is an extended register and we don't have VLX, we need to use a
3476       // 512-bit move.
3477       Opc = X86::VMOVAPSZrr;
3478       const TargetRegisterInfo *TRI = &getRegisterInfo();
3479       DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
3480                                          &X86::VR512RegClass);
3481       SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
3482                                         &X86::VR512RegClass);
3483     }
3484   } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
3485     if (HasVLX)
3486       Opc = X86::VMOVAPSZ256rr;
3487     else if (X86::VR256RegClass.contains(DestReg, SrcReg))
3488       Opc = X86::VMOVAPSYrr;
3489     else {
3490       // If this is an extended register and we don't have VLX, we need to use a
3491       // 512-bit move.
3492       Opc = X86::VMOVAPSZrr;
3493       const TargetRegisterInfo *TRI = &getRegisterInfo();
3494       DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
3495                                          &X86::VR512RegClass);
3496       SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
3497                                         &X86::VR512RegClass);
3498     }
3499   } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
3500     Opc = X86::VMOVAPSZrr;
3501   // All KMASK RegClasses hold the same k registers, so we can test against any one.
3502   else if (X86::VK16RegClass.contains(DestReg, SrcReg))
3503     Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
3504   if (!Opc)
3505     Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
3506 
3507   if (Opc) {
3508     BuildMI(MBB, MI, DL, get(Opc), DestReg)
3509       .addReg(SrcReg, getKillRegState(KillSrc));
3510     return;
3511   }
3512 
3513   if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
3514     // FIXME: We use a fatal error here because historically LLVM has tried to
3515     // lower some of these physreg copies and we want to ensure we get
3516     // reasonable bug reports if someone encounters a case no other testing
3517     // found. This path should be removed after the LLVM 7 release.
3518     report_fatal_error("Unable to copy EFLAGS physical register!");
3519   }
3520 
3521   LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3522                     << RI.getName(DestReg) << '\n');
3523   report_fatal_error("Cannot emit physreg copy instruction");
3524 }
3525 
3526 Optional<DestSourcePair>
3527 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
3528   if (MI.isMoveReg())
3529     return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
3530   return None;
3531 }
3532 
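     // Select the opcode used to spill (store) or reload (load) a register of
     // class RC to or from a stack slot. The choice depends on the spill size, on
     // subtarget features (AVX, AVX-512, VLX, BWI) and on whether the slot is
     // aligned enough for the aligned vector moves.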
3533 static unsigned getLoadStoreRegOpcode(Register Reg,
3534                                       const TargetRegisterClass *RC,
3535                                       bool IsStackAligned,
3536                                       const X86Subtarget &STI, bool load) {
3537   bool HasAVX = STI.hasAVX();
3538   bool HasAVX512 = STI.hasAVX512();
3539   bool HasVLX = STI.hasVLX();
3540 
3541   switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
3542   default:
3543     llvm_unreachable("Unknown spill size");
3544   case 1:
3545     assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3546     if (STI.is64Bit())
3547       // Copying to or from a physical H register on x86-64 requires a NOREX
3548       // move.  Otherwise use a normal move.
3549       if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
3550         return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3551     return load ? X86::MOV8rm : X86::MOV8mr;
3552   case 2:
3553     if (X86::VK16RegClass.hasSubClassEq(RC))
3554       return load ? X86::KMOVWkm : X86::KMOVWmk;
3555     assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3556     return load ? X86::MOV16rm : X86::MOV16mr;
3557   case 4:
3558     if (X86::GR32RegClass.hasSubClassEq(RC))
3559       return load ? X86::MOV32rm : X86::MOV32mr;
3560     if (X86::FR32XRegClass.hasSubClassEq(RC))
3561       return load ?
3562         (HasAVX512 ? X86::VMOVSSZrm_alt :
3563          HasAVX    ? X86::VMOVSSrm_alt :
3564                      X86::MOVSSrm_alt) :
3565         (HasAVX512 ? X86::VMOVSSZmr :
3566          HasAVX    ? X86::VMOVSSmr :
3567                      X86::MOVSSmr);
3568     if (X86::RFP32RegClass.hasSubClassEq(RC))
3569       return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3570     if (X86::VK32RegClass.hasSubClassEq(RC)) {
3571       assert(STI.hasBWI() && "KMOVD requires BWI");
3572       return load ? X86::KMOVDkm : X86::KMOVDmk;
3573     }
3574     // All of these mask pair classes have the same spill size, so the same kind
3575     // of kmov instructions can be used with all of them.
3576     if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
3577         X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
3578         X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
3579         X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
3580         X86::VK16PAIRRegClass.hasSubClassEq(RC))
3581       return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
3582     llvm_unreachable("Unknown 4-byte regclass");
3583   case 8:
3584     if (X86::GR64RegClass.hasSubClassEq(RC))
3585       return load ? X86::MOV64rm : X86::MOV64mr;
3586     if (X86::FR64XRegClass.hasSubClassEq(RC))
3587       return load ?
3588         (HasAVX512 ? X86::VMOVSDZrm_alt :
3589          HasAVX    ? X86::VMOVSDrm_alt :
3590                      X86::MOVSDrm_alt) :
3591         (HasAVX512 ? X86::VMOVSDZmr :
3592          HasAVX    ? X86::VMOVSDmr :
3593                      X86::MOVSDmr);
3594     if (X86::VR64RegClass.hasSubClassEq(RC))
3595       return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3596     if (X86::RFP64RegClass.hasSubClassEq(RC))
3597       return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3598     if (X86::VK64RegClass.hasSubClassEq(RC)) {
3599       assert(STI.hasBWI() && "KMOVQ requires BWI");
3600       return load ? X86::KMOVQkm : X86::KMOVQmk;
3601     }
3602     llvm_unreachable("Unknown 8-byte regclass");
3603   case 10:
3604     assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3605     return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3606   case 16: {
3607     if (X86::VR128XRegClass.hasSubClassEq(RC)) {
3608       // If the stack is realigned we can use aligned stores.
3609       if (IsStackAligned)
3610         return load ?
3611           (HasVLX    ? X86::VMOVAPSZ128rm :
3612            HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
3613            HasAVX    ? X86::VMOVAPSrm :
3614                        X86::MOVAPSrm):
3615           (HasVLX    ? X86::VMOVAPSZ128mr :
3616            HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
3617            HasAVX    ? X86::VMOVAPSmr :
3618                        X86::MOVAPSmr);
3619       else
3620         return load ?
3621           (HasVLX    ? X86::VMOVUPSZ128rm :
3622            HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
3623            HasAVX    ? X86::VMOVUPSrm :
3624                        X86::MOVUPSrm):
3625           (HasVLX    ? X86::VMOVUPSZ128mr :
3626            HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
3627            HasAVX    ? X86::VMOVUPSmr :
3628                        X86::MOVUPSmr);
3629     }
3630     if (X86::BNDRRegClass.hasSubClassEq(RC)) {
3631       if (STI.is64Bit())
3632         return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
3633       else
3634         return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
3635     }
3636     llvm_unreachable("Unknown 16-byte regclass");
3637   }
3638   case 32:
3639     assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3640     // If the stack is realigned we can use aligned stores.
3641     if (IsStackAligned)
3642       return load ?
3643         (HasVLX    ? X86::VMOVAPSZ256rm :
3644          HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
3645                      X86::VMOVAPSYrm) :
3646         (HasVLX    ? X86::VMOVAPSZ256mr :
3647          HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
3648                      X86::VMOVAPSYmr);
3649     else
3650       return load ?
3651         (HasVLX    ? X86::VMOVUPSZ256rm :
3652          HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
3653                      X86::VMOVUPSYrm) :
3654         (HasVLX    ? X86::VMOVUPSZ256mr :
3655          HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
3656                      X86::VMOVUPSYmr);
3657   case 64:
3658     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3659     assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3660     if (IsStackAligned)
3661       return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3662     else
3663       return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3664   }
3665 }
3666 
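     // For illustration, a load such as "movl 16(%rdi,%rcx,4), %eax" is decomposed
     // below into BaseReg = %rdi, ScaledReg = %rcx, Scale = 4 and Displacement = 16;
     // frame-index bases and symbolic displacements are rejected with None.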
3667 Optional<ExtAddrMode>
3668 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
3669                                       const TargetRegisterInfo *TRI) const {
3670   const MCInstrDesc &Desc = MemI.getDesc();
3671   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3672   if (MemRefBegin < 0)
3673     return None;
3674 
3675   MemRefBegin += X86II::getOperandBias(Desc);
3676 
3677   auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
3678   if (!BaseOp.isReg()) // Can be an MO_FrameIndex
3679     return None;
3680 
3681   const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp);
3682   // Displacement can be symbolic
3683   if (!DispMO.isImm())
3684     return None;
3685 
3686   ExtAddrMode AM;
3687   AM.BaseReg = BaseOp.getReg();
3688   AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg();
3689   AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm();
3690   AM.Displacement = DispMO.getImm();
3691   return AM;
3692 }
3693 
3694 bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
3695                                            const Register Reg,
3696                                            int64_t &ImmVal) const {
3697   if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri)
3698     return false;
3699   // The MOV source can be a global address rather than an immediate.
3700   if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg)
3701     return false;
3702   ImmVal = MI.getOperand(1).getImm();
3703   return true;
3704 }
3705 
3706 bool X86InstrInfo::preservesZeroValueInReg(
3707     const MachineInstr *MI, const Register NullValueReg,
3708     const TargetRegisterInfo *TRI) const {
3709   if (!MI->modifiesRegister(NullValueReg, TRI))
3710     return true;
3711   switch (MI->getOpcode()) {
3712   // Shifting a null value right/left onto itself still yields null, i.e.
3713   // rax = shl rax, X.
3714   case X86::SHR64ri:
3715   case X86::SHR32ri:
3716   case X86::SHL64ri:
3717   case X86::SHL32ri:
3718     assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
3719            "expected for shift opcode!");
3720     return MI->getOperand(0).getReg() == NullValueReg &&
3721            MI->getOperand(1).getReg() == NullValueReg;
3722   // Zero extend of a sub-reg of NullValueReg into itself does not change the
3723   // null value.
3724   case X86::MOV32rr:
3725     return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) {
3726       return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
3727     });
3728   default:
3729     return false;
3730   }
3731   llvm_unreachable("Should be handled above!");
3732 }
3733 
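     // Only the simple [BaseReg + Disp] addressing form is handled below: a scale
     // other than 1, an index register, a frame-index base or a symbolic
     // displacement all cause this hook to report failure.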
3734 bool X86InstrInfo::getMemOperandsWithOffsetWidth(
3735     const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
3736     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3737     const TargetRegisterInfo *TRI) const {
3738   const MCInstrDesc &Desc = MemOp.getDesc();
3739   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3740   if (MemRefBegin < 0)
3741     return false;
3742 
3743   MemRefBegin += X86II::getOperandBias(Desc);
3744 
3745   const MachineOperand *BaseOp =
3746       &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
3747   if (!BaseOp->isReg()) // Can be an MO_FrameIndex
3748     return false;
3749 
3750   if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
3751     return false;
3752 
3753   if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
3754       X86::NoRegister)
3755     return false;
3756 
3757   const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
3758 
3759   // Displacement can be symbolic
3760   if (!DispMO.isImm())
3761     return false;
3762 
3763   Offset = DispMO.getImm();
3764 
3765   if (!BaseOp->isReg())
3766     return false;
3767 
3768   OffsetIsScalable = false;
3769   // FIXME: Relying on memoperands() may not be the right thing to do here. Check
3770   // with X86 maintainers, and fix it accordingly. For now, it is ok, since
3771   // there is no use of `Width` for X86 back-end at the moment.
3772   Width =
3773       !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0;
3774   BaseOps.push_back(BaseOp);
3775   return true;
3776 }
3777 
3778 static unsigned getStoreRegOpcode(Register SrcReg,
3779                                   const TargetRegisterClass *RC,
3780                                   bool IsStackAligned,
3781                                   const X86Subtarget &STI) {
3782   return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
3783 }
3784 
3785 static unsigned getLoadRegOpcode(Register DestReg,
3786                                  const TargetRegisterClass *RC,
3787                                  bool IsStackAligned, const X86Subtarget &STI) {
3788   return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
3789 }
3790 
3791 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3792                                        MachineBasicBlock::iterator MI,
3793                                        Register SrcReg, bool isKill, int FrameIdx,
3794                                        const TargetRegisterClass *RC,
3795                                        const TargetRegisterInfo *TRI) const {
3796   const MachineFunction &MF = *MBB.getParent();
3797   assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3798          "Stack slot too small for store");
3799   if (RC->getID() == X86::TILERegClassID) {
3800     unsigned Opc = X86::TILESTORED;
3801     // tilestored %tmm, (%sp, %idx)
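         // The 64-byte row stride is materialized into a fresh GR64 virtual
         // register below and wired in as the index operand of the TILESTORED.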
3802     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3803     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3804     BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3805     MachineInstr *NewMI =
3806         addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3807             .addReg(SrcReg, getKillRegState(isKill));
3808     MachineOperand &MO = NewMI->getOperand(2);
3809     MO.setReg(VirtReg);
3810     MO.setIsKill(true);
3811   } else if (RC->getID() == X86::TILECFGRegClassID) {
3812     unsigned Opc = X86::PSTTILECFG;
3813     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3814         .addReg(SrcReg, getKillRegState(isKill));
3815   } else {
3816     unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3817     bool isAligned =
3818         (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3819         RI.canRealignStack(MF);
3820     unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
3821     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3822         .addReg(SrcReg, getKillRegState(isKill));
3823   }
3824 }
3825 
3826 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3827                                         MachineBasicBlock::iterator MI,
3828                                         Register DestReg, int FrameIdx,
3829                                         const TargetRegisterClass *RC,
3830                                         const TargetRegisterInfo *TRI) const {
3831   if (RC->getID() == X86::TILERegClassID) {
3832     unsigned Opc = X86::TILELOADD;
3833     // tileloadd (%sp, %idx), %tmm
3834     MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3835     Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3836     MachineInstr *NewMI =
3837         BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3838     NewMI = addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3839                               FrameIdx);
3840     MachineOperand &MO = NewMI->getOperand(3);
3841     MO.setReg(VirtReg);
3842     MO.setIsKill(true);
3843   } else if (RC->getID() == X86::TILECFGRegClassID) {
3844     unsigned Opc = X86::PLDTILECFG;
3845     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3846                       FrameIdx);
3847   } else {
3848     const MachineFunction &MF = *MBB.getParent();
3849     unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3850     bool isAligned =
3851         (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3852         RI.canRealignStack(MF);
3853     unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
3854     addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3855                       FrameIdx);
3856   }
3857 }
3858 
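     // For illustration: analyzing "cmpl $42, %edi" yields SrcReg = %edi,
     // SrcReg2 = 0, CmpMask = ~0 and CmpValue = 42, while "cmpl %esi, %edi"
     // yields SrcReg = %edi, SrcReg2 = %esi and a zero mask and value.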
3859 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
3860                                   Register &SrcReg2, int &CmpMask,
3861                                   int &CmpValue) const {
3862   switch (MI.getOpcode()) {
3863   default: break;
3864   case X86::CMP64ri32:
3865   case X86::CMP64ri8:
3866   case X86::CMP32ri:
3867   case X86::CMP32ri8:
3868   case X86::CMP16ri:
3869   case X86::CMP16ri8:
3870   case X86::CMP8ri:
3871     SrcReg = MI.getOperand(0).getReg();
3872     SrcReg2 = 0;
3873     if (MI.getOperand(1).isImm()) {
3874       CmpMask = ~0;
3875       CmpValue = MI.getOperand(1).getImm();
3876     } else {
3877       CmpMask = CmpValue = 0;
3878     }
3879     return true;
3880   // A SUB can be used to perform a comparison.
3881   case X86::SUB64rm:
3882   case X86::SUB32rm:
3883   case X86::SUB16rm:
3884   case X86::SUB8rm:
3885     SrcReg = MI.getOperand(1).getReg();
3886     SrcReg2 = 0;
3887     CmpMask = 0;
3888     CmpValue = 0;
3889     return true;
3890   case X86::SUB64rr:
3891   case X86::SUB32rr:
3892   case X86::SUB16rr:
3893   case X86::SUB8rr:
3894     SrcReg = MI.getOperand(1).getReg();
3895     SrcReg2 = MI.getOperand(2).getReg();
3896     CmpMask = 0;
3897     CmpValue = 0;
3898     return true;
3899   case X86::SUB64ri32:
3900   case X86::SUB64ri8:
3901   case X86::SUB32ri:
3902   case X86::SUB32ri8:
3903   case X86::SUB16ri:
3904   case X86::SUB16ri8:
3905   case X86::SUB8ri:
3906     SrcReg = MI.getOperand(1).getReg();
3907     SrcReg2 = 0;
3908     if (MI.getOperand(2).isImm()) {
3909       CmpMask = ~0;
3910       CmpValue = MI.getOperand(2).getImm();
3911     } else {
3912       CmpMask = CmpValue = 0;
3913     }
3914     return true;
3915   case X86::CMP64rr:
3916   case X86::CMP32rr:
3917   case X86::CMP16rr:
3918   case X86::CMP8rr:
3919     SrcReg = MI.getOperand(0).getReg();
3920     SrcReg2 = MI.getOperand(1).getReg();
3921     CmpMask = 0;
3922     CmpValue = 0;
3923     return true;
3924   case X86::TEST8rr:
3925   case X86::TEST16rr:
3926   case X86::TEST32rr:
3927   case X86::TEST64rr:
3928     SrcReg = MI.getOperand(0).getReg();
3929     if (MI.getOperand(1).getReg() != SrcReg)
3930       return false;
3931     // Compare against zero.
3932     SrcReg2 = 0;
3933     CmpMask = ~0;
3934     CmpValue = 0;
3935     return true;
3936   }
3937   return false;
3938 }
3939 
3940 /// Check whether the first instruction, whose only
3941 /// purpose is to update flags, can be made redundant.
3942 /// CMPrr can be made redundant by SUBrr if the operands are the same.
3943 /// This function can be extended later on.
3944 /// SrcReg, SrcReg2: register operands for FlagI.
3945 /// ImmValue: immediate for FlagI if it takes an immediate.
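     /// For example, CMP32rr %a, %b can be made redundant by an earlier
     /// SUB32rr %a, %b: both compute %a - %b and set EFLAGS identically.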
3946 inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
3947                                         Register SrcReg, Register SrcReg2,
3948                                         int ImmMask, int ImmValue,
3949                                         const MachineInstr &OI) {
3950   if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
3951        (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
3952        (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
3953        (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
3954       ((OI.getOperand(1).getReg() == SrcReg &&
3955         OI.getOperand(2).getReg() == SrcReg2) ||
3956        (OI.getOperand(1).getReg() == SrcReg2 &&
3957         OI.getOperand(2).getReg() == SrcReg)))
3958     return true;
3959 
3960   if (ImmMask != 0 &&
3961       ((FlagI.getOpcode() == X86::CMP64ri32 &&
3962         OI.getOpcode() == X86::SUB64ri32) ||
3963        (FlagI.getOpcode() == X86::CMP64ri8 &&
3964         OI.getOpcode() == X86::SUB64ri8) ||
3965        (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
3966        (FlagI.getOpcode() == X86::CMP32ri8 &&
3967         OI.getOpcode() == X86::SUB32ri8) ||
3968        (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
3969        (FlagI.getOpcode() == X86::CMP16ri8 &&
3970         OI.getOpcode() == X86::SUB16ri8) ||
3971        (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
3972       OI.getOperand(1).getReg() == SrcReg &&
3973       OI.getOperand(2).getImm() == ImmValue)
3974     return true;
3975   return false;
3976 }
3977 
3978 /// Check whether the definition can be converted
3979 /// to remove a comparison against zero.
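     /// For example, "%1 = AND32ri %0, 7" already sets ZF and SF according to its
     /// result, so a following "TEST32rr %1, %1" or "CMP32ri %1, 0" adds nothing
     /// and can be removed.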
3980 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
3981   NoSignFlag = false;
3982 
3983   switch (MI.getOpcode()) {
3984   default: return false;
3985 
3986   // The shift instructions only modify ZF if their shift count is non-zero.
3987   // N.B.: The processor truncates the shift count depending on the encoding.
3988   case X86::SAR8ri:    case X86::SAR16ri:  case X86::SAR32ri:case X86::SAR64ri:
3989   case X86::SHR8ri:    case X86::SHR16ri:  case X86::SHR32ri:case X86::SHR64ri:
3990      return getTruncatedShiftCount(MI, 2) != 0;
3991 
3992   // Some left shift instructions can be turned into LEA instructions but only
3993   // if their flags aren't used. Avoid transforming such instructions.
3994   case X86::SHL8ri:    case X86::SHL16ri:  case X86::SHL32ri:case X86::SHL64ri:{
3995     unsigned ShAmt = getTruncatedShiftCount(MI, 2);
3996     if (isTruncatedShiftCountForLEA(ShAmt)) return false;
3997     return ShAmt != 0;
3998   }
3999 
4000   case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
4001   case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
4002      return getTruncatedShiftCount(MI, 3) != 0;
4003 
4004   case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
4005   case X86::SUB32ri8:  case X86::SUB16ri:  case X86::SUB16ri8:
4006   case X86::SUB8ri:    case X86::SUB64rr:  case X86::SUB32rr:
4007   case X86::SUB16rr:   case X86::SUB8rr:   case X86::SUB64rm:
4008   case X86::SUB32rm:   case X86::SUB16rm:  case X86::SUB8rm:
4009   case X86::DEC64r:    case X86::DEC32r:   case X86::DEC16r: case X86::DEC8r:
4010   case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
4011   case X86::ADD32ri8:  case X86::ADD16ri:  case X86::ADD16ri8:
4012   case X86::ADD8ri:    case X86::ADD64rr:  case X86::ADD32rr:
4013   case X86::ADD16rr:   case X86::ADD8rr:   case X86::ADD64rm:
4014   case X86::ADD32rm:   case X86::ADD16rm:  case X86::ADD8rm:
4015   case X86::INC64r:    case X86::INC32r:   case X86::INC16r: case X86::INC8r:
4016   case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
4017   case X86::AND32ri8:  case X86::AND16ri:  case X86::AND16ri8:
4018   case X86::AND8ri:    case X86::AND64rr:  case X86::AND32rr:
4019   case X86::AND16rr:   case X86::AND8rr:   case X86::AND64rm:
4020   case X86::AND32rm:   case X86::AND16rm:  case X86::AND8rm:
4021   case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
4022   case X86::XOR32ri8:  case X86::XOR16ri:  case X86::XOR16ri8:
4023   case X86::XOR8ri:    case X86::XOR64rr:  case X86::XOR32rr:
4024   case X86::XOR16rr:   case X86::XOR8rr:   case X86::XOR64rm:
4025   case X86::XOR32rm:   case X86::XOR16rm:  case X86::XOR8rm:
4026   case X86::OR64ri32:  case X86::OR64ri8:  case X86::OR32ri:
4027   case X86::OR32ri8:   case X86::OR16ri:   case X86::OR16ri8:
4028   case X86::OR8ri:     case X86::OR64rr:   case X86::OR32rr:
4029   case X86::OR16rr:    case X86::OR8rr:    case X86::OR64rm:
4030   case X86::OR32rm:    case X86::OR16rm:   case X86::OR8rm:
4031   case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
4032   case X86::ADC32ri8:  case X86::ADC16ri:  case X86::ADC16ri8:
4033   case X86::ADC8ri:    case X86::ADC64rr:  case X86::ADC32rr:
4034   case X86::ADC16rr:   case X86::ADC8rr:   case X86::ADC64rm:
4035   case X86::ADC32rm:   case X86::ADC16rm:  case X86::ADC8rm:
4036   case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
4037   case X86::SBB32ri8:  case X86::SBB16ri:  case X86::SBB16ri8:
4038   case X86::SBB8ri:    case X86::SBB64rr:  case X86::SBB32rr:
4039   case X86::SBB16rr:   case X86::SBB8rr:   case X86::SBB64rm:
4040   case X86::SBB32rm:   case X86::SBB16rm:  case X86::SBB8rm:
4041   case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r: case X86::NEG64r:
4042   case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1:case X86::SAR64r1:
4043   case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1:case X86::SHR64r1:
4044   case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
4045   case X86::ANDN32rr:  case X86::ANDN32rm:
4046   case X86::ANDN64rr:  case X86::ANDN64rm:
4047   case X86::BLSI32rr:  case X86::BLSI32rm:
4048   case X86::BLSI64rr:  case X86::BLSI64rm:
4049   case X86::BLSMSK32rr:case X86::BLSMSK32rm:
4050   case X86::BLSMSK64rr:case X86::BLSMSK64rm:
4051   case X86::BLSR32rr:  case X86::BLSR32rm:
4052   case X86::BLSR64rr:  case X86::BLSR64rm:
4053   case X86::BZHI32rr:  case X86::BZHI32rm:
4054   case X86::BZHI64rr:  case X86::BZHI64rm:
4055   case X86::LZCNT16rr: case X86::LZCNT16rm:
4056   case X86::LZCNT32rr: case X86::LZCNT32rm:
4057   case X86::LZCNT64rr: case X86::LZCNT64rm:
4058   case X86::POPCNT16rr:case X86::POPCNT16rm:
4059   case X86::POPCNT32rr:case X86::POPCNT32rm:
4060   case X86::POPCNT64rr:case X86::POPCNT64rm:
4061   case X86::TZCNT16rr: case X86::TZCNT16rm:
4062   case X86::TZCNT32rr: case X86::TZCNT32rm:
4063   case X86::TZCNT64rr: case X86::TZCNT64rm:
4064   case X86::BLCFILL32rr: case X86::BLCFILL32rm:
4065   case X86::BLCFILL64rr: case X86::BLCFILL64rm:
4066   case X86::BLCI32rr:    case X86::BLCI32rm:
4067   case X86::BLCI64rr:    case X86::BLCI64rm:
4068   case X86::BLCIC32rr:   case X86::BLCIC32rm:
4069   case X86::BLCIC64rr:   case X86::BLCIC64rm:
4070   case X86::BLCMSK32rr:  case X86::BLCMSK32rm:
4071   case X86::BLCMSK64rr:  case X86::BLCMSK64rm:
4072   case X86::BLCS32rr:    case X86::BLCS32rm:
4073   case X86::BLCS64rr:    case X86::BLCS64rm:
4074   case X86::BLSFILL32rr: case X86::BLSFILL32rm:
4075   case X86::BLSFILL64rr: case X86::BLSFILL64rm:
4076   case X86::BLSIC32rr:   case X86::BLSIC32rm:
4077   case X86::BLSIC64rr:   case X86::BLSIC64rm:
4078   case X86::T1MSKC32rr:  case X86::T1MSKC32rm:
4079   case X86::T1MSKC64rr:  case X86::T1MSKC64rm:
4080   case X86::TZMSK32rr:   case X86::TZMSK32rm:
4081   case X86::TZMSK64rr:   case X86::TZMSK64rm:
4082     return true;
4083   case X86::BEXTR32rr:   case X86::BEXTR64rr:
4084   case X86::BEXTR32rm:   case X86::BEXTR64rm:
4085   case X86::BEXTRI32ri:  case X86::BEXTRI32mi:
4086   case X86::BEXTRI64ri:  case X86::BEXTRI64mi:
4087     // BEXTR doesn't update the sign flag so we can't use it.
4088     NoSignFlag = true;
4089     return true;
4090   }
4091 }
4092 
4093 /// Check whether the use can be converted to remove a comparison against zero.
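     /// The returned condition code is the one that is true exactly when the
     /// instruction's source operand was zero. For example, BLSR sets CF when its
     /// source is zero, so a later "test %src, %src; je" can branch on COND_B
     /// instead and the test can be dropped.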
4094 static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
4095   switch (MI.getOpcode()) {
4096   default: return X86::COND_INVALID;
4097   case X86::NEG8r:
4098   case X86::NEG16r:
4099   case X86::NEG32r:
4100   case X86::NEG64r:
4101     return X86::COND_AE;
4102   case X86::LZCNT16rr:
4103   case X86::LZCNT32rr:
4104   case X86::LZCNT64rr:
4105     return X86::COND_B;
4106   case X86::POPCNT16rr:
4107   case X86::POPCNT32rr:
4108   case X86::POPCNT64rr:
4109     return X86::COND_E;
4110   case X86::TZCNT16rr:
4111   case X86::TZCNT32rr:
4112   case X86::TZCNT64rr:
4113     return X86::COND_B;
4114   case X86::BSF16rr:
4115   case X86::BSF32rr:
4116   case X86::BSF64rr:
4117   case X86::BSR16rr:
4118   case X86::BSR32rr:
4119   case X86::BSR64rr:
4120     return X86::COND_E;
4121   case X86::BLSI32rr:
4122   case X86::BLSI64rr:
4123     return X86::COND_AE;
4124   case X86::BLSR32rr:
4125   case X86::BLSR64rr:
4126   case X86::BLSMSK32rr:
4127   case X86::BLSMSK64rr:
4128     return X86::COND_B;
4129   // TODO: TBM instructions.
4130   }
4131 }
4132 
4133 /// Check if there exists an earlier instruction that
4134 /// operates on the same source operands and sets flags in the same way as
4135 /// Compare; remove Compare if possible.
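     /// In addition, a SUB whose result has no other use is first rewritten into
     /// the corresponding CMP below; e.g. "%1 = SUB32ri %0, 5" with %1 unused
     /// becomes "CMP32ri %0, 5", which may then itself be eliminated.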
4136 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
4137                                         Register SrcReg2, int CmpMask,
4138                                         int CmpValue,
4139                                         const MachineRegisterInfo *MRI) const {
4140   // Check whether we can replace SUB with CMP.
4141   switch (CmpInstr.getOpcode()) {
4142   default: break;
4143   case X86::SUB64ri32:
4144   case X86::SUB64ri8:
4145   case X86::SUB32ri:
4146   case X86::SUB32ri8:
4147   case X86::SUB16ri:
4148   case X86::SUB16ri8:
4149   case X86::SUB8ri:
4150   case X86::SUB64rm:
4151   case X86::SUB32rm:
4152   case X86::SUB16rm:
4153   case X86::SUB8rm:
4154   case X86::SUB64rr:
4155   case X86::SUB32rr:
4156   case X86::SUB16rr:
4157   case X86::SUB8rr: {
4158     if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
4159       return false;
4160     // There is no use of the destination register, so we can replace SUB with CMP.
4161     unsigned NewOpcode = 0;
4162     switch (CmpInstr.getOpcode()) {
4163     default: llvm_unreachable("Unreachable!");
4164     case X86::SUB64rm:   NewOpcode = X86::CMP64rm;   break;
4165     case X86::SUB32rm:   NewOpcode = X86::CMP32rm;   break;
4166     case X86::SUB16rm:   NewOpcode = X86::CMP16rm;   break;
4167     case X86::SUB8rm:    NewOpcode = X86::CMP8rm;    break;
4168     case X86::SUB64rr:   NewOpcode = X86::CMP64rr;   break;
4169     case X86::SUB32rr:   NewOpcode = X86::CMP32rr;   break;
4170     case X86::SUB16rr:   NewOpcode = X86::CMP16rr;   break;
4171     case X86::SUB8rr:    NewOpcode = X86::CMP8rr;    break;
4172     case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
4173     case X86::SUB64ri8:  NewOpcode = X86::CMP64ri8;  break;
4174     case X86::SUB32ri:   NewOpcode = X86::CMP32ri;   break;
4175     case X86::SUB32ri8:  NewOpcode = X86::CMP32ri8;  break;
4176     case X86::SUB16ri:   NewOpcode = X86::CMP16ri;   break;
4177     case X86::SUB16ri8:  NewOpcode = X86::CMP16ri8;  break;
4178     case X86::SUB8ri:    NewOpcode = X86::CMP8ri;    break;
4179     }
4180     CmpInstr.setDesc(get(NewOpcode));
4181     CmpInstr.RemoveOperand(0);
4182     // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
4183     if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
4184         NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
4185       return false;
4186   }
4187   }
4188 
4189   // Get the unique definition of SrcReg.
4190   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
4191   if (!MI) return false;
4192 
4193   // CmpInstr is the first instruction of the BB.
4194   MachineBasicBlock::iterator I = CmpInstr, Def = MI;
4195 
4196   // If we are comparing against zero, check whether we can use MI to update
4197   // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
4198   bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
4199   if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
4200     return false;
4201 
4202   // If we have a use of the source register between the def and our compare
4203   // instruction we can eliminate the compare iff the use sets EFLAGS in the
4204   // right way.
4205   bool ShouldUpdateCC = false;
4206   bool NoSignFlag = false;
4207   X86::CondCode NewCC = X86::COND_INVALID;
4208   if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
4209     // Scan forward from the def until we hit the use we're looking for or the
4210     // compare instruction.
4211     for (MachineBasicBlock::iterator J = MI;; ++J) {
4212       // Do we have a convertible instruction?
4213       NewCC = isUseDefConvertible(*J);
4214       if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
4215           J->getOperand(1).getReg() == SrcReg) {
4216         assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
4217         ShouldUpdateCC = true; // Update CC later on.
4218         // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
4219         // with the new def.
4220         Def = J;
4221         MI = &*Def;
4222         break;
4223       }
4224 
4225       if (J == I)
4226         return false;
4227     }
4228   }
4229 
4230   // We are searching for an earlier instruction that can make CmpInstr
4231   // redundant and that instruction will be saved in Sub.
4232   MachineInstr *Sub = nullptr;
4233   const TargetRegisterInfo *TRI = &getRegisterInfo();
4234 
4235   // We iterate backward, starting from the instruction before CmpInstr, and
4236   // stop when we reach the definition of a source register or the start of the BB.
4237   // RI points to the instruction before CmpInstr.
4238   // If the definition is in this basic block, RE points to the definition;
4239   // otherwise, RE is the rend of the basic block.
4240   MachineBasicBlock::reverse_iterator
4241       RI = ++I.getReverse(),
4242       RE = CmpInstr.getParent() == MI->getParent()
4243                ? Def.getReverse() /* points to MI */
4244                : CmpInstr.getParent()->rend();
4245   MachineInstr *Movr0Inst = nullptr;
4246   for (; RI != RE; ++RI) {
4247     MachineInstr &Instr = *RI;
4248     // Check whether CmpInstr can be made redundant by the current instruction.
4249     if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
4250                                            CmpValue, Instr)) {
4251       Sub = &Instr;
4252       break;
4253     }
4254 
4255     if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
4256         Instr.readsRegister(X86::EFLAGS, TRI)) {
4257       // This instruction modifies or uses EFLAGS.
4258 
4259       // MOV32r0 etc. are implemented with xor which clobbers condition code.
4260       // They are safe to move up if the definition of EFLAGS is dead and
4261       // earlier instructions do not read or write EFLAGS.
4262       if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
4263           Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
4264         Movr0Inst = &Instr;
4265         continue;
4266       }
4267 
4268       // We can't remove CmpInstr.
4269       return false;
4270     }
4271   }
4272 
4273   // Return false if no candidates exist.
4274   if (!IsCmpZero && !Sub)
4275     return false;
4276 
4277   bool IsSwapped =
4278       (SrcReg2 != 0 && Sub && Sub->getOperand(1).getReg() == SrcReg2 &&
4279        Sub->getOperand(2).getReg() == SrcReg);
4280 
4281   // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
4282   // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
4283   // If we are done with the basic block, we need to check whether EFLAGS is
4284   // live-out.
4285   bool IsSafe = false;
4286   SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
4287   MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
4288   for (++I; I != E; ++I) {
4289     const MachineInstr &Instr = *I;
4290     bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
4291     bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
4292     // We should check the usage if this instruction uses and updates EFLAGS.
4293     if (!UseEFLAGS && ModifyEFLAGS) {
4294       // It is safe to remove CmpInstr if EFLAGS is updated again.
4295       IsSafe = true;
4296       break;
4297     }
4298     if (!UseEFLAGS && !ModifyEFLAGS)
4299       continue;
4300 
4301     // EFLAGS is used by this instruction.
4302     X86::CondCode OldCC = X86::COND_INVALID;
4303     if (IsCmpZero || IsSwapped) {
4304       // We decode the condition code from opcode.
4305       if (Instr.isBranch())
4306         OldCC = X86::getCondFromBranch(Instr);
4307       else {
4308         OldCC = X86::getCondFromSETCC(Instr);
4309         if (OldCC == X86::COND_INVALID)
4310           OldCC = X86::getCondFromCMov(Instr);
4311       }
4312       if (OldCC == X86::COND_INVALID) return false;
4313     }
4314     X86::CondCode ReplacementCC = X86::COND_INVALID;
4315     if (IsCmpZero) {
4316       switch (OldCC) {
4317       default: break;
4318       case X86::COND_A: case X86::COND_AE:
4319       case X86::COND_B: case X86::COND_BE:
4320       case X86::COND_G: case X86::COND_GE:
4321       case X86::COND_L: case X86::COND_LE:
4322       case X86::COND_O: case X86::COND_NO:
4323         // CF and OF are used, we can't perform this optimization.
4324         return false;
4325       case X86::COND_S: case X86::COND_NS:
4326         // If SF is used, but the instruction doesn't update the SF, then we
4327         // can't do the optimization.
4328         if (NoSignFlag)
4329           return false;
4330         break;
4331       }
4332 
4333       // If we're updating the condition code, check if we have to reverse the
4334       // condition.
4335       if (ShouldUpdateCC)
4336         switch (OldCC) {
4337         default:
4338           return false;
4339         case X86::COND_E:
4340           ReplacementCC = NewCC;
4341           break;
4342         case X86::COND_NE:
4343           ReplacementCC = GetOppositeBranchCondition(NewCC);
4344           break;
4345         }
4346     } else if (IsSwapped) {
4347       // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
4348       // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
4349       // We swap the condition code and synthesize the new opcode.
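      // Illustrative example (hypothetical registers): with SUB(r1, r2) saved
      // as Sub and CMP(r2, r1) being CmpInstr, a following SETG must become
      // SETL once the CMP is erased; getSwappedCondition(COND_G) is COND_L.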
4350       ReplacementCC = getSwappedCondition(OldCC);
4351       if (ReplacementCC == X86::COND_INVALID) return false;
4352     }
4353 
4354     if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
4355       // Push the MachineInstr to OpsToUpdate.
4356       // If it is safe to remove CmpInstr, the condition code of these
4357       // instructions will be modified.
4358       OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
4359     }
4360     if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
4361       // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
4362       IsSafe = true;
4363       break;
4364     }
4365   }
4366 
4367   // If EFLAGS is not killed nor re-defined, we should check whether it is
4368   // live-out. If it is live-out, do not optimize.
4369   if ((IsCmpZero || IsSwapped) && !IsSafe) {
4370     MachineBasicBlock *MBB = CmpInstr.getParent();
4371     for (MachineBasicBlock *Successor : MBB->successors())
4372       if (Successor->isLiveIn(X86::EFLAGS))
4373         return false;
4374   }
4375 
4376   // The instruction to be updated is either Sub or MI.
4377   Sub = IsCmpZero ? MI : Sub;
4378   // Move Movr0Inst to the appropriate place before Sub.
4379   if (Movr0Inst) {
4380     // Look backwards until we find a def that doesn't use the current EFLAGS.
4381     Def = Sub;
4382     MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
4383                                         InsertE = Sub->getParent()->rend();
4384     for (; InsertI != InsertE; ++InsertI) {
4385       MachineInstr *Instr = &*InsertI;
4386       if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
4387           Instr->modifiesRegister(X86::EFLAGS, TRI)) {
4388         Sub->getParent()->remove(Movr0Inst);
4389         Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
4390                                    Movr0Inst);
4391         break;
4392       }
4393     }
4394     if (InsertI == InsertE)
4395       return false;
4396   }
4397 
4398   // Make sure Sub instruction defines EFLAGS and mark the def live.
4399   MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
4400   assert(FlagDef && "Unable to locate a def EFLAGS operand");
4401   FlagDef->setIsDead(false);
4402 
4403   CmpInstr.eraseFromParent();
4404 
4405   // Modify the condition code of instructions in OpsToUpdate.
4406   for (auto &Op : OpsToUpdate) {
4407     Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
4408         .setImm(Op.second);
4409   }
4410   return true;
4411 }
4412 
4413 /// Try to remove the load by folding it to a register
4414 /// operand at the use. We fold the load instruction if it defines a virtual
4415 /// register, the virtual register is used once in the same BB, and the
4416 /// instructions in-between do not load or store, and have no side effects.
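/// A simplified, made-up MIR sketch of what this enables:
///   %1:gr32 = MOV32rm %stack.0, 1, $noreg, 0, $noreg
///   %2:gr32 = ADD32rr %0, killed %1, implicit-def dead $eflags
/// becomes, via foldMemoryOperand:
///   %2:gr32 = ADD32rm %0, %stack.0, 1, $noreg, 0, $noreg, implicit-def dead $eflags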
4417 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
4418                                               const MachineRegisterInfo *MRI,
4419                                               Register &FoldAsLoadDefReg,
4420                                               MachineInstr *&DefMI) const {
4421   // Check whether we can move DefMI here.
4422   DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
4423   assert(DefMI);
4424   bool SawStore = false;
4425   if (!DefMI->isSafeToMove(nullptr, SawStore))
4426     return nullptr;
4427 
4428   // Collect information about virtual register operands of MI.
4429   SmallVector<unsigned, 1> SrcOperandIds;
4430   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4431     MachineOperand &MO = MI.getOperand(i);
4432     if (!MO.isReg())
4433       continue;
4434     Register Reg = MO.getReg();
4435     if (Reg != FoldAsLoadDefReg)
4436       continue;
4437     // Do not fold if we have a subreg use or a def.
4438     if (MO.getSubReg() || MO.isDef())
4439       return nullptr;
4440     SrcOperandIds.push_back(i);
4441   }
4442   if (SrcOperandIds.empty())
4443     return nullptr;
4444 
4445   // Check whether we can fold the def into the operands in SrcOperandIds.
4446   if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
4447     FoldAsLoadDefReg = 0;
4448     return FoldMI;
4449   }
4450 
4451   return nullptr;
4452 }
4453 
4454 /// Expand a single-def pseudo instruction to a two-addr
4455 /// instruction with two undef reads of the register being defined.
4456 /// This is used for mapping:
4457 ///   %xmm4 = V_SET0
4458 /// to:
4459 ///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
4460 ///
4461 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
4462                              const MCInstrDesc &Desc) {
4463   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4464   Register Reg = MIB.getReg(0);
4465   MIB->setDesc(Desc);
4466 
4467   // MachineInstr::addOperand() will insert explicit operands before any
4468   // implicit operands.
4469   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4470   // But we don't trust that.
4471   assert(MIB.getReg(1) == Reg &&
4472          MIB.getReg(2) == Reg && "Misplaced operand");
4473   return true;
4474 }
4475 
4476 /// Expand a single-def pseudo instruction to a two-addr
4477 /// instruction with two %k0 reads.
4478 /// This is used for mapping:
4479 ///   %k4 = K_SET1
4480 /// to:
4481 ///   %k4 = KXNORrr %k0, %k0
4482 static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
4483                             Register Reg) {
4484   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4485   MIB->setDesc(Desc);
4486   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4487   return true;
4488 }
4489 
4490 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
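// Expand the MOV32r1 / MOV32r_1 pseudos. A rough, illustrative sketch of the
// emitted sequence:
//   %eax = MOV32r1    ->   xorl %eax, %eax ; incl %eax
//   %eax = MOV32r_1   ->   xorl %eax, %eax ; decl %eax
// trading an immediate load for a dependency-breaking XOR plus INC/DEC.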
4491                           bool MinusOne) {
4492   MachineBasicBlock &MBB = *MIB->getParent();
4493   DebugLoc DL = MIB->getDebugLoc();
4494   Register Reg = MIB.getReg(0);
4495 
4496   // Insert the XOR.
4497   BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
4498       .addReg(Reg, RegState::Undef)
4499       .addReg(Reg, RegState::Undef);
4500 
4501   // Turn the pseudo into an INC or DEC.
4502   MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
4503   MIB.addReg(Reg);
4504 
4505   return true;
4506 }
4507 
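// Expand MOV32ImmSExti8 / MOV64ImmSExti8 into a push/pop pair when that is
// allowed, e.g. (illustrative) "movq $-1, %rax" can be emitted as
// "pushq $-1 ; popq %rax", which is smaller because the immediate fits in a
// sign-extended 8-bit field. Red-zone and CFI details are handled below.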
4508 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
4509                                const TargetInstrInfo &TII,
4510                                const X86Subtarget &Subtarget) {
4511   MachineBasicBlock &MBB = *MIB->getParent();
4512   DebugLoc DL = MIB->getDebugLoc();
4513   int64_t Imm = MIB->getOperand(1).getImm();
4514   assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
4515   MachineBasicBlock::iterator I = MIB.getInstr();
4516 
4517   int StackAdjustment;
4518 
4519   if (Subtarget.is64Bit()) {
4520     assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
4521            MIB->getOpcode() == X86::MOV32ImmSExti8);
4522 
4523     // Can't use push/pop lowering if the function might write to the red zone.
4524     X86MachineFunctionInfo *X86FI =
4525         MBB.getParent()->getInfo<X86MachineFunctionInfo>();
4526     if (X86FI->getUsesRedZone()) {
4527       MIB->setDesc(TII.get(MIB->getOpcode() ==
4528                            X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
4529       return true;
4530     }
4531 
4532     // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
4533     // widen the register if necessary.
4534     StackAdjustment = 8;
4535     BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
4536     MIB->setDesc(TII.get(X86::POP64r));
4537     MIB->getOperand(0)
4538         .setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
4539   } else {
4540     assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
4541     StackAdjustment = 4;
4542     BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
4543     MIB->setDesc(TII.get(X86::POP32r));
4544   }
4545   MIB->RemoveOperand(1);
4546   MIB->addImplicitDefUseOperands(*MBB.getParent());
4547 
4548   // Build CFI if necessary.
4549   MachineFunction &MF = *MBB.getParent();
4550   const X86FrameLowering *TFL = Subtarget.getFrameLowering();
4551   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
4552   bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
4553   bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
4554   if (EmitCFI) {
4555     TFL->BuildCFI(MBB, I, DL,
4556         MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
4557     TFL->BuildCFI(MBB, std::next(I), DL,
4558         MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
4559   }
4560 
4561   return true;
4562 }
4563 
4564 // LoadStackGuard has so far only been implemented for 64-bit MachO. A
4565 // different code sequence is needed for other targets.
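// A rough sketch of the expansion on x86-64 MachO (the guard symbol name is
// illustrative only):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg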
4566 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
4567                                  const TargetInstrInfo &TII) {
4568   MachineBasicBlock &MBB = *MIB->getParent();
4569   DebugLoc DL = MIB->getDebugLoc();
4570   Register Reg = MIB.getReg(0);
4571   const GlobalValue *GV =
4572       cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
4573   auto Flags = MachineMemOperand::MOLoad |
4574                MachineMemOperand::MODereferenceable |
4575                MachineMemOperand::MOInvariant;
4576   MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4577       MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
4578   MachineBasicBlock::iterator I = MIB.getInstr();
4579 
4580   BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
4581       .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
4582       .addMemOperand(MMO);
4583   MIB->setDebugLoc(DL);
4584   MIB->setDesc(TII.get(X86::MOV64rm));
4585   MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
4586 }
4587 
4588 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
4589   MachineBasicBlock &MBB = *MIB->getParent();
4590   MachineFunction &MF = *MBB.getParent();
4591   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
4592   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4593   unsigned XorOp =
4594       MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
4595   MIB->setDesc(TII.get(XorOp));
4596   MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
4597   return true;
4598 }
4599 
4600 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4601 // but not VLX. If it uses an extended register, we need to use an instruction
4602 // that loads the lower 128/256 bits but is available with only AVX512F.
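// For example (an illustrative sketch): reloading a 128-bit spill into %xmm16
// without VLX cannot use the VEX-encoded VMOVAPSrm, because xmm16 is not
// VEX-encodable, so the reload is rewritten as a VBROADCASTF32X4rm into
// %zmm16, whose low 128 bits carry the reloaded value.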
4603 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
4604                             const TargetRegisterInfo *TRI,
4605                             const MCInstrDesc &LoadDesc,
4606                             const MCInstrDesc &BroadcastDesc,
4607                             unsigned SubIdx) {
4608   Register DestReg = MIB.getReg(0);
4609   // Check if DestReg is XMM16-31 or YMM16-31.
4610   if (TRI->getEncodingValue(DestReg) < 16) {
4611     // We can use a normal VEX encoded load.
4612     MIB->setDesc(LoadDesc);
4613   } else {
4614     // Use a 128/256-bit VBROADCAST instruction.
4615     MIB->setDesc(BroadcastDesc);
4616     // Change the destination to a 512-bit register.
4617     DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
4618     MIB->getOperand(0).setReg(DestReg);
4619   }
4620   return true;
4621 }
4622 
4623 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4624 // but not VLX. If it uses an extended register, we need to use an instruction
4625 // that stores the lower 128/256 bits but is available with only AVX512F.
4626 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
4627                              const TargetRegisterInfo *TRI,
4628                              const MCInstrDesc &StoreDesc,
4629                              const MCInstrDesc &ExtractDesc,
4630                              unsigned SubIdx) {
4631   Register SrcReg = MIB.getReg(X86::AddrNumOperands);
4632   // Check if SrcReg is XMM16-31 or YMM16-31.
4633   if (TRI->getEncodingValue(SrcReg) < 16) {
4634     // We can use a normal VEX encoded store.
4635     MIB->setDesc(StoreDesc);
4636   } else {
4637     // Use a VEXTRACTF instruction.
4638     MIB->setDesc(ExtractDesc);
4639     // Change the stored source register to its 512-bit super-register.
4640     SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
4641     MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
4642     MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4643   }
4644 
4645   return true;
4646 }
4647 
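// Expand the SHLDROT/SHRDROT pseudos into SHLD/SHRD with the destination
// register repeated as the second source, e.g. (illustrative):
//   %eax = SHLDROT32ri %eax, 5   ->   shldl $5, %eax, %eax
// i.e. a rotate-by-immediate expressed through the double-shift instruction.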
4648 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
4649   MIB->setDesc(Desc);
4650   int64_t ShiftAmt = MIB->getOperand(2).getImm();
4651   // Temporarily remove the immediate so we can add another source register.
4652   MIB->RemoveOperand(2);
4653   // Add the register. Don't copy the kill flag if there is one.
4654   MIB.addReg(MIB.getReg(1),
4655              getUndefRegState(MIB->getOperand(1).isUndef()));
4656   // Add back the immediate.
4657   MIB.addImm(ShiftAmt);
4658   return true;
4659 }
4660 
4661 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
4662   bool HasAVX = Subtarget.hasAVX();
4663   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4664   switch (MI.getOpcode()) {
4665   case X86::MOV32r0:
4666     return Expand2AddrUndef(MIB, get(X86::XOR32rr));
4667   case X86::MOV32r1:
4668     return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
4669   case X86::MOV32r_1:
4670     return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
4671   case X86::MOV32ImmSExti8:
4672   case X86::MOV64ImmSExti8:
4673     return ExpandMOVImmSExti8(MIB, *this, Subtarget);
4674   case X86::SETB_C32r:
4675     return Expand2AddrUndef(MIB, get(X86::SBB32rr));
4676   case X86::SETB_C64r:
4677     return Expand2AddrUndef(MIB, get(X86::SBB64rr));
4678   case X86::MMX_SET0:
4679     return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
4680   case X86::V_SET0:
4681   case X86::FsFLD0SS:
4682   case X86::FsFLD0SD:
4683   case X86::FsFLD0F128:
4684     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
4685   case X86::AVX_SET0: {
4686     assert(HasAVX && "AVX not supported");
4687     const TargetRegisterInfo *TRI = &getRegisterInfo();
4688     Register SrcReg = MIB.getReg(0);
4689     Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4690     MIB->getOperand(0).setReg(XReg);
4691     Expand2AddrUndef(MIB, get(X86::VXORPSrr));
4692     MIB.addReg(SrcReg, RegState::ImplicitDefine);
4693     return true;
4694   }
4695   case X86::AVX512_128_SET0:
4696   case X86::AVX512_FsFLD0SS:
4697   case X86::AVX512_FsFLD0SD:
4698   case X86::AVX512_FsFLD0F128: {
4699     bool HasVLX = Subtarget.hasVLX();
4700     Register SrcReg = MIB.getReg(0);
4701     const TargetRegisterInfo *TRI = &getRegisterInfo();
4702     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
4703       return Expand2AddrUndef(MIB,
4704                               get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4705     // Extended register without VLX. Use a larger XOR.
4706     SrcReg =
4707         TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4708     MIB->getOperand(0).setReg(SrcReg);
4709     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4710   }
4711   case X86::AVX512_256_SET0:
4712   case X86::AVX512_512_SET0: {
4713     bool HasVLX = Subtarget.hasVLX();
4714     Register SrcReg = MIB.getReg(0);
4715     const TargetRegisterInfo *TRI = &getRegisterInfo();
4716     if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
4717       Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4718       MIB->getOperand(0).setReg(XReg);
4719       Expand2AddrUndef(MIB,
4720                        get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4721       MIB.addReg(SrcReg, RegState::ImplicitDefine);
4722       return true;
4723     }
4724     if (MI.getOpcode() == X86::AVX512_256_SET0) {
4725       // No VLX so we must reference a zmm.
4726       unsigned ZReg =
4727         TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4728       MIB->getOperand(0).setReg(ZReg);
4729     }
4730     return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4731   }
4732   case X86::V_SETALLONES:
4733     return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
4734   case X86::AVX2_SETALLONES:
4735     return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
4736   case X86::AVX1_SETALLONES: {
4737     Register Reg = MIB.getReg(0);
4738     // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
4739     MIB->setDesc(get(X86::VCMPPSYrri));
4740     MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
4741     return true;
4742   }
4743   case X86::AVX512_512_SETALLONES: {
4744     Register Reg = MIB.getReg(0);
4745     MIB->setDesc(get(X86::VPTERNLOGDZrri));
4746     // VPTERNLOGD needs 3 register inputs and an immediate.
4747     // 0xff will return 1s for any input.
4748     MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
4749        .addReg(Reg, RegState::Undef).addImm(0xff);
4750     return true;
4751   }
4752   case X86::AVX512_512_SEXT_MASK_32:
4753   case X86::AVX512_512_SEXT_MASK_64: {
4754     Register Reg = MIB.getReg(0);
4755     Register MaskReg = MIB.getReg(1);
4756     unsigned MaskState = getRegState(MIB->getOperand(1));
4757     unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
4758                    X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
4759     MI.RemoveOperand(1);
4760     MIB->setDesc(get(Opc));
4761     // VPTERNLOG needs 3 register inputs and an immediate.
4762     // 0xff will return 1s for any input.
4763     MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
4764        .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
4765     return true;
4766   }
4767   case X86::VMOVAPSZ128rm_NOVLX:
4768     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
4769                            get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4770   case X86::VMOVUPSZ128rm_NOVLX:
4771     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
4772                            get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4773   case X86::VMOVAPSZ256rm_NOVLX:
4774     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
4775                            get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4776   case X86::VMOVUPSZ256rm_NOVLX:
4777     return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
4778                            get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4779   case X86::VMOVAPSZ128mr_NOVLX:
4780     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
4781                             get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4782   case X86::VMOVUPSZ128mr_NOVLX:
4783     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
4784                             get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4785   case X86::VMOVAPSZ256mr_NOVLX:
4786     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
4787                             get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4788   case X86::VMOVUPSZ256mr_NOVLX:
4789     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
4790                             get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4791   case X86::MOV32ri64: {
4792     Register Reg = MIB.getReg(0);
4793     Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
4794     MI.setDesc(get(X86::MOV32ri));
4795     MIB->getOperand(0).setReg(Reg32);
4796     MIB.addReg(Reg, RegState::ImplicitDefine);
4797     return true;
4798   }
4799 
4800   // KNL does not recognize dependency-breaking idioms for mask registers,
4801   // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
4802   // Using %k0 as the undef input register is a performance heuristic based
4803   // on the assumption that %k0 is used less frequently than the other mask
4804   // registers, since it is not usable as a write mask.
4805   // FIXME: A more advanced approach would be to choose the best input mask
4806   // register based on context.
4807   case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
4808   case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
4809   case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
4810   case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
4811   case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
4812   case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
4813   case TargetOpcode::LOAD_STACK_GUARD:
4814     expandLoadStackGuard(MIB, *this);
4815     return true;
4816   case X86::XOR64_FP:
4817   case X86::XOR32_FP:
4818     return expandXorFP(MIB, *this);
4819   case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
4820   case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
4821   case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
4822   case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
4823   case X86::ADD8rr_DB:    MIB->setDesc(get(X86::OR8rr));    break;
4824   case X86::ADD16rr_DB:   MIB->setDesc(get(X86::OR16rr));   break;
4825   case X86::ADD32rr_DB:   MIB->setDesc(get(X86::OR32rr));   break;
4826   case X86::ADD64rr_DB:   MIB->setDesc(get(X86::OR64rr));   break;
4827   case X86::ADD8ri_DB:    MIB->setDesc(get(X86::OR8ri));    break;
4828   case X86::ADD16ri_DB:   MIB->setDesc(get(X86::OR16ri));   break;
4829   case X86::ADD32ri_DB:   MIB->setDesc(get(X86::OR32ri));   break;
4830   case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
4831   case X86::ADD16ri8_DB:  MIB->setDesc(get(X86::OR16ri8));  break;
4832   case X86::ADD32ri8_DB:  MIB->setDesc(get(X86::OR32ri8));  break;
4833   case X86::ADD64ri8_DB:  MIB->setDesc(get(X86::OR64ri8));  break;
4834   }
4835   return false;
4836 }
4837 
4838 /// Return true for all instructions that only update
4839 /// the first 32 or 64 bits of the destination register and leave the rest
4840 /// unmodified. This can be used to avoid folding loads if the instructions
4841 /// only update part of the destination register, and the non-updated part is
4842 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
4843 /// instructions breaks the partial register dependency and it can improve
4844 /// performance. e.g.:
4845 ///
4846 ///   movss (%rdi), %xmm0
4847 ///   cvtss2sd %xmm0, %xmm0
4848 ///
4849 /// Instead of
4850 ///   cvtss2sd (%rdi), %xmm0
4851 ///
4852 /// FIXME: This should be turned into a TSFlags.
4853 ///
4854 static bool hasPartialRegUpdate(unsigned Opcode,
4855                                 const X86Subtarget &Subtarget,
4856                                 bool ForLoadFold = false) {
4857   switch (Opcode) {
4858   case X86::CVTSI2SSrr:
4859   case X86::CVTSI2SSrm:
4860   case X86::CVTSI642SSrr:
4861   case X86::CVTSI642SSrm:
4862   case X86::CVTSI2SDrr:
4863   case X86::CVTSI2SDrm:
4864   case X86::CVTSI642SDrr:
4865   case X86::CVTSI642SDrm:
4866     // Load folding won't affect the undef register update since the input is
4867     // a GPR.
4868     return !ForLoadFold;
4869   case X86::CVTSD2SSrr:
4870   case X86::CVTSD2SSrm:
4871   case X86::CVTSS2SDrr:
4872   case X86::CVTSS2SDrm:
4873   case X86::MOVHPDrm:
4874   case X86::MOVHPSrm:
4875   case X86::MOVLPDrm:
4876   case X86::MOVLPSrm:
4877   case X86::RCPSSr:
4878   case X86::RCPSSm:
4879   case X86::RCPSSr_Int:
4880   case X86::RCPSSm_Int:
4881   case X86::ROUNDSDr:
4882   case X86::ROUNDSDm:
4883   case X86::ROUNDSSr:
4884   case X86::ROUNDSSm:
4885   case X86::RSQRTSSr:
4886   case X86::RSQRTSSm:
4887   case X86::RSQRTSSr_Int:
4888   case X86::RSQRTSSm_Int:
4889   case X86::SQRTSSr:
4890   case X86::SQRTSSm:
4891   case X86::SQRTSSr_Int:
4892   case X86::SQRTSSm_Int:
4893   case X86::SQRTSDr:
4894   case X86::SQRTSDm:
4895   case X86::SQRTSDr_Int:
4896   case X86::SQRTSDm_Int:
4897     return true;
4898   // GPR
4899   case X86::POPCNT32rm:
4900   case X86::POPCNT32rr:
4901   case X86::POPCNT64rm:
4902   case X86::POPCNT64rr:
4903     return Subtarget.hasPOPCNTFalseDeps();
4904   case X86::LZCNT32rm:
4905   case X86::LZCNT32rr:
4906   case X86::LZCNT64rm:
4907   case X86::LZCNT64rr:
4908   case X86::TZCNT32rm:
4909   case X86::TZCNT32rr:
4910   case X86::TZCNT64rm:
4911   case X86::TZCNT64rr:
4912     return Subtarget.hasLZCNTFalseDeps();
4913   }
4914 
4915   return false;
4916 }
4917 
4918 /// Inform the BreakFalseDeps pass how many idle
4919 /// instructions we would like before a partial register update.
4920 unsigned X86InstrInfo::getPartialRegUpdateClearance(
4921     const MachineInstr &MI, unsigned OpNum,
4922     const TargetRegisterInfo *TRI) const {
4923   if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
4924     return 0;
4925 
4926   // If MI is marked as reading Reg, the partial register update is wanted.
4927   const MachineOperand &MO = MI.getOperand(0);
4928   Register Reg = MO.getReg();
4929   if (Reg.isVirtual()) {
4930     if (MO.readsReg() || MI.readsVirtualRegister(Reg))
4931       return 0;
4932   } else {
4933     if (MI.readsRegister(Reg, TRI))
4934       return 0;
4935   }
4936 
4937   // If any instructions in the clearance range are reading Reg, insert a
4938   // dependency-breaking instruction, which is inexpensive and is likely to
4939   // be hidden in other instructions' cycles.
4940   return PartialRegUpdateClearance;
4941 }
4942 
4943 // Return true for any instruction that copies the high bits of the first source
4944 // operand into the unused high bits of the destination operand.
4945 // Also returns true for instructions that have two inputs where one may
4946 // be undef and we want it to use the same register as the other input.
4947 static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
4948                               bool ForLoadFold = false) {
4949   // The OpNum checks below identify which source operand may be undef.
4950   switch (Opcode) {
4951   case X86::MMX_PUNPCKHBWirr:
4952   case X86::MMX_PUNPCKHWDirr:
4953   case X86::MMX_PUNPCKHDQirr:
4954   case X86::MMX_PUNPCKLBWirr:
4955   case X86::MMX_PUNPCKLWDirr:
4956   case X86::MMX_PUNPCKLDQirr:
4957   case X86::MOVHLPSrr:
4958   case X86::PACKSSWBrr:
4959   case X86::PACKUSWBrr:
4960   case X86::PACKSSDWrr:
4961   case X86::PACKUSDWrr:
4962   case X86::PUNPCKHBWrr:
4963   case X86::PUNPCKLBWrr:
4964   case X86::PUNPCKHWDrr:
4965   case X86::PUNPCKLWDrr:
4966   case X86::PUNPCKHDQrr:
4967   case X86::PUNPCKLDQrr:
4968   case X86::PUNPCKHQDQrr:
4969   case X86::PUNPCKLQDQrr:
4970   case X86::SHUFPDrri:
4971   case X86::SHUFPSrri:
4972     // These instructions are sometimes used with an undef first or second
4973     // source. Return true here so BreakFalseDeps will assign this source to the
4974     // same register as the first source to avoid a false dependency.
4975     // Operand 1 of these instructions is tied so they're separate from their
4976     // VEX counterparts.
4977     return OpNum == 2 && !ForLoadFold;
4978 
4979   case X86::VMOVLHPSrr:
4980   case X86::VMOVLHPSZrr:
4981   case X86::VPACKSSWBrr:
4982   case X86::VPACKUSWBrr:
4983   case X86::VPACKSSDWrr:
4984   case X86::VPACKUSDWrr:
4985   case X86::VPACKSSWBZ128rr:
4986   case X86::VPACKUSWBZ128rr:
4987   case X86::VPACKSSDWZ128rr:
4988   case X86::VPACKUSDWZ128rr:
4989   case X86::VPERM2F128rr:
4990   case X86::VPERM2I128rr:
4991   case X86::VSHUFF32X4Z256rri:
4992   case X86::VSHUFF32X4Zrri:
4993   case X86::VSHUFF64X2Z256rri:
4994   case X86::VSHUFF64X2Zrri:
4995   case X86::VSHUFI32X4Z256rri:
4996   case X86::VSHUFI32X4Zrri:
4997   case X86::VSHUFI64X2Z256rri:
4998   case X86::VSHUFI64X2Zrri:
4999   case X86::VPUNPCKHBWrr:
5000   case X86::VPUNPCKLBWrr:
5001   case X86::VPUNPCKHBWYrr:
5002   case X86::VPUNPCKLBWYrr:
5003   case X86::VPUNPCKHBWZ128rr:
5004   case X86::VPUNPCKLBWZ128rr:
5005   case X86::VPUNPCKHBWZ256rr:
5006   case X86::VPUNPCKLBWZ256rr:
5007   case X86::VPUNPCKHBWZrr:
5008   case X86::VPUNPCKLBWZrr:
5009   case X86::VPUNPCKHWDrr:
5010   case X86::VPUNPCKLWDrr:
5011   case X86::VPUNPCKHWDYrr:
5012   case X86::VPUNPCKLWDYrr:
5013   case X86::VPUNPCKHWDZ128rr:
5014   case X86::VPUNPCKLWDZ128rr:
5015   case X86::VPUNPCKHWDZ256rr:
5016   case X86::VPUNPCKLWDZ256rr:
5017   case X86::VPUNPCKHWDZrr:
5018   case X86::VPUNPCKLWDZrr:
5019   case X86::VPUNPCKHDQrr:
5020   case X86::VPUNPCKLDQrr:
5021   case X86::VPUNPCKHDQYrr:
5022   case X86::VPUNPCKLDQYrr:
5023   case X86::VPUNPCKHDQZ128rr:
5024   case X86::VPUNPCKLDQZ128rr:
5025   case X86::VPUNPCKHDQZ256rr:
5026   case X86::VPUNPCKLDQZ256rr:
5027   case X86::VPUNPCKHDQZrr:
5028   case X86::VPUNPCKLDQZrr:
5029   case X86::VPUNPCKHQDQrr:
5030   case X86::VPUNPCKLQDQrr:
5031   case X86::VPUNPCKHQDQYrr:
5032   case X86::VPUNPCKLQDQYrr:
5033   case X86::VPUNPCKHQDQZ128rr:
5034   case X86::VPUNPCKLQDQZ128rr:
5035   case X86::VPUNPCKHQDQZ256rr:
5036   case X86::VPUNPCKLQDQZ256rr:
5037   case X86::VPUNPCKHQDQZrr:
5038   case X86::VPUNPCKLQDQZrr:
5039     // These instructions are sometimes used with an undef first or second
5040     // source. Return true here so BreakFalseDeps will assign this source to the
5041     // same register as the first source to avoid a false dependency.
5042     return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
5043 
5044   case X86::VCVTSI2SSrr:
5045   case X86::VCVTSI2SSrm:
5046   case X86::VCVTSI2SSrr_Int:
5047   case X86::VCVTSI2SSrm_Int:
5048   case X86::VCVTSI642SSrr:
5049   case X86::VCVTSI642SSrm:
5050   case X86::VCVTSI642SSrr_Int:
5051   case X86::VCVTSI642SSrm_Int:
5052   case X86::VCVTSI2SDrr:
5053   case X86::VCVTSI2SDrm:
5054   case X86::VCVTSI2SDrr_Int:
5055   case X86::VCVTSI2SDrm_Int:
5056   case X86::VCVTSI642SDrr:
5057   case X86::VCVTSI642SDrm:
5058   case X86::VCVTSI642SDrr_Int:
5059   case X86::VCVTSI642SDrm_Int:
5060   // AVX-512
5061   case X86::VCVTSI2SSZrr:
5062   case X86::VCVTSI2SSZrm:
5063   case X86::VCVTSI2SSZrr_Int:
5064   case X86::VCVTSI2SSZrrb_Int:
5065   case X86::VCVTSI2SSZrm_Int:
5066   case X86::VCVTSI642SSZrr:
5067   case X86::VCVTSI642SSZrm:
5068   case X86::VCVTSI642SSZrr_Int:
5069   case X86::VCVTSI642SSZrrb_Int:
5070   case X86::VCVTSI642SSZrm_Int:
5071   case X86::VCVTSI2SDZrr:
5072   case X86::VCVTSI2SDZrm:
5073   case X86::VCVTSI2SDZrr_Int:
5074   case X86::VCVTSI2SDZrm_Int:
5075   case X86::VCVTSI642SDZrr:
5076   case X86::VCVTSI642SDZrm:
5077   case X86::VCVTSI642SDZrr_Int:
5078   case X86::VCVTSI642SDZrrb_Int:
5079   case X86::VCVTSI642SDZrm_Int:
5080   case X86::VCVTUSI2SSZrr:
5081   case X86::VCVTUSI2SSZrm:
5082   case X86::VCVTUSI2SSZrr_Int:
5083   case X86::VCVTUSI2SSZrrb_Int:
5084   case X86::VCVTUSI2SSZrm_Int:
5085   case X86::VCVTUSI642SSZrr:
5086   case X86::VCVTUSI642SSZrm:
5087   case X86::VCVTUSI642SSZrr_Int:
5088   case X86::VCVTUSI642SSZrrb_Int:
5089   case X86::VCVTUSI642SSZrm_Int:
5090   case X86::VCVTUSI2SDZrr:
5091   case X86::VCVTUSI2SDZrm:
5092   case X86::VCVTUSI2SDZrr_Int:
5093   case X86::VCVTUSI2SDZrm_Int:
5094   case X86::VCVTUSI642SDZrr:
5095   case X86::VCVTUSI642SDZrm:
5096   case X86::VCVTUSI642SDZrr_Int:
5097   case X86::VCVTUSI642SDZrrb_Int:
5098   case X86::VCVTUSI642SDZrm_Int:
5099     // Load folding won't affect the undef register update since the input is
5100     // a GPR.
5101     return OpNum == 1 && !ForLoadFold;
5102   case X86::VCVTSD2SSrr:
5103   case X86::VCVTSD2SSrm:
5104   case X86::VCVTSD2SSrr_Int:
5105   case X86::VCVTSD2SSrm_Int:
5106   case X86::VCVTSS2SDrr:
5107   case X86::VCVTSS2SDrm:
5108   case X86::VCVTSS2SDrr_Int:
5109   case X86::VCVTSS2SDrm_Int:
5110   case X86::VRCPSSr:
5111   case X86::VRCPSSr_Int:
5112   case X86::VRCPSSm:
5113   case X86::VRCPSSm_Int:
5114   case X86::VROUNDSDr:
5115   case X86::VROUNDSDm:
5116   case X86::VROUNDSDr_Int:
5117   case X86::VROUNDSDm_Int:
5118   case X86::VROUNDSSr:
5119   case X86::VROUNDSSm:
5120   case X86::VROUNDSSr_Int:
5121   case X86::VROUNDSSm_Int:
5122   case X86::VRSQRTSSr:
5123   case X86::VRSQRTSSr_Int:
5124   case X86::VRSQRTSSm:
5125   case X86::VRSQRTSSm_Int:
5126   case X86::VSQRTSSr:
5127   case X86::VSQRTSSr_Int:
5128   case X86::VSQRTSSm:
5129   case X86::VSQRTSSm_Int:
5130   case X86::VSQRTSDr:
5131   case X86::VSQRTSDr_Int:
5132   case X86::VSQRTSDm:
5133   case X86::VSQRTSDm_Int:
5134   // AVX-512
5135   case X86::VCVTSD2SSZrr:
5136   case X86::VCVTSD2SSZrr_Int:
5137   case X86::VCVTSD2SSZrrb_Int:
5138   case X86::VCVTSD2SSZrm:
5139   case X86::VCVTSD2SSZrm_Int:
5140   case X86::VCVTSS2SDZrr:
5141   case X86::VCVTSS2SDZrr_Int:
5142   case X86::VCVTSS2SDZrrb_Int:
5143   case X86::VCVTSS2SDZrm:
5144   case X86::VCVTSS2SDZrm_Int:
5145   case X86::VGETEXPSDZr:
5146   case X86::VGETEXPSDZrb:
5147   case X86::VGETEXPSDZm:
5148   case X86::VGETEXPSSZr:
5149   case X86::VGETEXPSSZrb:
5150   case X86::VGETEXPSSZm:
5151   case X86::VGETMANTSDZrri:
5152   case X86::VGETMANTSDZrrib:
5153   case X86::VGETMANTSDZrmi:
5154   case X86::VGETMANTSSZrri:
5155   case X86::VGETMANTSSZrrib:
5156   case X86::VGETMANTSSZrmi:
5157   case X86::VRNDSCALESDZr:
5158   case X86::VRNDSCALESDZr_Int:
5159   case X86::VRNDSCALESDZrb_Int:
5160   case X86::VRNDSCALESDZm:
5161   case X86::VRNDSCALESDZm_Int:
5162   case X86::VRNDSCALESSZr:
5163   case X86::VRNDSCALESSZr_Int:
5164   case X86::VRNDSCALESSZrb_Int:
5165   case X86::VRNDSCALESSZm:
5166   case X86::VRNDSCALESSZm_Int:
5167   case X86::VRCP14SDZrr:
5168   case X86::VRCP14SDZrm:
5169   case X86::VRCP14SSZrr:
5170   case X86::VRCP14SSZrm:
5171   case X86::VRCP28SDZr:
5172   case X86::VRCP28SDZrb:
5173   case X86::VRCP28SDZm:
5174   case X86::VRCP28SSZr:
5175   case X86::VRCP28SSZrb:
5176   case X86::VRCP28SSZm:
5177   case X86::VREDUCESSZrmi:
5178   case X86::VREDUCESSZrri:
5179   case X86::VREDUCESSZrrib:
5180   case X86::VRSQRT14SDZrr:
5181   case X86::VRSQRT14SDZrm:
5182   case X86::VRSQRT14SSZrr:
5183   case X86::VRSQRT14SSZrm:
5184   case X86::VRSQRT28SDZr:
5185   case X86::VRSQRT28SDZrb:
5186   case X86::VRSQRT28SDZm:
5187   case X86::VRSQRT28SSZr:
5188   case X86::VRSQRT28SSZrb:
5189   case X86::VRSQRT28SSZm:
5190   case X86::VSQRTSSZr:
5191   case X86::VSQRTSSZr_Int:
5192   case X86::VSQRTSSZrb_Int:
5193   case X86::VSQRTSSZm:
5194   case X86::VSQRTSSZm_Int:
5195   case X86::VSQRTSDZr:
5196   case X86::VSQRTSDZr_Int:
5197   case X86::VSQRTSDZrb_Int:
5198   case X86::VSQRTSDZm:
5199   case X86::VSQRTSDZm_Int:
5200     return OpNum == 1;
5201   case X86::VMOVSSZrrk:
5202   case X86::VMOVSDZrrk:
5203     return OpNum == 3 && !ForLoadFold;
5204   case X86::VMOVSSZrrkz:
5205   case X86::VMOVSDZrrkz:
5206     return OpNum == 2 && !ForLoadFold;
5207   }
5208 
5209   return false;
5210 }
5211 
5212 /// Inform the BreakFalseDeps pass how many idle instructions we would like
5213 /// before certain undef register reads.
5214 ///
5215 /// This catches the VCVTSI2SD family of instructions:
5216 ///
5217 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
5218 ///
5219 /// We should be careful *not* to catch VXOR idioms which are presumably
5220 /// handled specially in the pipeline:
5221 ///
5222 /// vxorps undef %xmm1, undef %xmm1, %xmm1
5223 ///
5224 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
5225 /// high bits that are passed-through are not live.
5226 unsigned
5227 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
5228                                    const TargetRegisterInfo *TRI) const {
5229   const MachineOperand &MO = MI.getOperand(OpNum);
5230   if (Register::isPhysicalRegister(MO.getReg()) &&
5231       hasUndefRegUpdate(MI.getOpcode(), OpNum))
5232     return UndefRegClearance;
5233 
5234   return 0;
5235 }
5236 
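// Break a false output dependency on Reg by inserting a zeroing idiom right
// before MI, e.g. (illustrative):
//   xorps %xmm0, %xmm0
//   sqrtss %xmm1, %xmm0
// so the sqrtss no longer waits on the previous producer of %xmm0.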
5237 void X86InstrInfo::breakPartialRegDependency(
5238     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5239   Register Reg = MI.getOperand(OpNum).getReg();
5240   // If MI kills this register, the false dependence is already broken.
5241   if (MI.killsRegister(Reg, TRI))
5242     return;
5243 
5244   if (X86::VR128RegClass.contains(Reg)) {
5245     // These instructions are all floating point domain, so xorps is the best
5246     // choice.
5247     unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
5248     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
5249         .addReg(Reg, RegState::Undef)
5250         .addReg(Reg, RegState::Undef);
5251     MI.addRegisterKilled(Reg, TRI, true);
5252   } else if (X86::VR256RegClass.contains(Reg)) {
5253     // Use vxorps to clear the full ymm register.
5254     // It wants to read and write the xmm sub-register.
5255     Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
5256     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
5257         .addReg(XReg, RegState::Undef)
5258         .addReg(XReg, RegState::Undef)
5259         .addReg(Reg, RegState::ImplicitDefine);
5260     MI.addRegisterKilled(Reg, TRI, true);
5261   } else if (X86::GR64RegClass.contains(Reg)) {
5262     // Using XOR32rr because it has a shorter encoding and zeros the upper bits
5263     // as well.
5264     Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
5265     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
5266         .addReg(XReg, RegState::Undef)
5267         .addReg(XReg, RegState::Undef)
5268         .addReg(Reg, RegState::ImplicitDefine);
5269     MI.addRegisterKilled(Reg, TRI, true);
5270   } else if (X86::GR32RegClass.contains(Reg)) {
5271     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
5272         .addReg(Reg, RegState::Undef)
5273         .addReg(Reg, RegState::Undef);
5274     MI.addRegisterKilled(Reg, TRI, true);
5275   }
5276 }
5277 
5278 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
5279                         int PtrOffset = 0) {
5280   unsigned NumAddrOps = MOs.size();
5281 
5282   if (NumAddrOps < 4) {
5283     // FrameIndex only - add an immediate offset (whether it's zero or not).
5284     for (unsigned i = 0; i != NumAddrOps; ++i)
5285       MIB.add(MOs[i]);
5286     addOffset(MIB, PtrOffset);
5287   } else {
5288     // General Memory Addressing - we need to add any offset to an existing
5289     // offset.
5290     assert(MOs.size() == 5 && "Unexpected memory operand list length");
5291     for (unsigned i = 0; i != NumAddrOps; ++i) {
5292       const MachineOperand &MO = MOs[i];
5293       if (i == 3 && PtrOffset != 0) {
5294         MIB.addDisp(MO, PtrOffset);
5295       } else {
5296         MIB.add(MO);
5297       }
5298     }
5299   }
5300 }
5301 
5302 static void updateOperandRegConstraints(MachineFunction &MF,
5303                                         MachineInstr &NewMI,
5304                                         const TargetInstrInfo &TII) {
5305   MachineRegisterInfo &MRI = MF.getRegInfo();
5306   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
5307 
5308   for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
5309     MachineOperand &MO = NewMI.getOperand(Idx);
5310     // We only need to update constraints on virtual register operands.
5311     if (!MO.isReg())
5312       continue;
5313     Register Reg = MO.getReg();
5314     if (!Reg.isVirtual())
5315       continue;
5316 
5317     auto *NewRC = MRI.constrainRegClass(
5318         Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
5319     if (!NewRC) {
5320       LLVM_DEBUG(
5321           dbgs() << "WARNING: Unable to update register constraint for operand "
5322                  << Idx << " of instruction:\n";
5323           NewMI.dump(); dbgs() << "\n");
5324     }
5325   }
5326 }
5327 
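// Fuse a memory reference into the tied def/use of a two-address instruction.
// A hypothetical sketch: folding a stack slot into "%1 = ADD32rr %1(tied), %2"
// yields "ADD32mr %stack.0, 1, $noreg, 0, $noreg, %2"; the memory operands go
// first and the remaining register operands are copied over afterwards.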
5328 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
5329                                      ArrayRef<MachineOperand> MOs,
5330                                      MachineBasicBlock::iterator InsertPt,
5331                                      MachineInstr &MI,
5332                                      const TargetInstrInfo &TII) {
5333   // Create the base instruction with the memory operand as the first part.
5334   // Omit the implicit operands, something BuildMI can't do.
5335   MachineInstr *NewMI =
5336       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5337   MachineInstrBuilder MIB(MF, NewMI);
5338   addOperands(MIB, MOs);
5339 
5340   // Loop over the rest of the ri operands, converting them over.
5341   unsigned NumOps = MI.getDesc().getNumOperands() - 2;
5342   for (unsigned i = 0; i != NumOps; ++i) {
5343     MachineOperand &MO = MI.getOperand(i + 2);
5344     MIB.add(MO);
5345   }
5346   for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
5347     MachineOperand &MO = MI.getOperand(i);
5348     MIB.add(MO);
5349   }
5350 
5351   updateOperandRegConstraints(MF, *NewMI, TII);
5352 
5353   MachineBasicBlock *MBB = InsertPt->getParent();
5354   MBB->insert(InsertPt, NewMI);
5355 
5356   return MIB;
5357 }
5358 
5359 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
5360                               unsigned OpNo, ArrayRef<MachineOperand> MOs,
5361                               MachineBasicBlock::iterator InsertPt,
5362                               MachineInstr &MI, const TargetInstrInfo &TII,
5363                               int PtrOffset = 0) {
5364   // Omit the implicit operands, something BuildMI can't do.
5365   MachineInstr *NewMI =
5366       MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5367   MachineInstrBuilder MIB(MF, NewMI);
5368 
5369   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
5370     MachineOperand &MO = MI.getOperand(i);
5371     if (i == OpNo) {
5372       assert(MO.isReg() && "Expected to fold into reg operand!");
5373       addOperands(MIB, MOs, PtrOffset);
5374     } else {
5375       MIB.add(MO);
5376     }
5377   }
5378 
5379   updateOperandRegConstraints(MF, *NewMI, TII);
5380 
5381   // Copy the NoFPExcept flag from the instruction we're fusing.
5382   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
5383     NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
5384 
5385   MachineBasicBlock *MBB = InsertPt->getParent();
5386   MBB->insert(InsertPt, NewMI);
5387 
5388   return MIB;
5389 }
5390 
5391 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
5392                                 ArrayRef<MachineOperand> MOs,
5393                                 MachineBasicBlock::iterator InsertPt,
5394                                 MachineInstr &MI) {
5395   MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
5396                                     MI.getDebugLoc(), TII.get(Opcode));
5397   addOperands(MIB, MOs);
5398   return MIB.addImm(0);
5399 }
5400 
5401 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
5402     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5403     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5404     unsigned Size, Align Alignment) const {
5405   switch (MI.getOpcode()) {
5406   case X86::INSERTPSrr:
5407   case X86::VINSERTPSrr:
5408   case X86::VINSERTPSZrr:
5409     // Attempt to convert the load of inserted vector into a fold load
5410     // of a single float.
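    // Illustrative example (made-up registers and offsets): if %xmm1 below is
    // a reload from a 16-byte slot, "insertps $0x40, %xmm1, %xmm0" can become
    // "insertps $0x00, 4(<slot>), %xmm0", loading only the selected element.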
5411     if (OpNum == 2) {
5412       unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
5413       unsigned ZMask = Imm & 15;
5414       unsigned DstIdx = (Imm >> 4) & 3;
5415       unsigned SrcIdx = (Imm >> 6) & 3;
5416 
5417       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5418       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5419       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5420       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) {
5421         int PtrOffset = SrcIdx * 4;
5422         unsigned NewImm = (DstIdx << 4) | ZMask;
5423         unsigned NewOpCode =
5424             (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
5425             (MI.getOpcode() == X86::VINSERTPSrr)  ? X86::VINSERTPSrm  :
5426                                                     X86::INSERTPSrm;
5427         MachineInstr *NewMI =
5428             FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
5429         NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
5430         return NewMI;
5431       }
5432     }
5433     break;
5434   case X86::MOVHLPSrr:
5435   case X86::VMOVHLPSrr:
5436   case X86::VMOVHLPSZrr:
5437     // Move the upper 64 bits of the second operand to the lower 64 bits.
5438     // To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
5439     // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
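    // Illustrative example (made-up operands): "movhlps %xmm1, %xmm0" with
    // %xmm1 reloaded from a 16-byte slot can instead load the high half
    // directly via "movlps 8(<slot>), %xmm0".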
5440     if (OpNum == 2) {
5441       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5442       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5443       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5444       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
5445         unsigned NewOpCode =
5446             (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
5447             (MI.getOpcode() == X86::VMOVHLPSrr)  ? X86::VMOVLPSrm     :
5448                                                    X86::MOVLPSrm;
5449         MachineInstr *NewMI =
5450             FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
5451         return NewMI;
5452       }
5453     }
5454     break;
5455   case X86::UNPCKLPDrr:
5456     // If we won't be able to fold this to the memory form of UNPCKL, use
5457     // MOVHPD instead. Done as custom because we can't have this in the load
5458     // table twice.
5459     if (OpNum == 2) {
5460       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5461       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5462       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5463       if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
5464         MachineInstr *NewMI =
5465             FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
5466         return NewMI;
5467       }
5468     }
5469     break;
5470   }
5471 
5472   return nullptr;
5473 }
5474 
5475 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
5476                                                MachineInstr &MI) {
5477   if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/true) ||
5478       !MI.getOperand(1).isReg())
5479     return false;
5480 
5481   // There are two cases we need to handle depending on where in the pipeline
5482   // the folding attempt is being made.
5483   // -Register has the undef flag set.
5484   // -Register is produced by the IMPLICIT_DEF instruction.
5485 
5486   if (MI.getOperand(1).isUndef())
5487     return true;
5488 
5489   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5490   MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
5491   return VRegDef && VRegDef->isImplicitDef();
5492 }
5493 
5494 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5495     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5496     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5497     unsigned Size, Align Alignment, bool AllowCommute) const {
5498   bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
5499   bool isTwoAddrFold = false;
5500 
5501   // For CPUs that favor the register form of a call or push,
5502   // do not fold loads into calls or pushes, unless optimizing for size
5503   // aggressively.
5504   if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
5505       (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
5506        MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
5507        MI.getOpcode() == X86::PUSH64r))
5508     return nullptr;
5509 
5510   // Avoid partial and undef register update stalls unless optimizing for size.
5511   if (!MF.getFunction().hasOptSize() &&
5512       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5513        shouldPreventUndefRegUpdateMemFold(MF, MI)))
5514     return nullptr;
5515 
5516   unsigned NumOps = MI.getDesc().getNumOperands();
5517   bool isTwoAddr =
5518       NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
5519 
5520   // FIXME: AsmPrinter doesn't know how to handle
5521   // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
5522   if (MI.getOpcode() == X86::ADD32ri &&
5523       MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
5524     return nullptr;
5525 
5526   // GOTTPOFF relocation loads can only be folded into add instructions.
5527   // FIXME: Need to exclude other relocations that only support specific
5528   // instructions.
5529   if (MOs.size() == X86::AddrNumOperands &&
5530       MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
5531       MI.getOpcode() != X86::ADD64rr)
5532     return nullptr;
5533 
5534   MachineInstr *NewMI = nullptr;
5535 
5536   // Attempt to fold any custom cases we have.
5537   if (MachineInstr *CustomMI = foldMemoryOperandCustom(
5538           MF, MI, OpNum, MOs, InsertPt, Size, Alignment))
5539     return CustomMI;
5540 
5541   const X86MemoryFoldTableEntry *I = nullptr;
5542 
5543   // Folding a memory location into the two-address part of a two-address
5544   // instruction is different from folding it in other places.  It requires
5545   // replacing the *two* registers with the memory location.
5546   if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
5547       MI.getOperand(1).isReg() &&
5548       MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
5549     I = lookupTwoAddrFoldTable(MI.getOpcode());
5550     isTwoAddrFold = true;
5551   } else {
5552     if (OpNum == 0) {
5553       if (MI.getOpcode() == X86::MOV32r0) {
5554         NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
5555         if (NewMI)
5556           return NewMI;
5557       }
5558     }
5559 
5560     I = lookupFoldTable(MI.getOpcode(), OpNum);
5561   }
5562 
5563   if (I != nullptr) {
5564     unsigned Opcode = I->DstOp;
5565     bool FoldedLoad =
5566         isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0;
5567     bool FoldedStore =
5568         isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE);
5569     MaybeAlign MinAlign =
5570         decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
5571     if (MinAlign && Alignment < *MinAlign)
5572       return nullptr;
5573     bool NarrowToMOV32rm = false;
5574     if (Size) {
5575       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5576       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
5577                                                   &RI, MF);
5578       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5579       // Check if it's safe to fold the load. If the size of the object is
5580       // narrower than the load width, then it's not.
5581       // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
5582       if (FoldedLoad && Size < RCSize) {
5583         // If this is a 64-bit load, but the spill slot is 32, then we can do
5584         // a 32-bit load which is implicitly zero-extended. This likely is
5585         // due to live interval analysis remat'ing a load from stack slot.
5586         if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
5587           return nullptr;
5588         if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
5589           return nullptr;
5590         Opcode = X86::MOV32rm;
5591         NarrowToMOV32rm = true;
5592       }
5593       // For stores, make sure the size of the object is equal to the size of
5594       // the store. If the object is larger, the extra bits would be garbage. If
5595       // the object is smaller we might overwrite another object or fault.
5596       if (FoldedStore && Size != RCSize)
5597         return nullptr;
5598     }
5599 
5600     if (isTwoAddrFold)
5601       NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
5602     else
5603       NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
5604 
5605     if (NarrowToMOV32rm) {
5606       // This is the special case where we use a MOV32rm to load a 32-bit
5607       // value and zero-extend the top bits. Change the destination register
5608       // to a 32-bit one.
5609       Register DstReg = NewMI->getOperand(0).getReg();
5610       if (DstReg.isPhysical())
5611         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
5612       else
5613         NewMI->getOperand(0).setSubReg(X86::sub_32bit);
5614     }
5615     return NewMI;
5616   }
5617 
5618   // If the instruction and target operand are commutable, commute the
5619   // instruction and try again.
5620   if (AllowCommute) {
5621     unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
5622     if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
5623       bool HasDef = MI.getDesc().getNumDefs();
5624       Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
5625       Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
5626       Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
5627       bool Tied1 =
5628           0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
5629       bool Tied2 =
5630           0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
5631 
5632       // If either of the commutable operands is tied to the destination
5633       // then we cannot commute + fold.
5634       if ((HasDef && Reg0 == Reg1 && Tied1) ||
5635           (HasDef && Reg0 == Reg2 && Tied2))
5636         return nullptr;
5637 
5638       MachineInstr *CommutedMI =
5639           commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5640       if (!CommutedMI) {
5641         // Unable to commute.
5642         return nullptr;
5643       }
5644       if (CommutedMI != &MI) {
5645         // New instruction. We can't fold from this.
5646         CommutedMI->eraseFromParent();
5647         return nullptr;
5648       }
5649 
5650       // Attempt to fold with the commuted version of the instruction.
5651       NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
5652                                     Alignment, /*AllowCommute=*/false);
5653       if (NewMI)
5654         return NewMI;
5655 
5656       // Folding failed again - undo the commute before returning.
5657       MachineInstr *UncommutedMI =
5658           commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5659       if (!UncommutedMI) {
5660         // Unable to commute.
5661         return nullptr;
5662       }
5663       if (UncommutedMI != &MI) {
5664         // New instruction. It doesn't need to be kept.
5665         UncommutedMI->eraseFromParent();
5666         return nullptr;
5667       }
5668 
5669       // Return here to prevent duplicate fuse failure report.
5670       return nullptr;
5671     }
5672   }
5673 
5674   // No fusion
5675   if (PrintFailedFusing && !MI.isCopy())
5676     dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
5677   return nullptr;
5678 }
5679 
5680 MachineInstr *
5681 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
5682                                     ArrayRef<unsigned> Ops,
5683                                     MachineBasicBlock::iterator InsertPt,
5684                                     int FrameIndex, LiveIntervals *LIS,
5685                                     VirtRegMap *VRM) const {
5686   // Check switch flag
5687   if (NoFusing)
5688     return nullptr;
5689 
5690   // Avoid partial and undef register update stalls unless optimizing for size.
5691   if (!MF.getFunction().hasOptSize() &&
5692       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5693        shouldPreventUndefRegUpdateMemFold(MF, MI)))
5694     return nullptr;
5695 
5696   // Don't fold subreg spills, or reloads that use a high subreg.
5697   for (auto Op : Ops) {
5698     MachineOperand &MO = MI.getOperand(Op);
5699     auto SubReg = MO.getSubReg();
5700     if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
5701       return nullptr;
5702   }
5703 
5704   const MachineFrameInfo &MFI = MF.getFrameInfo();
5705   unsigned Size = MFI.getObjectSize(FrameIndex);
5706   Align Alignment = MFI.getObjectAlign(FrameIndex);
5707   // If the function stack isn't realigned we don't want to fold instructions
5708   // that need increased alignment.
5709   if (!RI.needsStackRealignment(MF))
5710     Alignment =
5711         std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
5712   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5713     unsigned NewOpc = 0;
5714     unsigned RCSize = 0;
5715     switch (MI.getOpcode()) {
5716     default: return nullptr;
5717     case X86::TEST8rr:  NewOpc = X86::CMP8ri; RCSize = 1; break;
5718     case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
5719     case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
5720     case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
5721     }
5722     // Check if it's safe to fold the load. If the size of the object is
5723     // narrower than the load width, then it's not.
5724     if (Size < RCSize)
5725       return nullptr;
5726     // Change to CMPXXri r, 0 first.
5727     MI.setDesc(get(NewOpc));
5728     MI.getOperand(1).ChangeToImmediate(0);
5729   } else if (Ops.size() != 1)
5730     return nullptr;
5731 
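       // Illustrative sketch (operands assumed): when both operands of
       //   TEST32rr %1, %1
       // are to be reloaded from the same 4-byte slot, the code above rewrites
       // it to CMP32ri8 %1, 0 and the call below folds operand 0 into memory,
       // giving roughly
       //   CMP32mi8 %stack.0, 1, $noreg, 0, $noreg, 0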
5732   return foldMemoryOperandImpl(MF, MI, Ops[0],
5733                                MachineOperand::CreateFI(FrameIndex), InsertPt,
5734                                Size, Alignment, /*AllowCommute=*/true);
5735 }
5736 
5737 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
5738 /// because the latter uses contents that wouldn't be defined in the folded
5739 /// version.  For instance, this transformation isn't legal:
5740 ///   movss (%rdi), %xmm0
5741 ///   addps %xmm0, %xmm0
5742 /// ->
5743 ///   addps (%rdi), %xmm0
5744 ///
5745 /// But this one is:
5746 ///   movss (%rdi), %xmm0
5747 ///   addss %xmm0, %xmm0
5748 /// ->
5749 ///   addss (%rdi), %xmm0
5750 ///
5751 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
5752                                              const MachineInstr &UserMI,
5753                                              const MachineFunction &MF) {
5754   unsigned Opc = LoadMI.getOpcode();
5755   unsigned UserOpc = UserMI.getOpcode();
5756   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5757   const TargetRegisterClass *RC =
5758       MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
5759   unsigned RegSize = TRI.getRegSizeInBits(*RC);
5760 
5761   if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
5762        Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
5763        Opc == X86::VMOVSSZrm_alt) &&
5764       RegSize > 32) {
5765     // These instructions only load 32 bits; we can't fold them if the
5766     // destination register is wider than 32 bits (4 bytes) and the user
5767     // instruction isn't scalar (SS).
5768     switch (UserOpc) {
5769     case X86::CVTSS2SDrr_Int:
5770     case X86::VCVTSS2SDrr_Int:
5771     case X86::VCVTSS2SDZrr_Int:
5772     case X86::VCVTSS2SDZrr_Intk:
5773     case X86::VCVTSS2SDZrr_Intkz:
5774     case X86::CVTSS2SIrr_Int:     case X86::CVTSS2SI64rr_Int:
5775     case X86::VCVTSS2SIrr_Int:    case X86::VCVTSS2SI64rr_Int:
5776     case X86::VCVTSS2SIZrr_Int:   case X86::VCVTSS2SI64Zrr_Int:
5777     case X86::CVTTSS2SIrr_Int:    case X86::CVTTSS2SI64rr_Int:
5778     case X86::VCVTTSS2SIrr_Int:   case X86::VCVTTSS2SI64rr_Int:
5779     case X86::VCVTTSS2SIZrr_Int:  case X86::VCVTTSS2SI64Zrr_Int:
5780     case X86::VCVTSS2USIZrr_Int:  case X86::VCVTSS2USI64Zrr_Int:
5781     case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int:
5782     case X86::RCPSSr_Int:   case X86::VRCPSSr_Int:
5783     case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int:
5784     case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int:
5785     case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int:
5786     case X86::UCOMISSrr_Int:case X86::VUCOMISSrr_Int:case X86::VUCOMISSZrr_Int:
5787     case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
5788     case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
5789     case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
5790     case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
5791     case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
5792     case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
5793     case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int:
5794     case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
5795     case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
5796     case X86::VCMPSSZrr_Intk:
5797     case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
5798     case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
5799     case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
5800     case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
5801     case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz:
5802     case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
5803     case X86::VFMADDSS4rr_Int:   case X86::VFNMADDSS4rr_Int:
5804     case X86::VFMSUBSS4rr_Int:   case X86::VFNMSUBSS4rr_Int:
5805     case X86::VFMADD132SSr_Int:  case X86::VFNMADD132SSr_Int:
5806     case X86::VFMADD213SSr_Int:  case X86::VFNMADD213SSr_Int:
5807     case X86::VFMADD231SSr_Int:  case X86::VFNMADD231SSr_Int:
5808     case X86::VFMSUB132SSr_Int:  case X86::VFNMSUB132SSr_Int:
5809     case X86::VFMSUB213SSr_Int:  case X86::VFNMSUB213SSr_Int:
5810     case X86::VFMSUB231SSr_Int:  case X86::VFNMSUB231SSr_Int:
5811     case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
5812     case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
5813     case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
5814     case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
5815     case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
5816     case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
5817     case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
5818     case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
5819     case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
5820     case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
5821     case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
5822     case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
5823     case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
5824     case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
5825     case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
5826     case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
5827     case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
5828     case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
5829     case X86::VFIXUPIMMSSZrri:
5830     case X86::VFIXUPIMMSSZrrik:
5831     case X86::VFIXUPIMMSSZrrikz:
5832     case X86::VFPCLASSSSZrr:
5833     case X86::VFPCLASSSSZrrk:
5834     case X86::VGETEXPSSZr:
5835     case X86::VGETEXPSSZrk:
5836     case X86::VGETEXPSSZrkz:
5837     case X86::VGETMANTSSZrri:
5838     case X86::VGETMANTSSZrrik:
5839     case X86::VGETMANTSSZrrikz:
5840     case X86::VRANGESSZrri:
5841     case X86::VRANGESSZrrik:
5842     case X86::VRANGESSZrrikz:
5843     case X86::VRCP14SSZrr:
5844     case X86::VRCP14SSZrrk:
5845     case X86::VRCP14SSZrrkz:
5846     case X86::VRCP28SSZr:
5847     case X86::VRCP28SSZrk:
5848     case X86::VRCP28SSZrkz:
5849     case X86::VREDUCESSZrri:
5850     case X86::VREDUCESSZrrik:
5851     case X86::VREDUCESSZrrikz:
5852     case X86::VRNDSCALESSZr_Int:
5853     case X86::VRNDSCALESSZr_Intk:
5854     case X86::VRNDSCALESSZr_Intkz:
5855     case X86::VRSQRT14SSZrr:
5856     case X86::VRSQRT14SSZrrk:
5857     case X86::VRSQRT14SSZrrkz:
5858     case X86::VRSQRT28SSZr:
5859     case X86::VRSQRT28SSZrk:
5860     case X86::VRSQRT28SSZrkz:
5861     case X86::VSCALEFSSZrr:
5862     case X86::VSCALEFSSZrrk:
5863     case X86::VSCALEFSSZrrkz:
5864       return false;
5865     default:
5866       return true;
5867     }
5868   }
5869 
5870   if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
5871        Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
5872        Opc == X86::VMOVSDZrm_alt) &&
5873       RegSize > 64) {
5874     // These instructions only load 64 bits; we can't fold them if the
5875     // destination register is wider than 64 bits (8 bytes) and the user
5876     // instruction isn't scalar (SD).
5877     switch (UserOpc) {
5878     case X86::CVTSD2SSrr_Int:
5879     case X86::VCVTSD2SSrr_Int:
5880     case X86::VCVTSD2SSZrr_Int:
5881     case X86::VCVTSD2SSZrr_Intk:
5882     case X86::VCVTSD2SSZrr_Intkz:
5883     case X86::CVTSD2SIrr_Int:     case X86::CVTSD2SI64rr_Int:
5884     case X86::VCVTSD2SIrr_Int:    case X86::VCVTSD2SI64rr_Int:
5885     case X86::VCVTSD2SIZrr_Int:   case X86::VCVTSD2SI64Zrr_Int:
5886     case X86::CVTTSD2SIrr_Int:    case X86::CVTTSD2SI64rr_Int:
5887     case X86::VCVTTSD2SIrr_Int:   case X86::VCVTTSD2SI64rr_Int:
5888     case X86::VCVTTSD2SIZrr_Int:  case X86::VCVTTSD2SI64Zrr_Int:
5889     case X86::VCVTSD2USIZrr_Int:  case X86::VCVTSD2USI64Zrr_Int:
5890     case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int:
5891     case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int:
5892     case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int:
5893     case X86::UCOMISDrr_Int:case X86::VUCOMISDrr_Int:case X86::VUCOMISDZrr_Int:
5894     case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
5895     case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
5896     case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
5897     case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
5898     case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
5899     case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
5900     case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int:
5901     case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
5902     case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
5903     case X86::VCMPSDZrr_Intk:
5904     case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
5905     case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
5906     case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
5907     case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
5908     case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz:
5909     case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
5910     case X86::VFMADDSD4rr_Int:   case X86::VFNMADDSD4rr_Int:
5911     case X86::VFMSUBSD4rr_Int:   case X86::VFNMSUBSD4rr_Int:
5912     case X86::VFMADD132SDr_Int:  case X86::VFNMADD132SDr_Int:
5913     case X86::VFMADD213SDr_Int:  case X86::VFNMADD213SDr_Int:
5914     case X86::VFMADD231SDr_Int:  case X86::VFNMADD231SDr_Int:
5915     case X86::VFMSUB132SDr_Int:  case X86::VFNMSUB132SDr_Int:
5916     case X86::VFMSUB213SDr_Int:  case X86::VFNMSUB213SDr_Int:
5917     case X86::VFMSUB231SDr_Int:  case X86::VFNMSUB231SDr_Int:
5918     case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
5919     case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
5920     case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
5921     case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
5922     case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
5923     case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
5924     case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
5925     case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
5926     case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
5927     case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
5928     case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
5929     case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
5930     case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
5931     case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
5932     case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
5933     case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
5934     case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
5935     case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
5936     case X86::VFIXUPIMMSDZrri:
5937     case X86::VFIXUPIMMSDZrrik:
5938     case X86::VFIXUPIMMSDZrrikz:
5939     case X86::VFPCLASSSDZrr:
5940     case X86::VFPCLASSSDZrrk:
5941     case X86::VGETEXPSDZr:
5942     case X86::VGETEXPSDZrk:
5943     case X86::VGETEXPSDZrkz:
5944     case X86::VGETMANTSDZrri:
5945     case X86::VGETMANTSDZrrik:
5946     case X86::VGETMANTSDZrrikz:
5947     case X86::VRANGESDZrri:
5948     case X86::VRANGESDZrrik:
5949     case X86::VRANGESDZrrikz:
5950     case X86::VRCP14SDZrr:
5951     case X86::VRCP14SDZrrk:
5952     case X86::VRCP14SDZrrkz:
5953     case X86::VRCP28SDZr:
5954     case X86::VRCP28SDZrk:
5955     case X86::VRCP28SDZrkz:
5956     case X86::VREDUCESDZrri:
5957     case X86::VREDUCESDZrrik:
5958     case X86::VREDUCESDZrrikz:
5959     case X86::VRNDSCALESDZr_Int:
5960     case X86::VRNDSCALESDZr_Intk:
5961     case X86::VRNDSCALESDZr_Intkz:
5962     case X86::VRSQRT14SDZrr:
5963     case X86::VRSQRT14SDZrrk:
5964     case X86::VRSQRT14SDZrrkz:
5965     case X86::VRSQRT28SDZr:
5966     case X86::VRSQRT28SDZrk:
5967     case X86::VRSQRT28SDZrkz:
5968     case X86::VSCALEFSDZrr:
5969     case X86::VSCALEFSDZrrk:
5970     case X86::VSCALEFSDZrrkz:
5971       return false;
5972     default:
5973       return true;
5974     }
5975   }
5976 
5977   return false;
5978 }
5979 
5980 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5981     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
5982     MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
5983     LiveIntervals *LIS) const {
5984 
5985   // TODO: Support the case where LoadMI loads a wide register, but MI
5986   // only uses a subreg.
5987   for (auto Op : Ops) {
5988     if (MI.getOperand(Op).getSubReg())
5989       return nullptr;
5990   }
5991 
5992   // If loading from a FrameIndex, fold directly from the FrameIndex.
5993   unsigned NumOps = LoadMI.getDesc().getNumOperands();
5994   int FrameIndex;
5995   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
5996     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
5997       return nullptr;
5998     return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
5999   }
6000 
6001   // Check switch flag
6002   if (NoFusing) return nullptr;
6003 
6004   // Avoid partial and undef register update stalls unless optimizing for size.
6005   if (!MF.getFunction().hasOptSize() &&
6006       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
6007        shouldPreventUndefRegUpdateMemFold(MF, MI)))
6008     return nullptr;
6009 
6010   // Determine the alignment of the load.
6011   Align Alignment;
6012   if (LoadMI.hasOneMemOperand())
6013     Alignment = (*LoadMI.memoperands_begin())->getAlign();
6014   else
6015     switch (LoadMI.getOpcode()) {
6016     case X86::AVX512_512_SET0:
6017     case X86::AVX512_512_SETALLONES:
6018       Alignment = Align(64);
6019       break;
6020     case X86::AVX2_SETALLONES:
6021     case X86::AVX1_SETALLONES:
6022     case X86::AVX_SET0:
6023     case X86::AVX512_256_SET0:
6024       Alignment = Align(32);
6025       break;
6026     case X86::V_SET0:
6027     case X86::V_SETALLONES:
6028     case X86::AVX512_128_SET0:
6029     case X86::FsFLD0F128:
6030     case X86::AVX512_FsFLD0F128:
6031       Alignment = Align(16);
6032       break;
6033     case X86::MMX_SET0:
6034     case X86::FsFLD0SD:
6035     case X86::AVX512_FsFLD0SD:
6036       Alignment = Align(8);
6037       break;
6038     case X86::FsFLD0SS:
6039     case X86::AVX512_FsFLD0SS:
6040       Alignment = Align(4);
6041       break;
6042     default:
6043       return nullptr;
6044     }
6045   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
6046     unsigned NewOpc = 0;
6047     switch (MI.getOpcode()) {
6048     default: return nullptr;
6049     case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
6050     case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
6051     case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
6052     case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
6053     }
6054     // Change to CMPXXri r, 0 first.
6055     MI.setDesc(get(NewOpc));
6056     MI.getOperand(1).ChangeToImmediate(0);
6057   } else if (Ops.size() != 1)
6058     return nullptr;
6059 
6060   // Make sure the subregisters match.
6061   // Otherwise we risk changing the size of the load.
6062   if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
6063     return nullptr;
6064 
6065   SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
6066   switch (LoadMI.getOpcode()) {
6067   case X86::MMX_SET0:
6068   case X86::V_SET0:
6069   case X86::V_SETALLONES:
6070   case X86::AVX2_SETALLONES:
6071   case X86::AVX1_SETALLONES:
6072   case X86::AVX_SET0:
6073   case X86::AVX512_128_SET0:
6074   case X86::AVX512_256_SET0:
6075   case X86::AVX512_512_SET0:
6076   case X86::AVX512_512_SETALLONES:
6077   case X86::FsFLD0SD:
6078   case X86::AVX512_FsFLD0SD:
6079   case X86::FsFLD0SS:
6080   case X86::AVX512_FsFLD0SS:
6081   case X86::FsFLD0F128:
6082   case X86::AVX512_FsFLD0F128: {
6083     // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
6084     // Create a constant-pool entry and operands to load from it.
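         // Illustrative sketch (register numbers assumed): with %1 = V_SET0,
         // folding %1 into ANDPSrr %0, %1 produces roughly
         //   %2:vr128 = ANDPSrm %0, $rip, 1, $noreg, %const.0, $noreg
         // (with $rip as the base in 64-bit PIC mode, $noreg otherwise), where
         // %const.0 is a 16-byte all-zeros constant-pool entry.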
6085 
6086     // The medium and large code models can't fold loads this way.
6087     if (MF.getTarget().getCodeModel() != CodeModel::Small &&
6088         MF.getTarget().getCodeModel() != CodeModel::Kernel)
6089       return nullptr;
6090 
6091     // x86-32 PIC requires a PIC base register for constant pools.
6092     unsigned PICBase = 0;
6093     if (MF.getTarget().isPositionIndependent()) {
6094       if (Subtarget.is64Bit())
6095         PICBase = X86::RIP;
6096       else
6097         // FIXME: PICBase = getGlobalBaseReg(&MF);
6098         // This doesn't work for several reasons.
6099         // 1. GlobalBaseReg may have been spilled.
6100         // 2. It may not be live at MI.
6101         return nullptr;
6102     }
6103 
6104     // Create a constant-pool entry.
6105     MachineConstantPool &MCP = *MF.getConstantPool();
6106     Type *Ty;
6107     unsigned Opc = LoadMI.getOpcode();
6108     if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
6109       Ty = Type::getFloatTy(MF.getFunction().getContext());
6110     else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
6111       Ty = Type::getDoubleTy(MF.getFunction().getContext());
6112     else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128)
6113       Ty = Type::getFP128Ty(MF.getFunction().getContext());
6114     else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
6115       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6116                                 16);
6117     else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
6118              Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
6119       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6120                                 8);
6121     else if (Opc == X86::MMX_SET0)
6122       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6123                                 2);
6124     else
6125       Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6126                                 4);
6127 
6128     bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
6129                       Opc == X86::AVX512_512_SETALLONES ||
6130                       Opc == X86::AVX1_SETALLONES);
6131     const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
6132                                     Constant::getNullValue(Ty);
6133     unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
6134 
6135     // Create operands to load from the constant pool entry.
6136     MOs.push_back(MachineOperand::CreateReg(PICBase, false));
6137     MOs.push_back(MachineOperand::CreateImm(1));
6138     MOs.push_back(MachineOperand::CreateReg(0, false));
6139     MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
6140     MOs.push_back(MachineOperand::CreateReg(0, false));
6141     break;
6142   }
6143   default: {
6144     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6145       return nullptr;
6146 
6147     // Folding a normal load. Just copy the load's address operands.
6148     MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
6149                LoadMI.operands_begin() + NumOps);
6150     break;
6151   }
6152   }
6153   return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
6154                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
6155 }
6156 
6157 static SmallVector<MachineMemOperand *, 2>
6158 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6159   SmallVector<MachineMemOperand *, 2> LoadMMOs;
6160 
6161   for (MachineMemOperand *MMO : MMOs) {
6162     if (!MMO->isLoad())
6163       continue;
6164 
6165     if (!MMO->isStore()) {
6166       // Reuse the MMO.
6167       LoadMMOs.push_back(MMO);
6168     } else {
6169       // Clone the MMO and unset the store flag.
6170       LoadMMOs.push_back(MF.getMachineMemOperand(
6171           MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
6172     }
6173   }
6174 
6175   return LoadMMOs;
6176 }
6177 
6178 static SmallVector<MachineMemOperand *, 2>
6179 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6180   SmallVector<MachineMemOperand *, 2> StoreMMOs;
6181 
6182   for (MachineMemOperand *MMO : MMOs) {
6183     if (!MMO->isStore())
6184       continue;
6185 
6186     if (!MMO->isLoad()) {
6187       // Reuse the MMO.
6188       StoreMMOs.push_back(MMO);
6189     } else {
6190       // Clone the MMO and unset the load flag.
6191       StoreMMOs.push_back(MF.getMachineMemOperand(
6192           MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
6193     }
6194   }
6195 
6196   return StoreMMOs;
6197 }
6198 
6199 static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I,
6200                                    const TargetRegisterClass *RC,
6201                                    const X86Subtarget &STI) {
6202   assert(STI.hasAVX512() && "Expected at least AVX512!");
6203   unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
6204   assert((SpillSize == 64 || STI.hasVLX()) &&
6205          "Can't broadcast less than 64 bytes without AVX512VL!");
6206 
6207   switch (I->Flags & TB_BCAST_MASK) {
6208   default: llvm_unreachable("Unexpected broadcast type!");
6209   case TB_BCAST_D:
6210     switch (SpillSize) {
6211     default: llvm_unreachable("Unknown spill size");
6212     case 16: return X86::VPBROADCASTDZ128rm;
6213     case 32: return X86::VPBROADCASTDZ256rm;
6214     case 64: return X86::VPBROADCASTDZrm;
6215     }
6216     break;
6217   case TB_BCAST_Q:
6218     switch (SpillSize) {
6219     default: llvm_unreachable("Unknown spill size");
6220     case 16: return X86::VPBROADCASTQZ128rm;
6221     case 32: return X86::VPBROADCASTQZ256rm;
6222     case 64: return X86::VPBROADCASTQZrm;
6223     }
6224     break;
6225   case TB_BCAST_SS:
6226     switch (SpillSize) {
6227     default: llvm_unreachable("Unknown spill size");
6228     case 16: return X86::VBROADCASTSSZ128rm;
6229     case 32: return X86::VBROADCASTSSZ256rm;
6230     case 64: return X86::VBROADCASTSSZrm;
6231     }
6232     break;
6233   case TB_BCAST_SD:
6234     switch (SpillSize) {
6235     default: llvm_unreachable("Unknown spill size");
6236     case 16: return X86::VMOVDDUPZ128rm;
6237     case 32: return X86::VBROADCASTSDZ256rm;
6238     case 64: return X86::VBROADCASTSDZrm;
6239     }
6240     break;
6241   }
6242 }
6243 
6244 bool X86InstrInfo::unfoldMemoryOperand(
6245     MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
6246     bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
6247   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
6248   if (I == nullptr)
6249     return false;
6250   unsigned Opc = I->DstOp;
6251   unsigned Index = I->Flags & TB_INDEX_MASK;
6252   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6253   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6254   bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6255   if (UnfoldLoad && !FoldedLoad)
6256     return false;
6257   UnfoldLoad &= FoldedLoad;
6258   if (UnfoldStore && !FoldedStore)
6259     return false;
6260   UnfoldStore &= FoldedStore;
6261 
6262   const MCInstrDesc &MCID = get(Opc);
6263 
6264   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6265   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6266   // TODO: Check if 32-byte or greater accesses are slow too?
6267   if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
6268       Subtarget.isUnalignedMem16Slow())
6269     // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
6270     // conservatively assume the address is unaligned. That's bad for
6271     // performance.
6272     return false;
6273   SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
6274   SmallVector<MachineOperand,2> BeforeOps;
6275   SmallVector<MachineOperand,2> AfterOps;
6276   SmallVector<MachineOperand,4> ImpOps;
6277   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6278     MachineOperand &Op = MI.getOperand(i);
6279     if (i >= Index && i < Index + X86::AddrNumOperands)
6280       AddrOps.push_back(Op);
6281     else if (Op.isReg() && Op.isImplicit())
6282       ImpOps.push_back(Op);
6283     else if (i < Index)
6284       BeforeOps.push_back(Op);
6285     else if (i > Index)
6286       AfterOps.push_back(Op);
6287   }
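       // Illustrative sketch (operands assumed, EFLAGS defs omitted): unfolding
       //   %eax = ADD32rm %eax(tied), %rdi, 1, $noreg, 0, $noreg
       // with UnfoldLoad set first emits a MOV32rm of that address into Reg and
       // then rebuilds the arithmetic as
       //   %eax = ADD32rr %eax(tied), Reg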
6288 
6289   // Emit the load or broadcast instruction.
6290   if (UnfoldLoad) {
6291     auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
6292 
6293     unsigned Opc;
6294     if (FoldedBCast) {
6295       Opc = getBroadcastOpcode(I, RC, Subtarget);
6296     } else {
6297       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6298       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6299       Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
6300     }
6301 
6302     DebugLoc DL;
6303     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
6304     for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6305       MIB.add(AddrOps[i]);
6306     MIB.setMemRefs(MMOs);
6307     NewMIs.push_back(MIB);
6308 
6309     if (UnfoldStore) {
6310       // Address operands cannot be marked isKill.
6311       for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
6312         MachineOperand &MO = NewMIs[0]->getOperand(i);
6313         if (MO.isReg())
6314           MO.setIsKill(false);
6315       }
6316     }
6317   }
6318 
6319   // Emit the data processing instruction.
6320   MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
6321   MachineInstrBuilder MIB(MF, DataMI);
6322 
6323   if (FoldedStore)
6324     MIB.addReg(Reg, RegState::Define);
6325   for (MachineOperand &BeforeOp : BeforeOps)
6326     MIB.add(BeforeOp);
6327   if (FoldedLoad)
6328     MIB.addReg(Reg);
6329   for (MachineOperand &AfterOp : AfterOps)
6330     MIB.add(AfterOp);
6331   for (MachineOperand &ImpOp : ImpOps) {
6332     MIB.addReg(ImpOp.getReg(),
6333                getDefRegState(ImpOp.isDef()) |
6334                RegState::Implicit |
6335                getKillRegState(ImpOp.isKill()) |
6336                getDeadRegState(ImpOp.isDead()) |
6337                getUndefRegState(ImpOp.isUndef()));
6338   }
6339   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6340   switch (DataMI->getOpcode()) {
6341   default: break;
6342   case X86::CMP64ri32:
6343   case X86::CMP64ri8:
6344   case X86::CMP32ri:
6345   case X86::CMP32ri8:
6346   case X86::CMP16ri:
6347   case X86::CMP16ri8:
6348   case X86::CMP8ri: {
6349     MachineOperand &MO0 = DataMI->getOperand(0);
6350     MachineOperand &MO1 = DataMI->getOperand(1);
6351     if (MO1.getImm() == 0) {
6352       unsigned NewOpc;
6353       switch (DataMI->getOpcode()) {
6354       default: llvm_unreachable("Unreachable!");
6355       case X86::CMP64ri8:
6356       case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
6357       case X86::CMP32ri8:
6358       case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
6359       case X86::CMP16ri8:
6360       case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
6361       case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
6362       }
6363       DataMI->setDesc(get(NewOpc));
6364       MO1.ChangeToRegister(MO0.getReg(), false);
6365     }
6366   }
6367   }
6368   NewMIs.push_back(DataMI);
6369 
6370   // Emit the store instruction.
6371   if (UnfoldStore) {
6372     const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
6373     auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
6374     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
6375     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6376     unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
6377     DebugLoc DL;
6378     MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
6379     for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6380       MIB.add(AddrOps[i]);
6381     MIB.addReg(Reg, RegState::Kill);
6382     MIB.setMemRefs(MMOs);
6383     NewMIs.push_back(MIB);
6384   }
6385 
6386   return true;
6387 }
6388 
6389 bool
6390 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
6391                                   SmallVectorImpl<SDNode*> &NewNodes) const {
6392   if (!N->isMachineOpcode())
6393     return false;
6394 
6395   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
6396   if (I == nullptr)
6397     return false;
6398   unsigned Opc = I->DstOp;
6399   unsigned Index = I->Flags & TB_INDEX_MASK;
6400   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6401   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6402   bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6403   const MCInstrDesc &MCID = get(Opc);
6404   MachineFunction &MF = DAG.getMachineFunction();
6405   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6406   const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6407   unsigned NumDefs = MCID.NumDefs;
6408   std::vector<SDValue> AddrOps;
6409   std::vector<SDValue> BeforeOps;
6410   std::vector<SDValue> AfterOps;
6411   SDLoc dl(N);
6412   unsigned NumOps = N->getNumOperands();
6413   for (unsigned i = 0; i != NumOps-1; ++i) {
6414     SDValue Op = N->getOperand(i);
6415     if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
6416       AddrOps.push_back(Op);
6417     else if (i < Index-NumDefs)
6418       BeforeOps.push_back(Op);
6419     else if (i > Index-NumDefs)
6420       AfterOps.push_back(Op);
6421   }
6422   SDValue Chain = N->getOperand(NumOps-1);
6423   AddrOps.push_back(Chain);
6424 
6425   // Emit the load instruction.
6426   SDNode *Load = nullptr;
6427   if (FoldedLoad) {
6428     EVT VT = *TRI.legalclasstypes_begin(*RC);
6429     auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6430     if (MMOs.empty() && RC == &X86::VR128RegClass &&
6431         Subtarget.isUnalignedMem16Slow())
6432       // Do not introduce a slow unaligned load.
6433       return false;
6434     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6435     // memory access is slow above.
6436 
6437     unsigned Opc;
6438     if (FoldedBCast) {
6439       Opc = getBroadcastOpcode(I, RC, Subtarget);
6440     } else {
6441       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6442       bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6443       Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
6444     }
6445 
6446     Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
6447     NewNodes.push_back(Load);
6448 
6449     // Preserve memory reference information.
6450     DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
6451   }
6452 
6453   // Emit the data processing instruction.
6454   std::vector<EVT> VTs;
6455   const TargetRegisterClass *DstRC = nullptr;
6456   if (MCID.getNumDefs() > 0) {
6457     DstRC = getRegClass(MCID, 0, &RI, MF);
6458     VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
6459   }
6460   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
6461     EVT VT = N->getValueType(i);
6462     if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
6463       VTs.push_back(VT);
6464   }
6465   if (Load)
6466     BeforeOps.push_back(SDValue(Load, 0));
6467   llvm::append_range(BeforeOps, AfterOps);
6468   // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6469   switch (Opc) {
6470     default: break;
6471     case X86::CMP64ri32:
6472     case X86::CMP64ri8:
6473     case X86::CMP32ri:
6474     case X86::CMP32ri8:
6475     case X86::CMP16ri:
6476     case X86::CMP16ri8:
6477     case X86::CMP8ri:
6478       if (isNullConstant(BeforeOps[1])) {
6479         switch (Opc) {
6480           default: llvm_unreachable("Unreachable!");
6481           case X86::CMP64ri8:
6482           case X86::CMP64ri32: Opc = X86::TEST64rr; break;
6483           case X86::CMP32ri8:
6484           case X86::CMP32ri:   Opc = X86::TEST32rr; break;
6485           case X86::CMP16ri8:
6486           case X86::CMP16ri:   Opc = X86::TEST16rr; break;
6487           case X86::CMP8ri:    Opc = X86::TEST8rr; break;
6488         }
6489         BeforeOps[1] = BeforeOps[0];
6490       }
6491   }
6492   SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
6493   NewNodes.push_back(NewNode);
6494 
6495   // Emit the store instruction.
6496   if (FoldedStore) {
6497     AddrOps.pop_back();
6498     AddrOps.push_back(SDValue(NewNode, 0));
6499     AddrOps.push_back(Chain);
6500     auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6501     if (MMOs.empty() && RC == &X86::VR128RegClass &&
6502         Subtarget.isUnalignedMem16Slow())
6503       // Do not introduce a slow unaligned store.
6504       return false;
6505     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6506     // memory access is slow above.
6507     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6508     bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6509     SDNode *Store =
6510         DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
6511                            dl, MVT::Other, AddrOps);
6512     NewNodes.push_back(Store);
6513 
6514     // Preserve memory reference information.
6515     DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
6516   }
6517 
6518   return true;
6519 }
6520 
6521 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
6522                                       bool UnfoldLoad, bool UnfoldStore,
6523                                       unsigned *LoadRegIndex) const {
6524   const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
6525   if (I == nullptr)
6526     return 0;
6527   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6528   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6529   if (UnfoldLoad && !FoldedLoad)
6530     return 0;
6531   if (UnfoldStore && !FoldedStore)
6532     return 0;
6533   if (LoadRegIndex)
6534     *LoadRegIndex = I->Flags & TB_INDEX_MASK;
6535   return I->DstOp;
6536 }
6537 
6538 bool
6539 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
6540                                      int64_t &Offset1, int64_t &Offset2) const {
6541   if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
6542     return false;
6543   unsigned Opc1 = Load1->getMachineOpcode();
6544   unsigned Opc2 = Load2->getMachineOpcode();
6545   switch (Opc1) {
6546   default: return false;
6547   case X86::MOV8rm:
6548   case X86::MOV16rm:
6549   case X86::MOV32rm:
6550   case X86::MOV64rm:
6551   case X86::LD_Fp32m:
6552   case X86::LD_Fp64m:
6553   case X86::LD_Fp80m:
6554   case X86::MOVSSrm:
6555   case X86::MOVSSrm_alt:
6556   case X86::MOVSDrm:
6557   case X86::MOVSDrm_alt:
6558   case X86::MMX_MOVD64rm:
6559   case X86::MMX_MOVQ64rm:
6560   case X86::MOVAPSrm:
6561   case X86::MOVUPSrm:
6562   case X86::MOVAPDrm:
6563   case X86::MOVUPDrm:
6564   case X86::MOVDQArm:
6565   case X86::MOVDQUrm:
6566   // AVX load instructions
6567   case X86::VMOVSSrm:
6568   case X86::VMOVSSrm_alt:
6569   case X86::VMOVSDrm:
6570   case X86::VMOVSDrm_alt:
6571   case X86::VMOVAPSrm:
6572   case X86::VMOVUPSrm:
6573   case X86::VMOVAPDrm:
6574   case X86::VMOVUPDrm:
6575   case X86::VMOVDQArm:
6576   case X86::VMOVDQUrm:
6577   case X86::VMOVAPSYrm:
6578   case X86::VMOVUPSYrm:
6579   case X86::VMOVAPDYrm:
6580   case X86::VMOVUPDYrm:
6581   case X86::VMOVDQAYrm:
6582   case X86::VMOVDQUYrm:
6583   // AVX512 load instructions
6584   case X86::VMOVSSZrm:
6585   case X86::VMOVSSZrm_alt:
6586   case X86::VMOVSDZrm:
6587   case X86::VMOVSDZrm_alt:
6588   case X86::VMOVAPSZ128rm:
6589   case X86::VMOVUPSZ128rm:
6590   case X86::VMOVAPSZ128rm_NOVLX:
6591   case X86::VMOVUPSZ128rm_NOVLX:
6592   case X86::VMOVAPDZ128rm:
6593   case X86::VMOVUPDZ128rm:
6594   case X86::VMOVDQU8Z128rm:
6595   case X86::VMOVDQU16Z128rm:
6596   case X86::VMOVDQA32Z128rm:
6597   case X86::VMOVDQU32Z128rm:
6598   case X86::VMOVDQA64Z128rm:
6599   case X86::VMOVDQU64Z128rm:
6600   case X86::VMOVAPSZ256rm:
6601   case X86::VMOVUPSZ256rm:
6602   case X86::VMOVAPSZ256rm_NOVLX:
6603   case X86::VMOVUPSZ256rm_NOVLX:
6604   case X86::VMOVAPDZ256rm:
6605   case X86::VMOVUPDZ256rm:
6606   case X86::VMOVDQU8Z256rm:
6607   case X86::VMOVDQU16Z256rm:
6608   case X86::VMOVDQA32Z256rm:
6609   case X86::VMOVDQU32Z256rm:
6610   case X86::VMOVDQA64Z256rm:
6611   case X86::VMOVDQU64Z256rm:
6612   case X86::VMOVAPSZrm:
6613   case X86::VMOVUPSZrm:
6614   case X86::VMOVAPDZrm:
6615   case X86::VMOVUPDZrm:
6616   case X86::VMOVDQU8Zrm:
6617   case X86::VMOVDQU16Zrm:
6618   case X86::VMOVDQA32Zrm:
6619   case X86::VMOVDQU32Zrm:
6620   case X86::VMOVDQA64Zrm:
6621   case X86::VMOVDQU64Zrm:
6622   case X86::KMOVBkm:
6623   case X86::KMOVWkm:
6624   case X86::KMOVDkm:
6625   case X86::KMOVQkm:
6626     break;
6627   }
6628   switch (Opc2) {
6629   default: return false;
6630   case X86::MOV8rm:
6631   case X86::MOV16rm:
6632   case X86::MOV32rm:
6633   case X86::MOV64rm:
6634   case X86::LD_Fp32m:
6635   case X86::LD_Fp64m:
6636   case X86::LD_Fp80m:
6637   case X86::MOVSSrm:
6638   case X86::MOVSSrm_alt:
6639   case X86::MOVSDrm:
6640   case X86::MOVSDrm_alt:
6641   case X86::MMX_MOVD64rm:
6642   case X86::MMX_MOVQ64rm:
6643   case X86::MOVAPSrm:
6644   case X86::MOVUPSrm:
6645   case X86::MOVAPDrm:
6646   case X86::MOVUPDrm:
6647   case X86::MOVDQArm:
6648   case X86::MOVDQUrm:
6649   // AVX load instructions
6650   case X86::VMOVSSrm:
6651   case X86::VMOVSSrm_alt:
6652   case X86::VMOVSDrm:
6653   case X86::VMOVSDrm_alt:
6654   case X86::VMOVAPSrm:
6655   case X86::VMOVUPSrm:
6656   case X86::VMOVAPDrm:
6657   case X86::VMOVUPDrm:
6658   case X86::VMOVDQArm:
6659   case X86::VMOVDQUrm:
6660   case X86::VMOVAPSYrm:
6661   case X86::VMOVUPSYrm:
6662   case X86::VMOVAPDYrm:
6663   case X86::VMOVUPDYrm:
6664   case X86::VMOVDQAYrm:
6665   case X86::VMOVDQUYrm:
6666   // AVX512 load instructions
6667   case X86::VMOVSSZrm:
6668   case X86::VMOVSSZrm_alt:
6669   case X86::VMOVSDZrm:
6670   case X86::VMOVSDZrm_alt:
6671   case X86::VMOVAPSZ128rm:
6672   case X86::VMOVUPSZ128rm:
6673   case X86::VMOVAPSZ128rm_NOVLX:
6674   case X86::VMOVUPSZ128rm_NOVLX:
6675   case X86::VMOVAPDZ128rm:
6676   case X86::VMOVUPDZ128rm:
6677   case X86::VMOVDQU8Z128rm:
6678   case X86::VMOVDQU16Z128rm:
6679   case X86::VMOVDQA32Z128rm:
6680   case X86::VMOVDQU32Z128rm:
6681   case X86::VMOVDQA64Z128rm:
6682   case X86::VMOVDQU64Z128rm:
6683   case X86::VMOVAPSZ256rm:
6684   case X86::VMOVUPSZ256rm:
6685   case X86::VMOVAPSZ256rm_NOVLX:
6686   case X86::VMOVUPSZ256rm_NOVLX:
6687   case X86::VMOVAPDZ256rm:
6688   case X86::VMOVUPDZ256rm:
6689   case X86::VMOVDQU8Z256rm:
6690   case X86::VMOVDQU16Z256rm:
6691   case X86::VMOVDQA32Z256rm:
6692   case X86::VMOVDQU32Z256rm:
6693   case X86::VMOVDQA64Z256rm:
6694   case X86::VMOVDQU64Z256rm:
6695   case X86::VMOVAPSZrm:
6696   case X86::VMOVUPSZrm:
6697   case X86::VMOVAPDZrm:
6698   case X86::VMOVUPDZrm:
6699   case X86::VMOVDQU8Zrm:
6700   case X86::VMOVDQU16Zrm:
6701   case X86::VMOVDQA32Zrm:
6702   case X86::VMOVDQU32Zrm:
6703   case X86::VMOVDQA64Zrm:
6704   case X86::VMOVDQU64Zrm:
6705   case X86::KMOVBkm:
6706   case X86::KMOVWkm:
6707   case X86::KMOVDkm:
6708   case X86::KMOVQkm:
6709     break;
6710   }
6711 
6712   // Check whether both loads have the same value for the given operand index.
6713   auto HasSameOp = [&](int I) {
6714     return Load1->getOperand(I) == Load2->getOperand(I);
6715   };
6716 
6717   // All operands except the displacement should match.
6718   if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
6719       !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
6720     return false;
6721 
6722   // Chain Operand must be the same.
6723   if (!HasSameOp(5))
6724     return false;
6725 
6726   // Now let's examine if the displacements are constants.
6727   auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
6728   auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
6729   if (!Disp1 || !Disp2)
6730     return false;
6731 
6732   Offset1 = Disp1->getSExtValue();
6733   Offset2 = Disp2->getSExtValue();
6734   return true;
6735 }
6736 
6737 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
6738                                            int64_t Offset1, int64_t Offset2,
6739                                            unsigned NumLoads) const {
6740   assert(Offset2 > Offset1);
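       // I.e. only cluster loads that lie within roughly 512 bytes of each other.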
6741   if ((Offset2 - Offset1) / 8 > 64)
6742     return false;
6743 
6744   unsigned Opc1 = Load1->getMachineOpcode();
6745   unsigned Opc2 = Load2->getMachineOpcode();
6746   if (Opc1 != Opc2)
6747     return false;  // FIXME: overly conservative?
6748 
6749   switch (Opc1) {
6750   default: break;
6751   case X86::LD_Fp32m:
6752   case X86::LD_Fp64m:
6753   case X86::LD_Fp80m:
6754   case X86::MMX_MOVD64rm:
6755   case X86::MMX_MOVQ64rm:
6756     return false;
6757   }
6758 
6759   EVT VT = Load1->getValueType(0);
6760   switch (VT.getSimpleVT().SimpleTy) {
6761   default:
6762     // XMM registers. In 64-bit mode we can be a bit more aggressive since we
6763     // have 16 of them to play with.
6764     if (Subtarget.is64Bit()) {
6765       if (NumLoads >= 3)
6766         return false;
6767     } else if (NumLoads) {
6768       return false;
6769     }
6770     break;
6771   case MVT::i8:
6772   case MVT::i16:
6773   case MVT::i32:
6774   case MVT::i64:
6775   case MVT::f32:
6776   case MVT::f64:
6777     if (NumLoads)
6778       return false;
6779     break;
6780   }
6781 
6782   return true;
6783 }
6784 
6785 bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
6786                                         const MachineBasicBlock *MBB,
6787                                         const MachineFunction &MF) const {
6788 
6789   // ENDBR instructions should not be scheduled around.
6790   unsigned Opcode = MI.getOpcode();
6791   if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32)
6792     return true;
6793 
6794   return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
6795 }
6796 
6797 bool X86InstrInfo::
6798 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
6799   assert(Cond.size() == 1 && "Invalid X86 branch condition!");
6800   X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
6801   Cond[0].setImm(GetOppositeBranchCondition(CC));
6802   return false;
6803 }
6804 
6805 bool X86InstrInfo::
6806 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
6807   // FIXME: Return false for x87 stack register classes for now. We can't
6808   // allow any loads of these registers before FpGet_ST0_80.
6809   return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
6810            RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
6811            RC == &X86::RFP80RegClass);
6812 }
6813 
6814 /// Return a virtual register initialized with the global base register
6815 /// value. Output instructions required to
6816 /// initialize the register in the function entry block, if necessary.
6817 ///
6818 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
6819 ///
6820 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
6821   assert((!Subtarget.is64Bit() ||
6822           MF->getTarget().getCodeModel() == CodeModel::Medium ||
6823           MF->getTarget().getCodeModel() == CodeModel::Large) &&
6824          "X86-64 PIC uses RIP relative addressing");
6825 
6826   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
6827   Register GlobalBaseReg = X86FI->getGlobalBaseReg();
6828   if (GlobalBaseReg != 0)
6829     return GlobalBaseReg;
6830 
6831   // Create the register. The code to initialize it is inserted
6832   // later, by the CGBR pass (below).
6833   MachineRegisterInfo &RegInfo = MF->getRegInfo();
6834   GlobalBaseReg = RegInfo.createVirtualRegister(
6835       Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
6836   X86FI->setGlobalBaseReg(GlobalBaseReg);
6837   return GlobalBaseReg;
6838 }
6839 
6840 // These are the replaceable SSE instructions. Some of these have Int variants
6841 // that we don't include here. We don't want to replace instructions selected
6842 // by intrinsics.
6843 static const uint16_t ReplaceableInstrs[][3] = {
6844   //PackedSingle     PackedDouble    PackedInt
6845   { X86::MOVAPSmr,   X86::MOVAPDmr,  X86::MOVDQAmr  },
6846   { X86::MOVAPSrm,   X86::MOVAPDrm,  X86::MOVDQArm  },
6847   { X86::MOVAPSrr,   X86::MOVAPDrr,  X86::MOVDQArr  },
6848   { X86::MOVUPSmr,   X86::MOVUPDmr,  X86::MOVDQUmr  },
6849   { X86::MOVUPSrm,   X86::MOVUPDrm,  X86::MOVDQUrm  },
6850   { X86::MOVLPSmr,   X86::MOVLPDmr,  X86::MOVPQI2QImr },
6851   { X86::MOVSDmr,    X86::MOVSDmr,   X86::MOVPQI2QImr },
6852   { X86::MOVSSmr,    X86::MOVSSmr,   X86::MOVPDI2DImr },
6853   { X86::MOVSDrm,    X86::MOVSDrm,   X86::MOVQI2PQIrm },
6854   { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
6855   { X86::MOVSSrm,    X86::MOVSSrm,   X86::MOVDI2PDIrm },
6856   { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
6857   { X86::MOVNTPSmr,  X86::MOVNTPDmr, X86::MOVNTDQmr },
6858   { X86::ANDNPSrm,   X86::ANDNPDrm,  X86::PANDNrm   },
6859   { X86::ANDNPSrr,   X86::ANDNPDrr,  X86::PANDNrr   },
6860   { X86::ANDPSrm,    X86::ANDPDrm,   X86::PANDrm    },
6861   { X86::ANDPSrr,    X86::ANDPDrr,   X86::PANDrr    },
6862   { X86::ORPSrm,     X86::ORPDrm,    X86::PORrm     },
6863   { X86::ORPSrr,     X86::ORPDrr,    X86::PORrr     },
6864   { X86::XORPSrm,    X86::XORPDrm,   X86::PXORrm    },
6865   { X86::XORPSrr,    X86::XORPDrr,   X86::PXORrr    },
6866   { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
6867   { X86::MOVLHPSrr,  X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
6868   { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
6869   { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
6870   { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
6871   { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
6872   { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
6873   { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
6874   { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
6875   { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
6876   // AVX 128-bit support
6877   { X86::VMOVAPSmr,  X86::VMOVAPDmr,  X86::VMOVDQAmr  },
6878   { X86::VMOVAPSrm,  X86::VMOVAPDrm,  X86::VMOVDQArm  },
6879   { X86::VMOVAPSrr,  X86::VMOVAPDrr,  X86::VMOVDQArr  },
6880   { X86::VMOVUPSmr,  X86::VMOVUPDmr,  X86::VMOVDQUmr  },
6881   { X86::VMOVUPSrm,  X86::VMOVUPDrm,  X86::VMOVDQUrm  },
6882   { X86::VMOVLPSmr,  X86::VMOVLPDmr,  X86::VMOVPQI2QImr },
6883   { X86::VMOVSDmr,   X86::VMOVSDmr,   X86::VMOVPQI2QImr },
6884   { X86::VMOVSSmr,   X86::VMOVSSmr,   X86::VMOVPDI2DImr },
6885   { X86::VMOVSDrm,   X86::VMOVSDrm,   X86::VMOVQI2PQIrm },
6886   { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
6887   { X86::VMOVSSrm,   X86::VMOVSSrm,   X86::VMOVDI2PDIrm },
6888   { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
6889   { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
6890   { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNrm   },
6891   { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNrr   },
6892   { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDrm    },
6893   { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDrr    },
6894   { X86::VORPSrm,    X86::VORPDrm,    X86::VPORrm     },
6895   { X86::VORPSrr,    X86::VORPDrr,    X86::VPORrr     },
6896   { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORrm    },
6897   { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORrr    },
6898   { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
6899   { X86::VMOVLHPSrr,  X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
6900   { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
6901   { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
6902   { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
6903   { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
6904   { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
6905   { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
6906   { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
6907   { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
6908   // AVX 256-bit support
6909   { X86::VMOVAPSYmr,   X86::VMOVAPDYmr,   X86::VMOVDQAYmr  },
6910   { X86::VMOVAPSYrm,   X86::VMOVAPDYrm,   X86::VMOVDQAYrm  },
6911   { X86::VMOVAPSYrr,   X86::VMOVAPDYrr,   X86::VMOVDQAYrr  },
6912   { X86::VMOVUPSYmr,   X86::VMOVUPDYmr,   X86::VMOVDQUYmr  },
6913   { X86::VMOVUPSYrm,   X86::VMOVUPDYrm,   X86::VMOVDQUYrm  },
6914   { X86::VMOVNTPSYmr,  X86::VMOVNTPDYmr,  X86::VMOVNTDQYmr },
6915   { X86::VPERMPSYrm,   X86::VPERMPSYrm,   X86::VPERMDYrm },
6916   { X86::VPERMPSYrr,   X86::VPERMPSYrr,   X86::VPERMDYrr },
6917   { X86::VPERMPDYmi,   X86::VPERMPDYmi,   X86::VPERMQYmi },
6918   { X86::VPERMPDYri,   X86::VPERMPDYri,   X86::VPERMQYri },
6919   // AVX512 support
6920   { X86::VMOVLPSZ128mr,  X86::VMOVLPDZ128mr,  X86::VMOVPQI2QIZmr  },
6921   { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
6922   { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
6923   { X86::VMOVNTPSZmr,    X86::VMOVNTPDZmr,    X86::VMOVNTDQZmr    },
6924   { X86::VMOVSDZmr,      X86::VMOVSDZmr,      X86::VMOVPQI2QIZmr  },
6925   { X86::VMOVSSZmr,      X86::VMOVSSZmr,      X86::VMOVPDI2DIZmr  },
6926   { X86::VMOVSDZrm,      X86::VMOVSDZrm,      X86::VMOVQI2PQIZrm  },
6927   { X86::VMOVSDZrm_alt,  X86::VMOVSDZrm_alt,  X86::VMOVQI2PQIZrm  },
6928   { X86::VMOVSSZrm,      X86::VMOVSSZrm,      X86::VMOVDI2PDIZrm  },
6929   { X86::VMOVSSZrm_alt,  X86::VMOVSSZrm_alt,  X86::VMOVDI2PDIZrm  },
6930   { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr },
6931   { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm },
6932   { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr },
6933   { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm },
6934   { X86::VBROADCASTSSZrr,   X86::VBROADCASTSSZrr,   X86::VPBROADCASTDZrr },
6935   { X86::VBROADCASTSSZrm,   X86::VBROADCASTSSZrm,   X86::VPBROADCASTDZrm },
6936   { X86::VMOVDDUPZ128rr,    X86::VMOVDDUPZ128rr,    X86::VPBROADCASTQZ128rr },
6937   { X86::VMOVDDUPZ128rm,    X86::VMOVDDUPZ128rm,    X86::VPBROADCASTQZ128rm },
6938   { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr },
6939   { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm },
6940   { X86::VBROADCASTSDZrr,   X86::VBROADCASTSDZrr,   X86::VPBROADCASTQZrr },
6941   { X86::VBROADCASTSDZrm,   X86::VBROADCASTSDZrm,   X86::VPBROADCASTQZrm },
6942   { X86::VINSERTF32x4Zrr,   X86::VINSERTF32x4Zrr,   X86::VINSERTI32x4Zrr },
6943   { X86::VINSERTF32x4Zrm,   X86::VINSERTF32x4Zrm,   X86::VINSERTI32x4Zrm },
6944   { X86::VINSERTF32x8Zrr,   X86::VINSERTF32x8Zrr,   X86::VINSERTI32x8Zrr },
6945   { X86::VINSERTF32x8Zrm,   X86::VINSERTF32x8Zrm,   X86::VINSERTI32x8Zrm },
6946   { X86::VINSERTF64x2Zrr,   X86::VINSERTF64x2Zrr,   X86::VINSERTI64x2Zrr },
6947   { X86::VINSERTF64x2Zrm,   X86::VINSERTF64x2Zrm,   X86::VINSERTI64x2Zrm },
6948   { X86::VINSERTF64x4Zrr,   X86::VINSERTF64x4Zrr,   X86::VINSERTI64x4Zrr },
6949   { X86::VINSERTF64x4Zrm,   X86::VINSERTF64x4Zrm,   X86::VINSERTI64x4Zrm },
6950   { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
6951   { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
6952   { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
6953   { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
6954   { X86::VEXTRACTF32x4Zrr,   X86::VEXTRACTF32x4Zrr,   X86::VEXTRACTI32x4Zrr },
6955   { X86::VEXTRACTF32x4Zmr,   X86::VEXTRACTF32x4Zmr,   X86::VEXTRACTI32x4Zmr },
6956   { X86::VEXTRACTF32x8Zrr,   X86::VEXTRACTF32x8Zrr,   X86::VEXTRACTI32x8Zrr },
6957   { X86::VEXTRACTF32x8Zmr,   X86::VEXTRACTF32x8Zmr,   X86::VEXTRACTI32x8Zmr },
6958   { X86::VEXTRACTF64x2Zrr,   X86::VEXTRACTF64x2Zrr,   X86::VEXTRACTI64x2Zrr },
6959   { X86::VEXTRACTF64x2Zmr,   X86::VEXTRACTF64x2Zmr,   X86::VEXTRACTI64x2Zmr },
6960   { X86::VEXTRACTF64x4Zrr,   X86::VEXTRACTF64x4Zrr,   X86::VEXTRACTI64x4Zrr },
6961   { X86::VEXTRACTF64x4Zmr,   X86::VEXTRACTF64x4Zmr,   X86::VEXTRACTI64x4Zmr },
6962   { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
6963   { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
6964   { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
6965   { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
6966   { X86::VPERMILPSmi,        X86::VPERMILPSmi,        X86::VPSHUFDmi },
6967   { X86::VPERMILPSri,        X86::VPERMILPSri,        X86::VPSHUFDri },
6968   { X86::VPERMILPSZ128mi,    X86::VPERMILPSZ128mi,    X86::VPSHUFDZ128mi },
6969   { X86::VPERMILPSZ128ri,    X86::VPERMILPSZ128ri,    X86::VPSHUFDZ128ri },
6970   { X86::VPERMILPSZ256mi,    X86::VPERMILPSZ256mi,    X86::VPSHUFDZ256mi },
6971   { X86::VPERMILPSZ256ri,    X86::VPERMILPSZ256ri,    X86::VPSHUFDZ256ri },
6972   { X86::VPERMILPSZmi,       X86::VPERMILPSZmi,       X86::VPSHUFDZmi },
6973   { X86::VPERMILPSZri,       X86::VPERMILPSZri,       X86::VPSHUFDZri },
6974   { X86::VPERMPSZ256rm,      X86::VPERMPSZ256rm,      X86::VPERMDZ256rm },
6975   { X86::VPERMPSZ256rr,      X86::VPERMPSZ256rr,      X86::VPERMDZ256rr },
6976   { X86::VPERMPDZ256mi,      X86::VPERMPDZ256mi,      X86::VPERMQZ256mi },
6977   { X86::VPERMPDZ256ri,      X86::VPERMPDZ256ri,      X86::VPERMQZ256ri },
6978   { X86::VPERMPDZ256rm,      X86::VPERMPDZ256rm,      X86::VPERMQZ256rm },
6979   { X86::VPERMPDZ256rr,      X86::VPERMPDZ256rr,      X86::VPERMQZ256rr },
6980   { X86::VPERMPSZrm,         X86::VPERMPSZrm,         X86::VPERMDZrm },
6981   { X86::VPERMPSZrr,         X86::VPERMPSZrr,         X86::VPERMDZrr },
6982   { X86::VPERMPDZmi,         X86::VPERMPDZmi,         X86::VPERMQZmi },
6983   { X86::VPERMPDZri,         X86::VPERMPDZri,         X86::VPERMQZri },
6984   { X86::VPERMPDZrm,         X86::VPERMPDZrm,         X86::VPERMQZrm },
6985   { X86::VPERMPDZrr,         X86::VPERMPDZrr,         X86::VPERMQZrr },
6986   { X86::VUNPCKLPDZ256rm,    X86::VUNPCKLPDZ256rm,    X86::VPUNPCKLQDQZ256rm },
6987   { X86::VUNPCKLPDZ256rr,    X86::VUNPCKLPDZ256rr,    X86::VPUNPCKLQDQZ256rr },
6988   { X86::VUNPCKHPDZ256rm,    X86::VUNPCKHPDZ256rm,    X86::VPUNPCKHQDQZ256rm },
6989   { X86::VUNPCKHPDZ256rr,    X86::VUNPCKHPDZ256rr,    X86::VPUNPCKHQDQZ256rr },
6990   { X86::VUNPCKLPSZ256rm,    X86::VUNPCKLPSZ256rm,    X86::VPUNPCKLDQZ256rm },
6991   { X86::VUNPCKLPSZ256rr,    X86::VUNPCKLPSZ256rr,    X86::VPUNPCKLDQZ256rr },
6992   { X86::VUNPCKHPSZ256rm,    X86::VUNPCKHPSZ256rm,    X86::VPUNPCKHDQZ256rm },
6993   { X86::VUNPCKHPSZ256rr,    X86::VUNPCKHPSZ256rr,    X86::VPUNPCKHDQZ256rr },
6994   { X86::VUNPCKLPDZ128rm,    X86::VUNPCKLPDZ128rm,    X86::VPUNPCKLQDQZ128rm },
6995   { X86::VMOVLHPSZrr,        X86::VUNPCKLPDZ128rr,    X86::VPUNPCKLQDQZ128rr },
6996   { X86::VUNPCKHPDZ128rm,    X86::VUNPCKHPDZ128rm,    X86::VPUNPCKHQDQZ128rm },
6997   { X86::VUNPCKHPDZ128rr,    X86::VUNPCKHPDZ128rr,    X86::VPUNPCKHQDQZ128rr },
6998   { X86::VUNPCKLPSZ128rm,    X86::VUNPCKLPSZ128rm,    X86::VPUNPCKLDQZ128rm },
6999   { X86::VUNPCKLPSZ128rr,    X86::VUNPCKLPSZ128rr,    X86::VPUNPCKLDQZ128rr },
7000   { X86::VUNPCKHPSZ128rm,    X86::VUNPCKHPSZ128rm,    X86::VPUNPCKHDQZ128rm },
7001   { X86::VUNPCKHPSZ128rr,    X86::VUNPCKHPSZ128rr,    X86::VPUNPCKHDQZ128rr },
7002   { X86::VUNPCKLPDZrm,       X86::VUNPCKLPDZrm,       X86::VPUNPCKLQDQZrm },
7003   { X86::VUNPCKLPDZrr,       X86::VUNPCKLPDZrr,       X86::VPUNPCKLQDQZrr },
7004   { X86::VUNPCKHPDZrm,       X86::VUNPCKHPDZrm,       X86::VPUNPCKHQDQZrm },
7005   { X86::VUNPCKHPDZrr,       X86::VUNPCKHPDZrr,       X86::VPUNPCKHQDQZrr },
7006   { X86::VUNPCKLPSZrm,       X86::VUNPCKLPSZrm,       X86::VPUNPCKLDQZrm },
7007   { X86::VUNPCKLPSZrr,       X86::VUNPCKLPSZrr,       X86::VPUNPCKLDQZrr },
7008   { X86::VUNPCKHPSZrm,       X86::VUNPCKHPSZrm,       X86::VPUNPCKHDQZrm },
7009   { X86::VUNPCKHPSZrr,       X86::VUNPCKHPSZrr,       X86::VPUNPCKHDQZrr },
7010   { X86::VEXTRACTPSZmr,      X86::VEXTRACTPSZmr,      X86::VPEXTRDZmr },
7011   { X86::VEXTRACTPSZrr,      X86::VEXTRACTPSZrr,      X86::VPEXTRDZrr },
7012 };
7013 
7014 static const uint16_t ReplaceableInstrsAVX2[][3] = {
7015   //PackedSingle       PackedDouble       PackedInt
7016   { X86::VANDNPSYrm,   X86::VANDNPDYrm,   X86::VPANDNYrm   },
7017   { X86::VANDNPSYrr,   X86::VANDNPDYrr,   X86::VPANDNYrr   },
7018   { X86::VANDPSYrm,    X86::VANDPDYrm,    X86::VPANDYrm    },
7019   { X86::VANDPSYrr,    X86::VANDPDYrr,    X86::VPANDYrr    },
7020   { X86::VORPSYrm,     X86::VORPDYrm,     X86::VPORYrm     },
7021   { X86::VORPSYrr,     X86::VORPDYrr,     X86::VPORYrr     },
7022   { X86::VXORPSYrm,    X86::VXORPDYrm,    X86::VPXORYrm    },
7023   { X86::VXORPSYrr,    X86::VXORPDYrr,    X86::VPXORYrr    },
7024   { X86::VPERM2F128rm,   X86::VPERM2F128rm,   X86::VPERM2I128rm },
7025   { X86::VPERM2F128rr,   X86::VPERM2F128rr,   X86::VPERM2I128rr },
7026   { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
7027   { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
7028   { X86::VMOVDDUPrm,     X86::VMOVDDUPrm,     X86::VPBROADCASTQrm},
7029   { X86::VMOVDDUPrr,     X86::VMOVDDUPrr,     X86::VPBROADCASTQrr},
7030   { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
7031   { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
7032   { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
7033   { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
7034   { X86::VBROADCASTF128,  X86::VBROADCASTF128,  X86::VBROADCASTI128 },
7035   { X86::VBLENDPSYrri,    X86::VBLENDPSYrri,    X86::VPBLENDDYrri },
7036   { X86::VBLENDPSYrmi,    X86::VBLENDPSYrmi,    X86::VPBLENDDYrmi },
7037   { X86::VPERMILPSYmi,    X86::VPERMILPSYmi,    X86::VPSHUFDYmi },
7038   { X86::VPERMILPSYri,    X86::VPERMILPSYri,    X86::VPSHUFDYri },
7039   { X86::VUNPCKLPDYrm,    X86::VUNPCKLPDYrm,    X86::VPUNPCKLQDQYrm },
7040   { X86::VUNPCKLPDYrr,    X86::VUNPCKLPDYrr,    X86::VPUNPCKLQDQYrr },
7041   { X86::VUNPCKHPDYrm,    X86::VUNPCKHPDYrm,    X86::VPUNPCKHQDQYrm },
7042   { X86::VUNPCKHPDYrr,    X86::VUNPCKHPDYrr,    X86::VPUNPCKHQDQYrr },
7043   { X86::VUNPCKLPSYrm,    X86::VUNPCKLPSYrm,    X86::VPUNPCKLDQYrm },
7044   { X86::VUNPCKLPSYrr,    X86::VUNPCKLPSYrr,    X86::VPUNPCKLDQYrr },
7045   { X86::VUNPCKHPSYrm,    X86::VUNPCKHPSYrm,    X86::VPUNPCKHDQYrm },
7046   { X86::VUNPCKHPSYrr,    X86::VUNPCKHPSYrr,    X86::VPUNPCKHDQYrr },
7047 };
7048 
7049 static const uint16_t ReplaceableInstrsFP[][3] = {
7050   //PackedSingle         PackedDouble       (no PackedInt form; column 3 is a sentinel)
7051   { X86::MOVLPSrm,       X86::MOVLPDrm,      X86::INSTRUCTION_LIST_END },
7052   { X86::MOVHPSrm,       X86::MOVHPDrm,      X86::INSTRUCTION_LIST_END },
7053   { X86::MOVHPSmr,       X86::MOVHPDmr,      X86::INSTRUCTION_LIST_END },
7054   { X86::VMOVLPSrm,      X86::VMOVLPDrm,     X86::INSTRUCTION_LIST_END },
7055   { X86::VMOVHPSrm,      X86::VMOVHPDrm,     X86::INSTRUCTION_LIST_END },
7056   { X86::VMOVHPSmr,      X86::VMOVHPDmr,     X86::INSTRUCTION_LIST_END },
7057   { X86::VMOVLPSZ128rm,  X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
7058   { X86::VMOVHPSZ128rm,  X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
7059   { X86::VMOVHPSZ128mr,  X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
7060 };
7061 
7062 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
7063   //PackedSingle       PackedDouble       PackedInt
7064   { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
7065   { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
7066   { X86::VINSERTF128rm,  X86::VINSERTF128rm,  X86::VINSERTI128rm },
7067   { X86::VINSERTF128rr,  X86::VINSERTF128rr,  X86::VINSERTI128rr },
7068 };
7069 
7070 static const uint16_t ReplaceableInstrsAVX512[][4] = {
7071   // Two integer columns for 64-bit and 32-bit elements.
7072   //PackedSingle        PackedDouble        PackedInt             PackedInt
7073   { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr  },
7074   { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm  },
7075   { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr  },
7076   { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr  },
7077   { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm  },
7078   { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr  },
7079   { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm  },
7080   { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr  },
7081   { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr  },
7082   { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm  },
7083   { X86::VMOVAPSZmr,    X86::VMOVAPDZmr,    X86::VMOVDQA64Zmr,    X86::VMOVDQA32Zmr     },
7084   { X86::VMOVAPSZrm,    X86::VMOVAPDZrm,    X86::VMOVDQA64Zrm,    X86::VMOVDQA32Zrm     },
7085   { X86::VMOVAPSZrr,    X86::VMOVAPDZrr,    X86::VMOVDQA64Zrr,    X86::VMOVDQA32Zrr     },
7086   { X86::VMOVUPSZmr,    X86::VMOVUPDZmr,    X86::VMOVDQU64Zmr,    X86::VMOVDQU32Zmr     },
7087   { X86::VMOVUPSZrm,    X86::VMOVUPDZrm,    X86::VMOVDQU64Zrm,    X86::VMOVDQU32Zrm     },
7088 };
7089 
7090 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
7091   // Two integer columns for 64-bit and 32-bit elements.
7092   //PackedSingle        PackedDouble        PackedInt           PackedInt
7093   { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7094   { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7095   { X86::VANDPSZ128rm,  X86::VANDPDZ128rm,  X86::VPANDQZ128rm,  X86::VPANDDZ128rm  },
7096   { X86::VANDPSZ128rr,  X86::VANDPDZ128rr,  X86::VPANDQZ128rr,  X86::VPANDDZ128rr  },
7097   { X86::VORPSZ128rm,   X86::VORPDZ128rm,   X86::VPORQZ128rm,   X86::VPORDZ128rm   },
7098   { X86::VORPSZ128rr,   X86::VORPDZ128rr,   X86::VPORQZ128rr,   X86::VPORDZ128rr   },
7099   { X86::VXORPSZ128rm,  X86::VXORPDZ128rm,  X86::VPXORQZ128rm,  X86::VPXORDZ128rm  },
7100   { X86::VXORPSZ128rr,  X86::VXORPDZ128rr,  X86::VPXORQZ128rr,  X86::VPXORDZ128rr  },
7101   { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7102   { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7103   { X86::VANDPSZ256rm,  X86::VANDPDZ256rm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm  },
7104   { X86::VANDPSZ256rr,  X86::VANDPDZ256rr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr  },
7105   { X86::VORPSZ256rm,   X86::VORPDZ256rm,   X86::VPORQZ256rm,   X86::VPORDZ256rm   },
7106   { X86::VORPSZ256rr,   X86::VORPDZ256rr,   X86::VPORQZ256rr,   X86::VPORDZ256rr   },
7107   { X86::VXORPSZ256rm,  X86::VXORPDZ256rm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm  },
7108   { X86::VXORPSZ256rr,  X86::VXORPDZ256rr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr  },
7109   { X86::VANDNPSZrm,    X86::VANDNPDZrm,    X86::VPANDNQZrm,    X86::VPANDNDZrm    },
7110   { X86::VANDNPSZrr,    X86::VANDNPDZrr,    X86::VPANDNQZrr,    X86::VPANDNDZrr    },
7111   { X86::VANDPSZrm,     X86::VANDPDZrm,     X86::VPANDQZrm,     X86::VPANDDZrm     },
7112   { X86::VANDPSZrr,     X86::VANDPDZrr,     X86::VPANDQZrr,     X86::VPANDDZrr     },
7113   { X86::VORPSZrm,      X86::VORPDZrm,      X86::VPORQZrm,      X86::VPORDZrm      },
7114   { X86::VORPSZrr,      X86::VORPDZrr,      X86::VPORQZrr,      X86::VPORDZrr      },
7115   { X86::VXORPSZrm,     X86::VXORPDZrm,     X86::VPXORQZrm,     X86::VPXORDZrm     },
7116   { X86::VXORPSZrr,     X86::VXORPDZrr,     X86::VPXORQZrr,     X86::VPXORDZrr     },
7117 };
7118 
7119 static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
7120   // Two integer columns for 64-bit and 32-bit elements.
7121   //PackedSingle          PackedDouble
7122   //PackedInt             PackedInt
7123   { X86::VANDNPSZ128rmk,  X86::VANDNPDZ128rmk,
7124     X86::VPANDNQZ128rmk,  X86::VPANDNDZ128rmk  },
7125   { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
7126     X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
7127   { X86::VANDNPSZ128rrk,  X86::VANDNPDZ128rrk,
7128     X86::VPANDNQZ128rrk,  X86::VPANDNDZ128rrk  },
7129   { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
7130     X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
7131   { X86::VANDPSZ128rmk,   X86::VANDPDZ128rmk,
7132     X86::VPANDQZ128rmk,   X86::VPANDDZ128rmk   },
7133   { X86::VANDPSZ128rmkz,  X86::VANDPDZ128rmkz,
7134     X86::VPANDQZ128rmkz,  X86::VPANDDZ128rmkz  },
7135   { X86::VANDPSZ128rrk,   X86::VANDPDZ128rrk,
7136     X86::VPANDQZ128rrk,   X86::VPANDDZ128rrk   },
7137   { X86::VANDPSZ128rrkz,  X86::VANDPDZ128rrkz,
7138     X86::VPANDQZ128rrkz,  X86::VPANDDZ128rrkz  },
7139   { X86::VORPSZ128rmk,    X86::VORPDZ128rmk,
7140     X86::VPORQZ128rmk,    X86::VPORDZ128rmk    },
7141   { X86::VORPSZ128rmkz,   X86::VORPDZ128rmkz,
7142     X86::VPORQZ128rmkz,   X86::VPORDZ128rmkz   },
7143   { X86::VORPSZ128rrk,    X86::VORPDZ128rrk,
7144     X86::VPORQZ128rrk,    X86::VPORDZ128rrk    },
7145   { X86::VORPSZ128rrkz,   X86::VORPDZ128rrkz,
7146     X86::VPORQZ128rrkz,   X86::VPORDZ128rrkz   },
7147   { X86::VXORPSZ128rmk,   X86::VXORPDZ128rmk,
7148     X86::VPXORQZ128rmk,   X86::VPXORDZ128rmk   },
7149   { X86::VXORPSZ128rmkz,  X86::VXORPDZ128rmkz,
7150     X86::VPXORQZ128rmkz,  X86::VPXORDZ128rmkz  },
7151   { X86::VXORPSZ128rrk,   X86::VXORPDZ128rrk,
7152     X86::VPXORQZ128rrk,   X86::VPXORDZ128rrk   },
7153   { X86::VXORPSZ128rrkz,  X86::VXORPDZ128rrkz,
7154     X86::VPXORQZ128rrkz,  X86::VPXORDZ128rrkz  },
7155   { X86::VANDNPSZ256rmk,  X86::VANDNPDZ256rmk,
7156     X86::VPANDNQZ256rmk,  X86::VPANDNDZ256rmk  },
7157   { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
7158     X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
7159   { X86::VANDNPSZ256rrk,  X86::VANDNPDZ256rrk,
7160     X86::VPANDNQZ256rrk,  X86::VPANDNDZ256rrk  },
7161   { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
7162     X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
7163   { X86::VANDPSZ256rmk,   X86::VANDPDZ256rmk,
7164     X86::VPANDQZ256rmk,   X86::VPANDDZ256rmk   },
7165   { X86::VANDPSZ256rmkz,  X86::VANDPDZ256rmkz,
7166     X86::VPANDQZ256rmkz,  X86::VPANDDZ256rmkz  },
7167   { X86::VANDPSZ256rrk,   X86::VANDPDZ256rrk,
7168     X86::VPANDQZ256rrk,   X86::VPANDDZ256rrk   },
7169   { X86::VANDPSZ256rrkz,  X86::VANDPDZ256rrkz,
7170     X86::VPANDQZ256rrkz,  X86::VPANDDZ256rrkz  },
7171   { X86::VORPSZ256rmk,    X86::VORPDZ256rmk,
7172     X86::VPORQZ256rmk,    X86::VPORDZ256rmk    },
7173   { X86::VORPSZ256rmkz,   X86::VORPDZ256rmkz,
7174     X86::VPORQZ256rmkz,   X86::VPORDZ256rmkz   },
7175   { X86::VORPSZ256rrk,    X86::VORPDZ256rrk,
7176     X86::VPORQZ256rrk,    X86::VPORDZ256rrk    },
7177   { X86::VORPSZ256rrkz,   X86::VORPDZ256rrkz,
7178     X86::VPORQZ256rrkz,   X86::VPORDZ256rrkz   },
7179   { X86::VXORPSZ256rmk,   X86::VXORPDZ256rmk,
7180     X86::VPXORQZ256rmk,   X86::VPXORDZ256rmk   },
7181   { X86::VXORPSZ256rmkz,  X86::VXORPDZ256rmkz,
7182     X86::VPXORQZ256rmkz,  X86::VPXORDZ256rmkz  },
7183   { X86::VXORPSZ256rrk,   X86::VXORPDZ256rrk,
7184     X86::VPXORQZ256rrk,   X86::VPXORDZ256rrk   },
7185   { X86::VXORPSZ256rrkz,  X86::VXORPDZ256rrkz,
7186     X86::VPXORQZ256rrkz,  X86::VPXORDZ256rrkz  },
7187   { X86::VANDNPSZrmk,     X86::VANDNPDZrmk,
7188     X86::VPANDNQZrmk,     X86::VPANDNDZrmk     },
7189   { X86::VANDNPSZrmkz,    X86::VANDNPDZrmkz,
7190     X86::VPANDNQZrmkz,    X86::VPANDNDZrmkz    },
7191   { X86::VANDNPSZrrk,     X86::VANDNPDZrrk,
7192     X86::VPANDNQZrrk,     X86::VPANDNDZrrk     },
7193   { X86::VANDNPSZrrkz,    X86::VANDNPDZrrkz,
7194     X86::VPANDNQZrrkz,    X86::VPANDNDZrrkz    },
7195   { X86::VANDPSZrmk,      X86::VANDPDZrmk,
7196     X86::VPANDQZrmk,      X86::VPANDDZrmk      },
7197   { X86::VANDPSZrmkz,     X86::VANDPDZrmkz,
7198     X86::VPANDQZrmkz,     X86::VPANDDZrmkz     },
7199   { X86::VANDPSZrrk,      X86::VANDPDZrrk,
7200     X86::VPANDQZrrk,      X86::VPANDDZrrk      },
7201   { X86::VANDPSZrrkz,     X86::VANDPDZrrkz,
7202     X86::VPANDQZrrkz,     X86::VPANDDZrrkz     },
7203   { X86::VORPSZrmk,       X86::VORPDZrmk,
7204     X86::VPORQZrmk,       X86::VPORDZrmk       },
7205   { X86::VORPSZrmkz,      X86::VORPDZrmkz,
7206     X86::VPORQZrmkz,      X86::VPORDZrmkz      },
7207   { X86::VORPSZrrk,       X86::VORPDZrrk,
7208     X86::VPORQZrrk,       X86::VPORDZrrk       },
7209   { X86::VORPSZrrkz,      X86::VORPDZrrkz,
7210     X86::VPORQZrrkz,      X86::VPORDZrrkz      },
7211   { X86::VXORPSZrmk,      X86::VXORPDZrmk,
7212     X86::VPXORQZrmk,      X86::VPXORDZrmk      },
7213   { X86::VXORPSZrmkz,     X86::VXORPDZrmkz,
7214     X86::VPXORQZrmkz,     X86::VPXORDZrmkz     },
7215   { X86::VXORPSZrrk,      X86::VXORPDZrrk,
7216     X86::VPXORQZrrk,      X86::VPXORDZrrk      },
7217   { X86::VXORPSZrrkz,     X86::VXORPDZrrkz,
7218     X86::VPXORQZrrkz,     X86::VPXORDZrrkz     },
7219   // Broadcast loads can be handled the same as masked operations to avoid
7220   // changing element size.
7221   { X86::VANDNPSZ128rmb,  X86::VANDNPDZ128rmb,
7222     X86::VPANDNQZ128rmb,  X86::VPANDNDZ128rmb  },
7223   { X86::VANDPSZ128rmb,   X86::VANDPDZ128rmb,
7224     X86::VPANDQZ128rmb,   X86::VPANDDZ128rmb   },
7225   { X86::VORPSZ128rmb,    X86::VORPDZ128rmb,
7226     X86::VPORQZ128rmb,    X86::VPORDZ128rmb    },
7227   { X86::VXORPSZ128rmb,   X86::VXORPDZ128rmb,
7228     X86::VPXORQZ128rmb,   X86::VPXORDZ128rmb   },
7229   { X86::VANDNPSZ256rmb,  X86::VANDNPDZ256rmb,
7230     X86::VPANDNQZ256rmb,  X86::VPANDNDZ256rmb  },
7231   { X86::VANDPSZ256rmb,   X86::VANDPDZ256rmb,
7232     X86::VPANDQZ256rmb,   X86::VPANDDZ256rmb   },
7233   { X86::VORPSZ256rmb,    X86::VORPDZ256rmb,
7234     X86::VPORQZ256rmb,    X86::VPORDZ256rmb    },
7235   { X86::VXORPSZ256rmb,   X86::VXORPDZ256rmb,
7236     X86::VPXORQZ256rmb,   X86::VPXORDZ256rmb   },
7237   { X86::VANDNPSZrmb,     X86::VANDNPDZrmb,
7238     X86::VPANDNQZrmb,     X86::VPANDNDZrmb     },
7239   { X86::VANDPSZrmb,      X86::VANDPDZrmb,
7240     X86::VPANDQZrmb,      X86::VPANDDZrmb      },
7241   { X86::VANDPSZrmb,      X86::VANDPDZrmb,
7242     X86::VPANDQZrmb,      X86::VPANDDZrmb      },
7243   { X86::VORPSZrmb,       X86::VORPDZrmb,
7244     X86::VPORQZrmb,       X86::VPORDZrmb       },
7245   { X86::VXORPSZrmb,      X86::VXORPDZrmb,
7246     X86::VPXORQZrmb,      X86::VPXORDZrmb      },
7247   { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
7248     X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
7249   { X86::VANDPSZ128rmbk,  X86::VANDPDZ128rmbk,
7250     X86::VPANDQZ128rmbk,  X86::VPANDDZ128rmbk  },
7251   { X86::VORPSZ128rmbk,   X86::VORPDZ128rmbk,
7252     X86::VPORQZ128rmbk,   X86::VPORDZ128rmbk   },
7253   { X86::VXORPSZ128rmbk,  X86::VXORPDZ128rmbk,
7254     X86::VPXORQZ128rmbk,  X86::VPXORDZ128rmbk  },
7255   { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
7256     X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
7257   { X86::VANDPSZ256rmbk,  X86::VANDPDZ256rmbk,
7258     X86::VPANDQZ256rmbk,  X86::VPANDDZ256rmbk  },
7259   { X86::VORPSZ256rmbk,   X86::VORPDZ256rmbk,
7260     X86::VPORQZ256rmbk,   X86::VPORDZ256rmbk   },
7261   { X86::VXORPSZ256rmbk,  X86::VXORPDZ256rmbk,
7262     X86::VPXORQZ256rmbk,  X86::VPXORDZ256rmbk  },
7263   { X86::VANDNPSZrmbk,    X86::VANDNPDZrmbk,
7264     X86::VPANDNQZrmbk,    X86::VPANDNDZrmbk    },
7265   { X86::VANDPSZrmbk,     X86::VANDPDZrmbk,
7266     X86::VPANDQZrmbk,     X86::VPANDDZrmbk     },
7267   { X86::VANDPSZrmbk,     X86::VANDPDZrmbk,
7268     X86::VPANDQZrmbk,     X86::VPANDDZrmbk     },
7269   { X86::VORPSZrmbk,      X86::VORPDZrmbk,
7270     X86::VPORQZrmbk,      X86::VPORDZrmbk      },
7271   { X86::VXORPSZrmbk,     X86::VXORPDZrmbk,
7272     X86::VPXORQZrmbk,     X86::VPXORDZrmbk     },
7273   { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
7274     X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
7275   { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
7276     X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
7277   { X86::VORPSZ128rmbkz,  X86::VORPDZ128rmbkz,
7278     X86::VPORQZ128rmbkz,  X86::VPORDZ128rmbkz  },
7279   { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
7280     X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
7281   { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
7282     X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
7283   { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
7284     X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
7285   { X86::VORPSZ256rmbkz,  X86::VORPDZ256rmbkz,
7286     X86::VPORQZ256rmbkz,  X86::VPORDZ256rmbkz  },
7287   { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
7288     X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
7289   { X86::VANDNPSZrmbkz,   X86::VANDNPDZrmbkz,
7290     X86::VPANDNQZrmbkz,   X86::VPANDNDZrmbkz   },
7291   { X86::VANDPSZrmbkz,    X86::VANDPDZrmbkz,
7292     X86::VPANDQZrmbkz,    X86::VPANDDZrmbkz    },
7293   { X86::VANDPSZrmbkz,    X86::VANDPDZrmbkz,
7294     X86::VPANDQZrmbkz,    X86::VPANDDZrmbkz    },
7295   { X86::VORPSZrmbkz,     X86::VORPDZrmbkz,
7296     X86::VPORQZrmbkz,     X86::VPORDZrmbkz     },
7297   { X86::VXORPSZrmbkz,    X86::VXORPDZrmbkz,
7298     X86::VPXORQZrmbkz,    X86::VPXORDZrmbkz    },
7299 };
7300 
7301 // NOTE: These should only be used by the custom domain methods.
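     // Switching a blend between domains also requires rescaling its immediate
     // to the new element width, which the generic table lookup does not do;
     // see AdjustBlendMask and the blend handling in setExecutionDomainCustom.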
7302 static const uint16_t ReplaceableBlendInstrs[][3] = {
7303   //PackedSingle             PackedDouble             PackedInt
7304   { X86::BLENDPSrmi,         X86::BLENDPDrmi,         X86::PBLENDWrmi   },
7305   { X86::BLENDPSrri,         X86::BLENDPDrri,         X86::PBLENDWrri   },
7306   { X86::VBLENDPSrmi,        X86::VBLENDPDrmi,        X86::VPBLENDWrmi  },
7307   { X86::VBLENDPSrri,        X86::VBLENDPDrri,        X86::VPBLENDWrri  },
7308   { X86::VBLENDPSYrmi,       X86::VBLENDPDYrmi,       X86::VPBLENDWYrmi },
7309   { X86::VBLENDPSYrri,       X86::VBLENDPDYrri,       X86::VPBLENDWYrri },
7310 };
7311 static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
7312   //PackedSingle             PackedDouble             PackedInt
7313   { X86::VBLENDPSrmi,        X86::VBLENDPDrmi,        X86::VPBLENDDrmi  },
7314   { X86::VBLENDPSrri,        X86::VBLENDPDrri,        X86::VPBLENDDrri  },
7315   { X86::VBLENDPSYrmi,       X86::VBLENDPDYrmi,       X86::VPBLENDDYrmi },
7316   { X86::VBLENDPSYrri,       X86::VBLENDPDYrri,       X86::VPBLENDDYrri },
7317 };
7318 
7319 // Special table for changing EVEX logic instructions to VEX.
7320 // TODO: Should we run EVEX->VEX earlier?
7321 static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
7322   // Two integer columns for 64-bit and 32-bit elements.
7323   //PackedSingle     PackedDouble     PackedInt           PackedInt
7324   { X86::VANDNPSrm,  X86::VANDNPDrm,  X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7325   { X86::VANDNPSrr,  X86::VANDNPDrr,  X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7326   { X86::VANDPSrm,   X86::VANDPDrm,   X86::VPANDQZ128rm,  X86::VPANDDZ128rm  },
7327   { X86::VANDPSrr,   X86::VANDPDrr,   X86::VPANDQZ128rr,  X86::VPANDDZ128rr  },
7328   { X86::VORPSrm,    X86::VORPDrm,    X86::VPORQZ128rm,   X86::VPORDZ128rm   },
7329   { X86::VORPSrr,    X86::VORPDrr,    X86::VPORQZ128rr,   X86::VPORDZ128rr   },
7330   { X86::VXORPSrm,   X86::VXORPDrm,   X86::VPXORQZ128rm,  X86::VPXORDZ128rm  },
7331   { X86::VXORPSrr,   X86::VXORPDrr,   X86::VPXORQZ128rr,  X86::VPXORDZ128rr  },
7332   { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7333   { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7334   { X86::VANDPSYrm,  X86::VANDPDYrm,  X86::VPANDQZ256rm,  X86::VPANDDZ256rm  },
7335   { X86::VANDPSYrr,  X86::VANDPDYrr,  X86::VPANDQZ256rr,  X86::VPANDDZ256rr  },
7336   { X86::VORPSYrm,   X86::VORPDYrm,   X86::VPORQZ256rm,   X86::VPORDZ256rm   },
7337   { X86::VORPSYrr,   X86::VORPDYrr,   X86::VPORQZ256rr,   X86::VPORDZ256rr   },
7338   { X86::VXORPSYrm,  X86::VXORPDYrm,  X86::VPXORQZ256rm,  X86::VPXORDZ256rm  },
7339   { X86::VXORPSYrr,  X86::VXORPDYrr,  X86::VPXORQZ256rr,  X86::VPXORDZ256rr  },
7340 };
7341 
7342 // FIXME: Some shuffle and unpack instructions have equivalents in different
7343 // domains, but they require a bit more work than just switching opcodes.
7344 
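     // Each row of the 3-column tables gives the equivalent opcode in the
     // PackedSingle, PackedDouble and PackedInt domains; lookup() returns the
     // row whose entry for the current domain matches the opcode, so callers
     // can re-encode the instruction by indexing the row with the new domain.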
7345 static const uint16_t *lookup(unsigned opcode, unsigned domain,
7346                               ArrayRef<uint16_t[3]> Table) {
7347   for (const uint16_t (&Row)[3] : Table)
7348     if (Row[domain-1] == opcode)
7349       return Row;
7350   return nullptr;
7351 }
7352 
7353 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
7354                                     ArrayRef<uint16_t[4]> Table) {
7355   // If this is the integer domain, make sure to check both integer columns.
7356   for (const uint16_t (&Row)[4] : Table)
7357     if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
7358       return Row;
7359   return nullptr;
7360 }
7361 
7362 // Helper to attempt to widen/narrow blend masks.
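     // Returns true (and writes the rescaled mask to *pNewMask) when the
     // conversion is exact. For example, narrowing 0b1100 from width 4 to
     // width 2 gives 0b10 and widening 0b10 from width 2 to width 4 gives
     // 0b1100, while narrowing 0b0110 from width 4 to width 2 fails because
     // the low pair is only partially set.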
7363 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
7364                             unsigned NewWidth, unsigned *pNewMask = nullptr) {
7365   assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
7366          "Illegal blend mask scale");
7367   unsigned NewMask = 0;
7368 
7369   if ((OldWidth % NewWidth) == 0) {
7370     unsigned Scale = OldWidth / NewWidth;
7371     unsigned SubMask = (1u << Scale) - 1;
7372     for (unsigned i = 0; i != NewWidth; ++i) {
7373       unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
7374       if (Sub == SubMask)
7375         NewMask |= (1u << i);
7376       else if (Sub != 0x0)
7377         return false;
7378     }
7379   } else {
7380     unsigned Scale = NewWidth / OldWidth;
7381     unsigned SubMask = (1u << Scale) - 1;
7382     for (unsigned i = 0; i != OldWidth; ++i) {
7383       if (OldMask & (1 << i)) {
7384         NewMask |= (SubMask << (i * Scale));
7385       }
7386     }
7387   }
7388 
7389   if (pNewMask)
7390     *pNewMask = NewMask;
7391   return true;
7392 }
7393 
7394 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
7395   unsigned Opcode = MI.getOpcode();
7396   unsigned NumOperands = MI.getDesc().getNumOperands();
7397 
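       // GetBlendDomains returns a bitmask of the domains this blend could be
       // switched to: bit 1 (0x2) = PackedSingle, bit 2 (0x4) = PackedDouble,
       // bit 3 (0x8) = PackedInt. The FP domains are offered only when the
       // immediate can be rescaled to their element count by AdjustBlendMask.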
7398   auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
7399     uint16_t validDomains = 0;
7400     if (MI.getOperand(NumOperands - 1).isImm()) {
7401       unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
7402       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
7403         validDomains |= 0x2; // PackedSingle
7404       if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
7405         validDomains |= 0x4; // PackedDouble
7406       if (!Is256 || Subtarget.hasAVX2())
7407         validDomains |= 0x8; // PackedInt
7408     }
7409     return validDomains;
7410   };
7411 
7412   switch (Opcode) {
7413   case X86::BLENDPDrmi:
7414   case X86::BLENDPDrri:
7415   case X86::VBLENDPDrmi:
7416   case X86::VBLENDPDrri:
7417     return GetBlendDomains(2, false);
7418   case X86::VBLENDPDYrmi:
7419   case X86::VBLENDPDYrri:
7420     return GetBlendDomains(4, true);
7421   case X86::BLENDPSrmi:
7422   case X86::BLENDPSrri:
7423   case X86::VBLENDPSrmi:
7424   case X86::VBLENDPSrri:
7425   case X86::VPBLENDDrmi:
7426   case X86::VPBLENDDrri:
7427     return GetBlendDomains(4, false);
7428   case X86::VBLENDPSYrmi:
7429   case X86::VBLENDPSYrri:
7430   case X86::VPBLENDDYrmi:
7431   case X86::VPBLENDDYrri:
7432     return GetBlendDomains(8, true);
7433   case X86::PBLENDWrmi:
7434   case X86::PBLENDWrri:
7435   case X86::VPBLENDWrmi:
7436   case X86::VPBLENDWrri:
7437   // Treat VPBLENDWY as a 128-bit vector because it repeats the lo/hi masks.
7438   case X86::VPBLENDWYrmi:
7439   case X86::VPBLENDWYrri:
7440     return GetBlendDomains(8, false);
7441   case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
7442   case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
7443   case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
7444   case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
7445   case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7446   case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7447   case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7448   case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7449   case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
7450   case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
7451   case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
7452   case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
7453   case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
7454   case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
7455   case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
7456   case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm:
7457     // If we don't have DQI, see if we can still switch from an EVEX integer
7458     // instruction to a VEX floating point instruction.
7459     if (Subtarget.hasDQI())
7460       return 0;
7461 
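         // VEX encodings can only address XMM0-XMM15, so bail out if any
         // operand lives in one of the upper 16 registers that require EVEX.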
7462     if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
7463       return 0;
7464     if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
7465       return 0;
7466     // Register forms will have 3 operands. Memory forms will have more.
7467     if (NumOperands == 3 &&
7468         RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
7469       return 0;
7470 
7471     // All domains are valid.
7472     return 0xe;
7473   case X86::MOVHLPSrr:
7474     // We can swap domains when both inputs are the same register.
7475     // FIXME: This doesn't catch all the cases we would like. If the input
7476     // register isn't KILLed by the instruction, the two-address instruction
7477     // pass puts a COPY on one input. The other input uses the original
7478     // register. This prevents the same physical register from being used by
7479     // both inputs.
7480     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7481         MI.getOperand(0).getSubReg() == 0 &&
7482         MI.getOperand(1).getSubReg() == 0 &&
7483         MI.getOperand(2).getSubReg() == 0)
7484       return 0x6;
7485     return 0;
7486   case X86::SHUFPDrri:
7487     return 0x6;
7488   }
7489   return 0;
7490 }
7491 
7492 bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
7493                                             unsigned Domain) const {
7494   assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
7495   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7496   assert(dom && "Not an SSE instruction");
7497 
7498   unsigned Opcode = MI.getOpcode();
7499   unsigned NumOperands = MI.getDesc().getNumOperands();
7500 
7501   auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
7502     if (MI.getOperand(NumOperands - 1).isImm()) {
7503       unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
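           // VPBLENDWY applies its 8-bit mask to both 128-bit lanes, so
           // replicate the immediate to 16 bits before rescaling it.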
7504       Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
7505       unsigned NewImm = Imm;
7506 
7507       const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
7508       if (!table)
7509         table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7510 
7511       if (Domain == 1) { // PackedSingle
7512         AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7513       } else if (Domain == 2) { // PackedDouble
7514         AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
7515       } else if (Domain == 3) { // PackedInt
7516         if (Subtarget.hasAVX2()) {
7517           // If we are already VPBLENDW use that, else use VPBLENDD.
7518           if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
7519             table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7520             AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7521           }
7522         } else {
7523           assert(!Is256 && "128-bit vector expected");
7524           AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
7525         }
7526       }
7527 
7528       assert(table && table[Domain - 1] && "Unknown domain op");
7529       MI.setDesc(get(table[Domain - 1]));
7530       MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
7531     }
7532     return true;
7533   };
7534 
7535   switch (Opcode) {
7536   case X86::BLENDPDrmi:
7537   case X86::BLENDPDrri:
7538   case X86::VBLENDPDrmi:
7539   case X86::VBLENDPDrri:
7540     return SetBlendDomain(2, false);
7541   case X86::VBLENDPDYrmi:
7542   case X86::VBLENDPDYrri:
7543     return SetBlendDomain(4, true);
7544   case X86::BLENDPSrmi:
7545   case X86::BLENDPSrri:
7546   case X86::VBLENDPSrmi:
7547   case X86::VBLENDPSrri:
7548   case X86::VPBLENDDrmi:
7549   case X86::VPBLENDDrri:
7550     return SetBlendDomain(4, false);
7551   case X86::VBLENDPSYrmi:
7552   case X86::VBLENDPSYrri:
7553   case X86::VPBLENDDYrmi:
7554   case X86::VPBLENDDYrri:
7555     return SetBlendDomain(8, true);
7556   case X86::PBLENDWrmi:
7557   case X86::PBLENDWrri:
7558   case X86::VPBLENDWrmi:
7559   case X86::VPBLENDWrri:
7560     return SetBlendDomain(8, false);
7561   case X86::VPBLENDWYrmi:
7562   case X86::VPBLENDWYrri:
7563     return SetBlendDomain(16, true);
7564   case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
7565   case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
7566   case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
7567   case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
7568   case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7569   case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7570   case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7571   case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7572   case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
7573   case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
7574   case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
7575   case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
7576   case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
7577   case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
7578   case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
7579   case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm: {
7580     // Without DQI, convert EVEX instructions to VEX instructions.
7581     if (Subtarget.hasDQI())
7582       return false;
7583 
7584     const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
7585                                          ReplaceableCustomAVX512LogicInstrs);
7586     assert(table && "Instruction not found in table?");
7587     // Don't change integer Q instructions to D instructions and
7588     // use D instructions if we started with a PS instruction.
7589     if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7590       Domain = 4;
7591     MI.setDesc(get(table[Domain - 1]));
7592     return true;
7593   }
7594   case X86::UNPCKHPDrr:
7595   case X86::MOVHLPSrr:
7596     // We just need to commute the instruction, which will switch the domains.
7597     if (Domain != dom && Domain != 3 &&
7598         MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7599         MI.getOperand(0).getSubReg() == 0 &&
7600         MI.getOperand(1).getSubReg() == 0 &&
7601         MI.getOperand(2).getSubReg() == 0) {
7602       commuteInstruction(MI, false);
7603       return true;
7604     }
7605     // We must always return true for MOVHLPSrr.
7606     if (Opcode == X86::MOVHLPSrr)
7607       return true;
7608     break;
7609   case X86::SHUFPDrri: {
7610     if (Domain == 1) {
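           // Build the equivalent SHUFPS immediate: 0x44 selects dwords {0,1}
           // (the low qword) of each source; OR-ing in 0x0a or 0xa0 switches
           // the first or second source to its high qword (dwords {2,3}) when
           // the corresponding SHUFPD immediate bit is set.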
7611       unsigned Imm = MI.getOperand(3).getImm();
7612       unsigned NewImm = 0x44;
7613       if (Imm & 1) NewImm |= 0x0a;
7614       if (Imm & 2) NewImm |= 0xa0;
7615       MI.getOperand(3).setImm(NewImm);
7616       MI.setDesc(get(X86::SHUFPSrri));
7617     }
7618     return true;
7619   }
7620   }
7621   return false;
7622 }
7623 
7624 std::pair<uint16_t, uint16_t>
7625 X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
7626   uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7627   unsigned opcode = MI.getOpcode();
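       // validDomains is a bitmask of the domains this instruction can be
       // encoded in: 0x2 = PackedSingle, 0x4 = PackedDouble, 0x8 = PackedInt.
       // For example 0xe allows all three domains and 0x6 only the FP domains.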
7628   uint16_t validDomains = 0;
7629   if (domain) {
7630     // Attempt to match for custom instructions.
7631     validDomains = getExecutionDomainCustom(MI);
7632     if (validDomains)
7633       return std::make_pair(domain, validDomains);
7634 
7635     if (lookup(opcode, domain, ReplaceableInstrs)) {
7636       validDomains = 0xe;
7637     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
7638       validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
7639     } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
7640       validDomains = 0x6;
7641     } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
7642       // Insert/extract instructions should only affect the domain if AVX2
7643       // is enabled.
7644       if (!Subtarget.hasAVX2())
7645         return std::make_pair(0, 0);
7646       validDomains = 0xe;
7647     } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
7648       validDomains = 0xe;
7649     } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
7650                                                   ReplaceableInstrsAVX512DQ)) {
7651       validDomains = 0xe;
7652     } else if (Subtarget.hasDQI()) {
7653       if (const uint16_t *table = lookupAVX512(opcode, domain,
7654                                              ReplaceableInstrsAVX512DQMasked)) {
7655         if (domain == 1 || (domain == 3 && table[3] == opcode))
7656           validDomains = 0xa;
7657         else
7658           validDomains = 0xc;
7659       }
7660     }
7661   }
7662   return std::make_pair(domain, validDomains);
7663 }
7664 
7665 void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
7666   assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
7667   uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7668   assert(dom && "Not an SSE instruction");
7669 
7670   // Attempt to match for custom instructions.
7671   if (setExecutionDomainCustom(MI, Domain))
7672     return;
7673 
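       // Try each replacement table in turn; the asserts document which
       // subtarget features each table requires.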
7674   const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
7675   if (!table) { // try the AVX2 table
7676     assert((Subtarget.hasAVX2() || Domain < 3) &&
7677            "256-bit vector operations only available in AVX2");
7678     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
7679   }
7680   if (!table) { // try the FP table
7681     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
7682     assert((!table || Domain < 3) &&
7683            "Can only select PackedSingle or PackedDouble");
7684   }
7685   if (!table) { // try the insert/extract table
7686     assert(Subtarget.hasAVX2() &&
7687            "256-bit insert/extract only available in AVX2");
7688     table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
7689   }
7690   if (!table) { // try the AVX512 table
7691     assert(Subtarget.hasAVX512() && "Requires AVX-512");
7692     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
7693     // Don't change integer Q instructions to D instructions.
7694     if (table && Domain == 3 && table[3] == MI.getOpcode())
7695       Domain = 4;
7696   }
7697   if (!table) { // try the AVX512DQ table
7698     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
7699     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
7700     // Don't change integer Q instructions to D instructions and
7701     // use D instructions if we started with a PS instruction.
7702     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7703       Domain = 4;
7704   }
7705   if (!table) { // try the AVX512DQMasked table
7706     assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
7707     table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
7708     if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7709       Domain = 4;
7710   }
7711   assert(table && "Cannot change domain");
7712   MI.setDesc(get(table[Domain - 1]));
7713 }
7714 
7715 /// Set NopInst to the noop instruction to use.
7716 void X86InstrInfo::getNoop(MCInst &NopInst) const {
7717   NopInst.setOpcode(X86::NOOP);
7718 }
7719 
7720 bool X86InstrInfo::isHighLatencyDef(int opc) const {
7721   switch (opc) {
7722   default: return false;
7723   case X86::DIVPDrm:
7724   case X86::DIVPDrr:
7725   case X86::DIVPSrm:
7726   case X86::DIVPSrr:
7727   case X86::DIVSDrm:
7728   case X86::DIVSDrm_Int:
7729   case X86::DIVSDrr:
7730   case X86::DIVSDrr_Int:
7731   case X86::DIVSSrm:
7732   case X86::DIVSSrm_Int:
7733   case X86::DIVSSrr:
7734   case X86::DIVSSrr_Int:
7735   case X86::SQRTPDm:
7736   case X86::SQRTPDr:
7737   case X86::SQRTPSm:
7738   case X86::SQRTPSr:
7739   case X86::SQRTSDm:
7740   case X86::SQRTSDm_Int:
7741   case X86::SQRTSDr:
7742   case X86::SQRTSDr_Int:
7743   case X86::SQRTSSm:
7744   case X86::SQRTSSm_Int:
7745   case X86::SQRTSSr:
7746   case X86::SQRTSSr_Int:
7747   // AVX instructions with high latency
7748   case X86::VDIVPDrm:
7749   case X86::VDIVPDrr:
7750   case X86::VDIVPDYrm:
7751   case X86::VDIVPDYrr:
7752   case X86::VDIVPSrm:
7753   case X86::VDIVPSrr:
7754   case X86::VDIVPSYrm:
7755   case X86::VDIVPSYrr:
7756   case X86::VDIVSDrm:
7757   case X86::VDIVSDrm_Int:
7758   case X86::VDIVSDrr:
7759   case X86::VDIVSDrr_Int:
7760   case X86::VDIVSSrm:
7761   case X86::VDIVSSrm_Int:
7762   case X86::VDIVSSrr:
7763   case X86::VDIVSSrr_Int:
7764   case X86::VSQRTPDm:
7765   case X86::VSQRTPDr:
7766   case X86::VSQRTPDYm:
7767   case X86::VSQRTPDYr:
7768   case X86::VSQRTPSm:
7769   case X86::VSQRTPSr:
7770   case X86::VSQRTPSYm:
7771   case X86::VSQRTPSYr:
7772   case X86::VSQRTSDm:
7773   case X86::VSQRTSDm_Int:
7774   case X86::VSQRTSDr:
7775   case X86::VSQRTSDr_Int:
7776   case X86::VSQRTSSm:
7777   case X86::VSQRTSSm_Int:
7778   case X86::VSQRTSSr:
7779   case X86::VSQRTSSr_Int:
7780   // AVX512 instructions with high latency
7781   case X86::VDIVPDZ128rm:
7782   case X86::VDIVPDZ128rmb:
7783   case X86::VDIVPDZ128rmbk:
7784   case X86::VDIVPDZ128rmbkz:
7785   case X86::VDIVPDZ128rmk:
7786   case X86::VDIVPDZ128rmkz:
7787   case X86::VDIVPDZ128rr:
7788   case X86::VDIVPDZ128rrk:
7789   case X86::VDIVPDZ128rrkz:
7790   case X86::VDIVPDZ256rm:
7791   case X86::VDIVPDZ256rmb:
7792   case X86::VDIVPDZ256rmbk:
7793   case X86::VDIVPDZ256rmbkz:
7794   case X86::VDIVPDZ256rmk:
7795   case X86::VDIVPDZ256rmkz:
7796   case X86::VDIVPDZ256rr:
7797   case X86::VDIVPDZ256rrk:
7798   case X86::VDIVPDZ256rrkz:
7799   case X86::VDIVPDZrrb:
7800   case X86::VDIVPDZrrbk:
7801   case X86::VDIVPDZrrbkz:
7802   case X86::VDIVPDZrm:
7803   case X86::VDIVPDZrmb:
7804   case X86::VDIVPDZrmbk:
7805   case X86::VDIVPDZrmbkz:
7806   case X86::VDIVPDZrmk:
7807   case X86::VDIVPDZrmkz:
7808   case X86::VDIVPDZrr:
7809   case X86::VDIVPDZrrk:
7810   case X86::VDIVPDZrrkz:
7811   case X86::VDIVPSZ128rm:
7812   case X86::VDIVPSZ128rmb:
7813   case X86::VDIVPSZ128rmbk:
7814   case X86::VDIVPSZ128rmbkz:
7815   case X86::VDIVPSZ128rmk:
7816   case X86::VDIVPSZ128rmkz:
7817   case X86::VDIVPSZ128rr:
7818   case X86::VDIVPSZ128rrk:
7819   case X86::VDIVPSZ128rrkz:
7820   case X86::VDIVPSZ256rm:
7821   case X86::VDIVPSZ256rmb:
7822   case X86::VDIVPSZ256rmbk:
7823   case X86::VDIVPSZ256rmbkz:
7824   case X86::VDIVPSZ256rmk:
7825   case X86::VDIVPSZ256rmkz:
7826   case X86::VDIVPSZ256rr:
7827   case X86::VDIVPSZ256rrk:
7828   case X86::VDIVPSZ256rrkz:
7829   case X86::VDIVPSZrrb:
7830   case X86::VDIVPSZrrbk:
7831   case X86::VDIVPSZrrbkz:
7832   case X86::VDIVPSZrm:
7833   case X86::VDIVPSZrmb:
7834   case X86::VDIVPSZrmbk:
7835   case X86::VDIVPSZrmbkz:
7836   case X86::VDIVPSZrmk:
7837   case X86::VDIVPSZrmkz:
7838   case X86::VDIVPSZrr:
7839   case X86::VDIVPSZrrk:
7840   case X86::VDIVPSZrrkz:
7841   case X86::VDIVSDZrm:
7842   case X86::VDIVSDZrr:
7843   case X86::VDIVSDZrm_Int:
7844   case X86::VDIVSDZrm_Intk:
7845   case X86::VDIVSDZrm_Intkz:
7846   case X86::VDIVSDZrr_Int:
7847   case X86::VDIVSDZrr_Intk:
7848   case X86::VDIVSDZrr_Intkz:
7849   case X86::VDIVSDZrrb_Int:
7850   case X86::VDIVSDZrrb_Intk:
7851   case X86::VDIVSDZrrb_Intkz:
7852   case X86::VDIVSSZrm:
7853   case X86::VDIVSSZrr:
7854   case X86::VDIVSSZrm_Int:
7855   case X86::VDIVSSZrm_Intk:
7856   case X86::VDIVSSZrm_Intkz:
7857   case X86::VDIVSSZrr_Int:
7858   case X86::VDIVSSZrr_Intk:
7859   case X86::VDIVSSZrr_Intkz:
7860   case X86::VDIVSSZrrb_Int:
7861   case X86::VDIVSSZrrb_Intk:
7862   case X86::VDIVSSZrrb_Intkz:
7863   case X86::VSQRTPDZ128m:
7864   case X86::VSQRTPDZ128mb:
7865   case X86::VSQRTPDZ128mbk:
7866   case X86::VSQRTPDZ128mbkz:
7867   case X86::VSQRTPDZ128mk:
7868   case X86::VSQRTPDZ128mkz:
7869   case X86::VSQRTPDZ128r:
7870   case X86::VSQRTPDZ128rk:
7871   case X86::VSQRTPDZ128rkz:
7872   case X86::VSQRTPDZ256m:
7873   case X86::VSQRTPDZ256mb:
7874   case X86::VSQRTPDZ256mbk:
7875   case X86::VSQRTPDZ256mbkz:
7876   case X86::VSQRTPDZ256mk:
7877   case X86::VSQRTPDZ256mkz:
7878   case X86::VSQRTPDZ256r:
7879   case X86::VSQRTPDZ256rk:
7880   case X86::VSQRTPDZ256rkz:
7881   case X86::VSQRTPDZm:
7882   case X86::VSQRTPDZmb:
7883   case X86::VSQRTPDZmbk:
7884   case X86::VSQRTPDZmbkz:
7885   case X86::VSQRTPDZmk:
7886   case X86::VSQRTPDZmkz:
7887   case X86::VSQRTPDZr:
7888   case X86::VSQRTPDZrb:
7889   case X86::VSQRTPDZrbk:
7890   case X86::VSQRTPDZrbkz:
7891   case X86::VSQRTPDZrk:
7892   case X86::VSQRTPDZrkz:
7893   case X86::VSQRTPSZ128m:
7894   case X86::VSQRTPSZ128mb:
7895   case X86::VSQRTPSZ128mbk:
7896   case X86::VSQRTPSZ128mbkz:
7897   case X86::VSQRTPSZ128mk:
7898   case X86::VSQRTPSZ128mkz:
7899   case X86::VSQRTPSZ128r:
7900   case X86::VSQRTPSZ128rk:
7901   case X86::VSQRTPSZ128rkz:
7902   case X86::VSQRTPSZ256m:
7903   case X86::VSQRTPSZ256mb:
7904   case X86::VSQRTPSZ256mbk:
7905   case X86::VSQRTPSZ256mbkz:
7906   case X86::VSQRTPSZ256mk:
7907   case X86::VSQRTPSZ256mkz:
7908   case X86::VSQRTPSZ256r:
7909   case X86::VSQRTPSZ256rk:
7910   case X86::VSQRTPSZ256rkz:
7911   case X86::VSQRTPSZm:
7912   case X86::VSQRTPSZmb:
7913   case X86::VSQRTPSZmbk:
7914   case X86::VSQRTPSZmbkz:
7915   case X86::VSQRTPSZmk:
7916   case X86::VSQRTPSZmkz:
7917   case X86::VSQRTPSZr:
7918   case X86::VSQRTPSZrb:
7919   case X86::VSQRTPSZrbk:
7920   case X86::VSQRTPSZrbkz:
7921   case X86::VSQRTPSZrk:
7922   case X86::VSQRTPSZrkz:
7923   case X86::VSQRTSDZm:
7924   case X86::VSQRTSDZm_Int:
7925   case X86::VSQRTSDZm_Intk:
7926   case X86::VSQRTSDZm_Intkz:
7927   case X86::VSQRTSDZr:
7928   case X86::VSQRTSDZr_Int:
7929   case X86::VSQRTSDZr_Intk:
7930   case X86::VSQRTSDZr_Intkz:
7931   case X86::VSQRTSDZrb_Int:
7932   case X86::VSQRTSDZrb_Intk:
7933   case X86::VSQRTSDZrb_Intkz:
7934   case X86::VSQRTSSZm:
7935   case X86::VSQRTSSZm_Int:
7936   case X86::VSQRTSSZm_Intk:
7937   case X86::VSQRTSSZm_Intkz:
7938   case X86::VSQRTSSZr:
7939   case X86::VSQRTSSZr_Int:
7940   case X86::VSQRTSSZr_Intk:
7941   case X86::VSQRTSSZr_Intkz:
7942   case X86::VSQRTSSZrb_Int:
7943   case X86::VSQRTSSZrb_Intk:
7944   case X86::VSQRTSSZrb_Intkz:
7945 
7946   case X86::VGATHERDPDYrm:
7947   case X86::VGATHERDPDZ128rm:
7948   case X86::VGATHERDPDZ256rm:
7949   case X86::VGATHERDPDZrm:
7950   case X86::VGATHERDPDrm:
7951   case X86::VGATHERDPSYrm:
7952   case X86::VGATHERDPSZ128rm:
7953   case X86::VGATHERDPSZ256rm:
7954   case X86::VGATHERDPSZrm:
7955   case X86::VGATHERDPSrm:
7956   case X86::VGATHERPF0DPDm:
7957   case X86::VGATHERPF0DPSm:
7958   case X86::VGATHERPF0QPDm:
7959   case X86::VGATHERPF0QPSm:
7960   case X86::VGATHERPF1DPDm:
7961   case X86::VGATHERPF1DPSm:
7962   case X86::VGATHERPF1QPDm:
7963   case X86::VGATHERPF1QPSm:
7964   case X86::VGATHERQPDYrm:
7965   case X86::VGATHERQPDZ128rm:
7966   case X86::VGATHERQPDZ256rm:
7967   case X86::VGATHERQPDZrm:
7968   case X86::VGATHERQPDrm:
7969   case X86::VGATHERQPSYrm:
7970   case X86::VGATHERQPSZ128rm:
7971   case X86::VGATHERQPSZ256rm:
7972   case X86::VGATHERQPSZrm:
7973   case X86::VGATHERQPSrm:
7974   case X86::VPGATHERDDYrm:
7975   case X86::VPGATHERDDZ128rm:
7976   case X86::VPGATHERDDZ256rm:
7977   case X86::VPGATHERDDZrm:
7978   case X86::VPGATHERDDrm:
7979   case X86::VPGATHERDQYrm:
7980   case X86::VPGATHERDQZ128rm:
7981   case X86::VPGATHERDQZ256rm:
7982   case X86::VPGATHERDQZrm:
7983   case X86::VPGATHERDQrm:
7984   case X86::VPGATHERQDYrm:
7985   case X86::VPGATHERQDZ128rm:
7986   case X86::VPGATHERQDZ256rm:
7987   case X86::VPGATHERQDZrm:
7988   case X86::VPGATHERQDrm:
7989   case X86::VPGATHERQQYrm:
7990   case X86::VPGATHERQQZ128rm:
7991   case X86::VPGATHERQQZ256rm:
7992   case X86::VPGATHERQQZrm:
7993   case X86::VPGATHERQQrm:
7994   case X86::VSCATTERDPDZ128mr:
7995   case X86::VSCATTERDPDZ256mr:
7996   case X86::VSCATTERDPDZmr:
7997   case X86::VSCATTERDPSZ128mr:
7998   case X86::VSCATTERDPSZ256mr:
7999   case X86::VSCATTERDPSZmr:
8000   case X86::VSCATTERPF0DPDm:
8001   case X86::VSCATTERPF0DPSm:
8002   case X86::VSCATTERPF0QPDm:
8003   case X86::VSCATTERPF0QPSm:
8004   case X86::VSCATTERPF1DPDm:
8005   case X86::VSCATTERPF1DPSm:
8006   case X86::VSCATTERPF1QPDm:
8007   case X86::VSCATTERPF1QPSm:
8008   case X86::VSCATTERQPDZ128mr:
8009   case X86::VSCATTERQPDZ256mr:
8010   case X86::VSCATTERQPDZmr:
8011   case X86::VSCATTERQPSZ128mr:
8012   case X86::VSCATTERQPSZ256mr:
8013   case X86::VSCATTERQPSZmr:
8014   case X86::VPSCATTERDDZ128mr:
8015   case X86::VPSCATTERDDZ256mr:
8016   case X86::VPSCATTERDDZmr:
8017   case X86::VPSCATTERDQZ128mr:
8018   case X86::VPSCATTERDQZ256mr:
8019   case X86::VPSCATTERDQZmr:
8020   case X86::VPSCATTERQDZ128mr:
8021   case X86::VPSCATTERQDZ256mr:
8022   case X86::VPSCATTERQDZmr:
8023   case X86::VPSCATTERQQZ128mr:
8024   case X86::VPSCATTERQQZ256mr:
8025   case X86::VPSCATTERQQZmr:
8026     return true;
8027   }
8028 }
8029 
8030 bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
8031                                          const MachineRegisterInfo *MRI,
8032                                          const MachineInstr &DefMI,
8033                                          unsigned DefIdx,
8034                                          const MachineInstr &UseMI,
8035                                          unsigned UseIdx) const {
8036   return isHighLatencyDef(DefMI.getOpcode());
8037 }
8038 
8039 bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
8040                                            const MachineBasicBlock *MBB) const {
8041   assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
8042          Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
8043 
8044   // Integer binary math/logic instructions have a third source operand:
8045   // the EFLAGS register. That operand must be both defined here and never
8046   // used; i.e., it must be dead. If the EFLAGS operand is live, then we can
8047   // not change anything because rearranging the operands could affect other
8048   // instructions that depend on the exact status flags (zero, sign, etc.)
8049   // that are set by using these particular operands with this operation.
8050   const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
8051   assert((Inst.getNumDefs() == 1 || FlagDef) &&
8052          "Implicit def isn't flags?");
8053   if (FlagDef && !FlagDef->isDead())
8054     return false;
8055 
8056   return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
8057 }
8058 
8059 // TODO: There are many more machine instruction opcodes to match:
8060 //       1. Other data types (integer, vectors)
8061 //       2. Other math / logic operations (xor, or)
8062 //       3. Other forms of the same operation (intrinsics and other variants)
8063 bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
8064   switch (Inst.getOpcode()) {
8065   case X86::AND8rr:
8066   case X86::AND16rr:
8067   case X86::AND32rr:
8068   case X86::AND64rr:
8069   case X86::OR8rr:
8070   case X86::OR16rr:
8071   case X86::OR32rr:
8072   case X86::OR64rr:
8073   case X86::XOR8rr:
8074   case X86::XOR16rr:
8075   case X86::XOR32rr:
8076   case X86::XOR64rr:
8077   case X86::IMUL16rr:
8078   case X86::IMUL32rr:
8079   case X86::IMUL64rr:
8080   case X86::PANDrr:
8081   case X86::PORrr:
8082   case X86::PXORrr:
8083   case X86::ANDPDrr:
8084   case X86::ANDPSrr:
8085   case X86::ORPDrr:
8086   case X86::ORPSrr:
8087   case X86::XORPDrr:
8088   case X86::XORPSrr:
8089   case X86::PADDBrr:
8090   case X86::PADDWrr:
8091   case X86::PADDDrr:
8092   case X86::PADDQrr:
8093   case X86::PMULLWrr:
8094   case X86::PMULLDrr:
8095   case X86::PMAXSBrr:
8096   case X86::PMAXSDrr:
8097   case X86::PMAXSWrr:
8098   case X86::PMAXUBrr:
8099   case X86::PMAXUDrr:
8100   case X86::PMAXUWrr:
8101   case X86::PMINSBrr:
8102   case X86::PMINSDrr:
8103   case X86::PMINSWrr:
8104   case X86::PMINUBrr:
8105   case X86::PMINUDrr:
8106   case X86::PMINUWrr:
8107   case X86::VPANDrr:
8108   case X86::VPANDYrr:
8109   case X86::VPANDDZ128rr:
8110   case X86::VPANDDZ256rr:
8111   case X86::VPANDDZrr:
8112   case X86::VPANDQZ128rr:
8113   case X86::VPANDQZ256rr:
8114   case X86::VPANDQZrr:
8115   case X86::VPORrr:
8116   case X86::VPORYrr:
8117   case X86::VPORDZ128rr:
8118   case X86::VPORDZ256rr:
8119   case X86::VPORDZrr:
8120   case X86::VPORQZ128rr:
8121   case X86::VPORQZ256rr:
8122   case X86::VPORQZrr:
8123   case X86::VPXORrr:
8124   case X86::VPXORYrr:
8125   case X86::VPXORDZ128rr:
8126   case X86::VPXORDZ256rr:
8127   case X86::VPXORDZrr:
8128   case X86::VPXORQZ128rr:
8129   case X86::VPXORQZ256rr:
8130   case X86::VPXORQZrr:
8131   case X86::VANDPDrr:
8132   case X86::VANDPSrr:
8133   case X86::VANDPDYrr:
8134   case X86::VANDPSYrr:
8135   case X86::VANDPDZ128rr:
8136   case X86::VANDPSZ128rr:
8137   case X86::VANDPDZ256rr:
8138   case X86::VANDPSZ256rr:
8139   case X86::VANDPDZrr:
8140   case X86::VANDPSZrr:
8141   case X86::VORPDrr:
8142   case X86::VORPSrr:
8143   case X86::VORPDYrr:
8144   case X86::VORPSYrr:
8145   case X86::VORPDZ128rr:
8146   case X86::VORPSZ128rr:
8147   case X86::VORPDZ256rr:
8148   case X86::VORPSZ256rr:
8149   case X86::VORPDZrr:
8150   case X86::VORPSZrr:
8151   case X86::VXORPDrr:
8152   case X86::VXORPSrr:
8153   case X86::VXORPDYrr:
8154   case X86::VXORPSYrr:
8155   case X86::VXORPDZ128rr:
8156   case X86::VXORPSZ128rr:
8157   case X86::VXORPDZ256rr:
8158   case X86::VXORPSZ256rr:
8159   case X86::VXORPDZrr:
8160   case X86::VXORPSZrr:
8161   case X86::KADDBrr:
8162   case X86::KADDWrr:
8163   case X86::KADDDrr:
8164   case X86::KADDQrr:
8165   case X86::KANDBrr:
8166   case X86::KANDWrr:
8167   case X86::KANDDrr:
8168   case X86::KANDQrr:
8169   case X86::KORBrr:
8170   case X86::KORWrr:
8171   case X86::KORDrr:
8172   case X86::KORQrr:
8173   case X86::KXORBrr:
8174   case X86::KXORWrr:
8175   case X86::KXORDrr:
8176   case X86::KXORQrr:
8177   case X86::VPADDBrr:
8178   case X86::VPADDWrr:
8179   case X86::VPADDDrr:
8180   case X86::VPADDQrr:
8181   case X86::VPADDBYrr:
8182   case X86::VPADDWYrr:
8183   case X86::VPADDDYrr:
8184   case X86::VPADDQYrr:
8185   case X86::VPADDBZ128rr:
8186   case X86::VPADDWZ128rr:
8187   case X86::VPADDDZ128rr:
8188   case X86::VPADDQZ128rr:
8189   case X86::VPADDBZ256rr:
8190   case X86::VPADDWZ256rr:
8191   case X86::VPADDDZ256rr:
8192   case X86::VPADDQZ256rr:
8193   case X86::VPADDBZrr:
8194   case X86::VPADDWZrr:
8195   case X86::VPADDDZrr:
8196   case X86::VPADDQZrr:
8197   case X86::VPMULLWrr:
8198   case X86::VPMULLWYrr:
8199   case X86::VPMULLWZ128rr:
8200   case X86::VPMULLWZ256rr:
8201   case X86::VPMULLWZrr:
8202   case X86::VPMULLDrr:
8203   case X86::VPMULLDYrr:
8204   case X86::VPMULLDZ128rr:
8205   case X86::VPMULLDZ256rr:
8206   case X86::VPMULLDZrr:
8207   case X86::VPMULLQZ128rr:
8208   case X86::VPMULLQZ256rr:
8209   case X86::VPMULLQZrr:
8210   case X86::VPMAXSBrr:
8211   case X86::VPMAXSBYrr:
8212   case X86::VPMAXSBZ128rr:
8213   case X86::VPMAXSBZ256rr:
8214   case X86::VPMAXSBZrr:
8215   case X86::VPMAXSDrr:
8216   case X86::VPMAXSDYrr:
8217   case X86::VPMAXSDZ128rr:
8218   case X86::VPMAXSDZ256rr:
8219   case X86::VPMAXSDZrr:
8220   case X86::VPMAXSQZ128rr:
8221   case X86::VPMAXSQZ256rr:
8222   case X86::VPMAXSQZrr:
8223   case X86::VPMAXSWrr:
8224   case X86::VPMAXSWYrr:
8225   case X86::VPMAXSWZ128rr:
8226   case X86::VPMAXSWZ256rr:
8227   case X86::VPMAXSWZrr:
8228   case X86::VPMAXUBrr:
8229   case X86::VPMAXUBYrr:
8230   case X86::VPMAXUBZ128rr:
8231   case X86::VPMAXUBZ256rr:
8232   case X86::VPMAXUBZrr:
8233   case X86::VPMAXUDrr:
8234   case X86::VPMAXUDYrr:
8235   case X86::VPMAXUDZ128rr:
8236   case X86::VPMAXUDZ256rr:
8237   case X86::VPMAXUDZrr:
8238   case X86::VPMAXUQZ128rr:
8239   case X86::VPMAXUQZ256rr:
8240   case X86::VPMAXUQZrr:
8241   case X86::VPMAXUWrr:
8242   case X86::VPMAXUWYrr:
8243   case X86::VPMAXUWZ128rr:
8244   case X86::VPMAXUWZ256rr:
8245   case X86::VPMAXUWZrr:
8246   case X86::VPMINSBrr:
8247   case X86::VPMINSBYrr:
8248   case X86::VPMINSBZ128rr:
8249   case X86::VPMINSBZ256rr:
8250   case X86::VPMINSBZrr:
8251   case X86::VPMINSDrr:
8252   case X86::VPMINSDYrr:
8253   case X86::VPMINSDZ128rr:
8254   case X86::VPMINSDZ256rr:
8255   case X86::VPMINSDZrr:
8256   case X86::VPMINSQZ128rr:
8257   case X86::VPMINSQZ256rr:
8258   case X86::VPMINSQZrr:
8259   case X86::VPMINSWrr:
8260   case X86::VPMINSWYrr:
8261   case X86::VPMINSWZ128rr:
8262   case X86::VPMINSWZ256rr:
8263   case X86::VPMINSWZrr:
8264   case X86::VPMINUBrr:
8265   case X86::VPMINUBYrr:
8266   case X86::VPMINUBZ128rr:
8267   case X86::VPMINUBZ256rr:
8268   case X86::VPMINUBZrr:
8269   case X86::VPMINUDrr:
8270   case X86::VPMINUDYrr:
8271   case X86::VPMINUDZ128rr:
8272   case X86::VPMINUDZ256rr:
8273   case X86::VPMINUDZrr:
8274   case X86::VPMINUQZ128rr:
8275   case X86::VPMINUQZ256rr:
8276   case X86::VPMINUQZrr:
8277   case X86::VPMINUWrr:
8278   case X86::VPMINUWYrr:
8279   case X86::VPMINUWZ128rr:
8280   case X86::VPMINUWZ256rr:
8281   case X86::VPMINUWZrr:
8282   // Normal min/max instructions are not commutative because of NaN and signed
8283   // zero semantics, but these are. Thus, there's no need to check for global
8284   // relaxed math; the instructions themselves have the properties we need.
8285   case X86::MAXCPDrr:
8286   case X86::MAXCPSrr:
8287   case X86::MAXCSDrr:
8288   case X86::MAXCSSrr:
8289   case X86::MINCPDrr:
8290   case X86::MINCPSrr:
8291   case X86::MINCSDrr:
8292   case X86::MINCSSrr:
8293   case X86::VMAXCPDrr:
8294   case X86::VMAXCPSrr:
8295   case X86::VMAXCPDYrr:
8296   case X86::VMAXCPSYrr:
8297   case X86::VMAXCPDZ128rr:
8298   case X86::VMAXCPSZ128rr:
8299   case X86::VMAXCPDZ256rr:
8300   case X86::VMAXCPSZ256rr:
8301   case X86::VMAXCPDZrr:
8302   case X86::VMAXCPSZrr:
8303   case X86::VMAXCSDrr:
8304   case X86::VMAXCSSrr:
8305   case X86::VMAXCSDZrr:
8306   case X86::VMAXCSSZrr:
8307   case X86::VMINCPDrr:
8308   case X86::VMINCPSrr:
8309   case X86::VMINCPDYrr:
8310   case X86::VMINCPSYrr:
8311   case X86::VMINCPDZ128rr:
8312   case X86::VMINCPSZ128rr:
8313   case X86::VMINCPDZ256rr:
8314   case X86::VMINCPSZ256rr:
8315   case X86::VMINCPDZrr:
8316   case X86::VMINCPSZrr:
8317   case X86::VMINCSDrr:
8318   case X86::VMINCSSrr:
8319   case X86::VMINCSDZrr:
8320   case X86::VMINCSSZrr:
8321     return true;
8322   case X86::ADDPDrr:
8323   case X86::ADDPSrr:
8324   case X86::ADDSDrr:
8325   case X86::ADDSSrr:
8326   case X86::MULPDrr:
8327   case X86::MULPSrr:
8328   case X86::MULSDrr:
8329   case X86::MULSSrr:
8330   case X86::VADDPDrr:
8331   case X86::VADDPSrr:
8332   case X86::VADDPDYrr:
8333   case X86::VADDPSYrr:
8334   case X86::VADDPDZ128rr:
8335   case X86::VADDPSZ128rr:
8336   case X86::VADDPDZ256rr:
8337   case X86::VADDPSZ256rr:
8338   case X86::VADDPDZrr:
8339   case X86::VADDPSZrr:
8340   case X86::VADDSDrr:
8341   case X86::VADDSSrr:
8342   case X86::VADDSDZrr:
8343   case X86::VADDSSZrr:
8344   case X86::VMULPDrr:
8345   case X86::VMULPSrr:
8346   case X86::VMULPDYrr:
8347   case X86::VMULPSYrr:
8348   case X86::VMULPDZ128rr:
8349   case X86::VMULPSZ128rr:
8350   case X86::VMULPDZ256rr:
8351   case X86::VMULPSZ256rr:
8352   case X86::VMULPDZrr:
8353   case X86::VMULPSZrr:
8354   case X86::VMULSDrr:
8355   case X86::VMULSSrr:
8356   case X86::VMULSDZrr:
8357   case X86::VMULSSZrr:
8358     return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
8359            Inst.getFlag(MachineInstr::MIFlag::FmNsz);
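  // Note (illustrative): the FmReassoc/FmNsz MI flags originate from IR-level
  // fast-math flags, e.g. "%x = fadd reassoc nsz float %a, %b" lowers to an
  // ADDSSrr carrying both flags, which makes it reassociable here.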
8360   default:
8361     return false;
8362   }
8363 }
8364 
8365 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
8366 /// register then, if possible, describe the value in terms of the source
8367 /// register.
8368 static Optional<ParamLoadedValue>
8369 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
8370                          const TargetRegisterInfo *TRI) {
8371   Register DestReg = MI.getOperand(0).getReg();
8372   Register SrcReg = MI.getOperand(1).getReg();
8373 
8374   auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8375 
8376   // If the described register is the destination, just return the source.
8377   if (DestReg == DescribedReg)
8378     return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8379 
8380   // If the described register is a sub-register of the destination register,
8381   // then pick out the source register's corresponding sub-register.
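  // For example (illustrative): given $eax = MOV32rr $ebx, describing $ax
  // resolves to $bx.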
8382   if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
8383     Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
8384     return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
8385   }
8386 
8387   // The remaining case to consider is when the described register is a
8388   // super-register of the destination register. MOV8rr and MOV16rr do not
8389   // write to any of the other bytes in the register, meaning that we'd have to
8390   // describe the value using a combination of the source register and the
8391   // non-overlapping bits in the described register, which is not currently
8392   // possible.
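  //
  // For example (illustrative): $cl = MOV8rr $bl cannot be used to describe
  // $ecx, because bits 8-31 of $ecx are left untouched and are unknown here.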
8393   if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
8394       !TRI->isSuperRegister(DestReg, DescribedReg))
8395     return None;
8396 
8397   assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
8398   return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8399 }
8400 
8401 Optional<ParamLoadedValue>
8402 X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
8403   const MachineOperand *Op = nullptr;
8404   DIExpression *Expr = nullptr;
8405 
8406   const TargetRegisterInfo *TRI = &getRegisterInfo();
8407 
8408   switch (MI.getOpcode()) {
8409   case X86::LEA32r:
8410   case X86::LEA64r:
8411   case X86::LEA64_32r: {
8412     // We may need to describe a 64-bit parameter with a 32-bit LEA.
8413     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8414       return None;
8415 
8416     // Operand 4 could be a global address. For now we do not support
8417     // such situations.
8418     if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
8419       return None;
8420 
8421     const MachineOperand &Op1 = MI.getOperand(1);
8422     const MachineOperand &Op2 = MI.getOperand(3);
8423     assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
8424                            Register::isPhysicalRegister(Op2.getReg())));
8425 
8426     // Omit situations like:
8427     // %rsi = lea %rsi, 4, ...
8428     if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
8429         Op2.getReg() == MI.getOperand(0).getReg())
8430       return None;
8431     else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
8432               TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
8433              (Op2.getReg() != X86::NoRegister &&
8434               TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
8435       return None;
8436 
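    // Worked example (illustrative, not taken from a real test): for
    //   $rcx = LEA64r $rdi, 2, $rsi, 16, $noreg
    // Op ends up as the base register ($rdi) and the appended expression is
    // roughly
    //   DW_OP_breg4 0, DW_OP_constu 2, DW_OP_mul, DW_OP_plus,
    //   DW_OP_plus_uconst 16
    // so the described value is rdi + 2 * rsi + 16 (DWARF register 4 is rsi).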
8437     int64_t Coef = MI.getOperand(2).getImm();
8438     int64_t Offset = MI.getOperand(4).getImm();
8439     SmallVector<uint64_t, 8> Ops;
8440 
8441     if ((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI())
8442       Op = &Op1;
8445 
8446     if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
8447       Ops.push_back(dwarf::DW_OP_constu);
8448       Ops.push_back(Coef + 1);
8449       Ops.push_back(dwarf::DW_OP_mul);
8450     } else {
8451       if (Op && Op2.getReg() != X86::NoRegister) {
8452         int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
8453         if (dwarfReg < 0)
8454           return None;
8455         else if (dwarfReg < 32) {
8456           Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
8457           Ops.push_back(0);
8458         } else {
8459           Ops.push_back(dwarf::DW_OP_bregx);
8460           Ops.push_back(dwarfReg);
8461           Ops.push_back(0);
8462         }
8463       } else if (!Op) {
8464         assert(Op2.getReg() != X86::NoRegister);
8465         Op = &Op2;
8466       }
8467 
8468       if (Coef > 1) {
8469         assert(Op2.getReg() != X86::NoRegister);
8470         Ops.push_back(dwarf::DW_OP_constu);
8471         Ops.push_back(Coef);
8472         Ops.push_back(dwarf::DW_OP_mul);
8473       }
8474 
8475       if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
8476           Op2.getReg() != X86::NoRegister) {
8477         Ops.push_back(dwarf::DW_OP_plus);
8478       }
8479     }
8480 
8481     DIExpression::appendOffset(Ops, Offset);
8482     Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);
8483 
8484     return ParamLoadedValue(*Op, Expr);
8485   }
8486   case X86::MOV8ri:
8487   case X86::MOV16ri:
8488     // TODO: Handle MOV8ri and MOV16ri.
8489     return None;
8490   case X86::MOV32ri:
8491   case X86::MOV64ri:
8492   case X86::MOV64ri32:
8493     // MOV32ri may be used for producing zero-extended 32-bit immediates in
8494     // 64-bit parameters, so we need to consider super-registers.
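    // For example (illustrative): $eax = MOV32ri 42 also describes $rax as
    // 42, since a 32-bit write zero-extends into the full 64-bit register.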
8495     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8496       return None;
8497     return ParamLoadedValue(MI.getOperand(1), Expr);
8498   case X86::MOV8rr:
8499   case X86::MOV16rr:
8500   case X86::MOV32rr:
8501   case X86::MOV64rr:
8502     return describeMOVrrLoadedValue(MI, Reg, TRI);
8503   case X86::XOR32rr: {
8504     // 64-bit parameters are zero-materialized using XOR32rr, so also consider
8505     // super-registers.
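    // For example (illustrative), after
    //   $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags
    // both $eax and $rax can be described as the constant 0.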
8506     if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8507       return None;
8508     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
8509       return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
8510     return None;
8511   }
8512   case X86::MOVSX64rr32: {
8513     // We may need to describe the lower 32 bits of the MOVSX; for example, in
8514     // cases like this:
8515     //
8516     //  $ebx = [...]
8517     //  $rdi = MOVSX64rr32 $ebx
8518     //  $esi = MOV32rr $edi
8519     if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
8520       return None;
8521 
8522     Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8523 
8524     // If the described register is the destination register we need to
8525     // sign-extend the source register from 32 bits. The other case we handle
8526     // is when the described register is the 32-bit sub-register of the
8527   // destination register, in which case we just need to return the source
8528     // register.
8529     if (Reg == MI.getOperand(0).getReg())
8530       Expr = DIExpression::appendExt(Expr, 32, 64, true);
8531     else
8532       assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
8533              "Unhandled sub-register case for MOVSX64rr32");
8534 
8535     return ParamLoadedValue(MI.getOperand(1), Expr);
8536   }
8537   default:
8538     assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
8539     return TargetInstrInfo::describeLoadedValue(MI, Reg);
8540   }
8541 }
8542 
8543 /// This is an architecture-specific helper function of reassociateOps.
8544 /// Set special operand attributes for new instructions after reassociation.
8545 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
8546                                          MachineInstr &OldMI2,
8547                                          MachineInstr &NewMI1,
8548                                          MachineInstr &NewMI2) const {
8549   // Propagate FP flags from the original instructions.
8550   // But clear poison-generating flags because those may not be valid now.
8551   // TODO: There should be a helper function for copying only fast-math-flags.
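  // For example (illustrative): if both originals carried FmReassoc and
  // NoSWrap, the new instructions keep FmReassoc but drop NoSWrap, since the
  // reassociated operand order may wrap where the original provably did not.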
8552   uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
8553   NewMI1.setFlags(IntersectedFlags);
8554   NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
8555   NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
8556   NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
8557 
8558   NewMI2.setFlags(IntersectedFlags);
8559   NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
8560   NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
8561   NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
8562 
8563   // Integer instructions may define an implicit EFLAGS dest register operand.
8564   MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
8565   MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
8566 
8567   assert(!OldFlagDef1 == !OldFlagDef2 &&
8568          "Unexpected instruction type for reassociation");
8569 
8570   if (!OldFlagDef1 || !OldFlagDef2)
8571     return;
8572 
8573   assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
8574          "Must have dead EFLAGS operand in reassociable instruction");
8575 
8576   MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
8577   MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
8578 
8579   assert(NewFlagDef1 && NewFlagDef2 &&
8580          "Unexpected operand in reassociable instruction");
8581 
8582   // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
8583   // of this pass or other passes. The EFLAGS operands must be dead in these new
8584   // instructions because the EFLAGS operands in the original instructions must
8585   // be dead in order for reassociation to occur.
8586   NewFlagDef1->setIsDead();
8587   NewFlagDef2->setIsDead();
8588 }
8589 
8590 std::pair<unsigned, unsigned>
8591 X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
8592   return std::make_pair(TF, 0u);
8593 }
8594 
8595 ArrayRef<std::pair<unsigned, const char *>>
8596 X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
8597   using namespace X86II;
8598   static const std::pair<unsigned, const char *> TargetFlags[] = {
8599       {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
8600       {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
8601       {MO_GOT, "x86-got"},
8602       {MO_GOTOFF, "x86-gotoff"},
8603       {MO_GOTPCREL, "x86-gotpcrel"},
8604       {MO_PLT, "x86-plt"},
8605       {MO_TLSGD, "x86-tlsgd"},
8606       {MO_TLSLD, "x86-tlsld"},
8607       {MO_TLSLDM, "x86-tlsldm"},
8608       {MO_GOTTPOFF, "x86-gottpoff"},
8609       {MO_INDNTPOFF, "x86-indntpoff"},
8610       {MO_TPOFF, "x86-tpoff"},
8611       {MO_DTPOFF, "x86-dtpoff"},
8612       {MO_NTPOFF, "x86-ntpoff"},
8613       {MO_GOTNTPOFF, "x86-gotntpoff"},
8614       {MO_DLLIMPORT, "x86-dllimport"},
8615       {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
8616       {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
8617       {MO_TLVP, "x86-tlvp"},
8618       {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
8619       {MO_SECREL, "x86-secrel"},
8620       {MO_COFFSTUB, "x86-coffstub"}};
8621   return makeArrayRef(TargetFlags);
8622 }
8623 
8624 namespace {
8625   /// Create Global Base Reg pass. This initializes the PIC
8626   /// global base register for x86-32.
8627   struct CGBR : public MachineFunctionPass {
8628     static char ID;
8629     CGBR() : MachineFunctionPass(ID) {}
8630 
8631     bool runOnMachineFunction(MachineFunction &MF) override {
8632       const X86TargetMachine *TM =
8633         static_cast<const X86TargetMachine *>(&MF.getTarget());
8634       const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
8635 
8636       // Don't do anything in the 64-bit small and kernel code models. They use
8637       // RIP-relative addressing for everything.
8638       if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
8639                             TM->getCodeModel() == CodeModel::Kernel))
8640         return false;
8641 
8642       // Only emit a global base reg in PIC mode.
8643       if (!TM->isPositionIndependent())
8644         return false;
8645 
8646       X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8647       Register GlobalBaseReg = X86FI->getGlobalBaseReg();
8648 
8649       // If we didn't need a GlobalBaseReg, don't insert code.
8650       if (GlobalBaseReg == 0)
8651         return false;
8652 
8653       // Insert the set of GlobalBaseReg into the first MBB of the function
8654       MachineBasicBlock &FirstMBB = MF.front();
8655       MachineBasicBlock::iterator MBBI = FirstMBB.begin();
8656       DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
8657       MachineRegisterInfo &RegInfo = MF.getRegInfo();
8658       const X86InstrInfo *TII = STI.getInstrInfo();
8659 
8660       Register PC;
8661       if (STI.isPICStyleGOT())
8662         PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
8663       else
8664         PC = GlobalBaseReg;
8665 
8666       if (STI.is64Bit()) {
8667         if (TM->getCodeModel() == CodeModel::Medium) {
8668           // In the medium code model, use a RIP-relative LEA to materialize the
8669           // GOT.
8670           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
8671               .addReg(X86::RIP)
8672               .addImm(0)
8673               .addReg(0)
8674               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
8675               .addReg(0);
8676         } else if (TM->getCodeModel() == CodeModel::Large) {
8677           // In the large code model, we are aiming for this code, though the
8678           // register allocation may vary:
8679           //   leaq .LN$pb(%rip), %rax
8680           //   movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
8681           //   addq %rcx, %rax
8682           // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
8683           Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8684           Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8685           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
8686               .addReg(X86::RIP)
8687               .addImm(0)
8688               .addReg(0)
8689               .addSym(MF.getPICBaseSymbol())
8690               .addReg(0);
8691           std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
8692           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
8693               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8694                                  X86II::MO_PIC_BASE_OFFSET);
8695           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
8696               .addReg(PBReg, RegState::Kill)
8697               .addReg(GOTReg, RegState::Kill);
8698         } else {
8699           llvm_unreachable("unexpected code model");
8700         }
8701       } else {
8702         // Operand of MovePCtoStack is completely ignored by asm printer. It's
8703         // only used in JIT code emission as a displacement to the PC.
8704         BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
8705 
8706         // If we're using vanilla 'GOT' PIC style, we should use relative
8707         // addressing not to the PC, but to the _GLOBAL_OFFSET_TABLE_ external.
8708         if (STI.isPICStyleGOT()) {
8709           // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
8710           // %some_register
8711           BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
8712               .addReg(PC)
8713               .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8714                                  X86II::MO_GOT_ABSOLUTE_ADDRESS);
8715         }
8716       }
8717 
8718       return true;
8719     }
8720 
8721     StringRef getPassName() const override {
8722       return "X86 PIC Global Base Reg Initialization";
8723     }
8724 
8725     void getAnalysisUsage(AnalysisUsage &AU) const override {
8726       AU.setPreservesCFG();
8727       MachineFunctionPass::getAnalysisUsage(AU);
8728     }
8729   };
8730 } // namespace
8731 
8732 char CGBR::ID = 0;
8733 FunctionPass*
8734 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
8735 
8736 namespace {
8737   struct LDTLSCleanup : public MachineFunctionPass {
8738     static char ID;
8739     LDTLSCleanup() : MachineFunctionPass(ID) {}
8740 
8741     bool runOnMachineFunction(MachineFunction &MF) override {
8742       if (skipFunction(MF.getFunction()))
8743         return false;
8744 
8745       X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
8746       if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
8747         // No point folding accesses if there aren't at least two.
8748         return false;
8749       }
8750 
8751       MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
8752       return VisitNode(DT->getRootNode(), 0);
8753     }
8754 
8755     // Visit the dominator subtree rooted at Node in pre-order.
8756     // If TLSBaseAddrReg is non-zero, then use it to replace any
8757     // TLS_base_addr instructions. Otherwise, create the register
8758     // when the first such instruction is seen, and then use it
8759     // as we encounter more instructions.
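    //
    // For example (illustrative): if a function contains two TLS_base_addr64
    // pseudos, the result of the first one is saved into a virtual register,
    // and the second pseudo is erased and replaced by a COPY from that
    // register back into RAX.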
8760     bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
8761       MachineBasicBlock *BB = Node->getBlock();
8762       bool Changed = false;
8763 
8764       // Traverse the current block.
8765       for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
8766            ++I) {
8767         switch (I->getOpcode()) {
8768           case X86::TLS_base_addr32:
8769           case X86::TLS_base_addr64:
8770             if (TLSBaseAddrReg)
8771               I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
8772             else
8773               I = SetRegister(*I, &TLSBaseAddrReg);
8774             Changed = true;
8775             break;
8776           default:
8777             break;
8778         }
8779       }
8780 
8781       // Visit the children of this block in the dominator tree.
8782       for (auto I = Node->begin(), E = Node->end(); I != E; ++I) {
8783         Changed |= VisitNode(*I, TLSBaseAddrReg);
8784       }
8785 
8786       return Changed;
8787     }
8788 
8789     // Replace the TLS_base_addr instruction I with a copy from
8790     // TLSBaseAddrReg, returning the new instruction.
8791     MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
8792                                          unsigned TLSBaseAddrReg) {
8793       MachineFunction *MF = I.getParent()->getParent();
8794       const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
8795       const bool is64Bit = STI.is64Bit();
8796       const X86InstrInfo *TII = STI.getInstrInfo();
8797 
8798       // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
8799       MachineInstr *Copy =
8800           BuildMI(*I.getParent(), I, I.getDebugLoc(),
8801                   TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
8802               .addReg(TLSBaseAddrReg);
8803 
8804       // Erase the TLS_base_addr instruction.
8805       I.eraseFromParent();
8806 
8807       return Copy;
8808     }
8809 
8810     // Create a virtual register in *TLSBaseAddrReg, and populate it by
8811     // inserting a copy instruction after I. Returns the new instruction.
8812     MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
8813       MachineFunction *MF = I.getParent()->getParent();
8814       const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
8815       const bool is64Bit = STI.is64Bit();
8816       const X86InstrInfo *TII = STI.getInstrInfo();
8817 
8818       // Create a virtual register for the TLS base address.
8819       MachineRegisterInfo &RegInfo = MF->getRegInfo();
8820       *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
8821                                                       ? &X86::GR64RegClass
8822                                                       : &X86::GR32RegClass);
8823 
8824       // Insert a copy from RAX/EAX to TLSBaseAddrReg.
8825       MachineInstr *Next = I.getNextNode();
8826       MachineInstr *Copy =
8827           BuildMI(*I.getParent(), Next, I.getDebugLoc(),
8828                   TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
8829               .addReg(is64Bit ? X86::RAX : X86::EAX);
8830 
8831       return Copy;
8832     }
8833 
8834     StringRef getPassName() const override {
8835       return "Local Dynamic TLS Access Clean-up";
8836     }
8837 
8838     void getAnalysisUsage(AnalysisUsage &AU) const override {
8839       AU.setPreservesCFG();
8840       AU.addRequired<MachineDominatorTree>();
8841       MachineFunctionPass::getAnalysisUsage(AU);
8842     }
8843   };
8844 } // namespace
8845 
8846 char LDTLSCleanup::ID = 0;
8847 FunctionPass*
8848 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
8849 
8850 /// Constants defining how certain sequences should be outlined.
8851 ///
8852 /// \p MachineOutlinerDefault implies that the function is called with a call
8853 /// instruction, and a return must be emitted for the outlined function frame.
8854 ///
8855 /// That is,
8856 ///
8857 /// I1                                 OUTLINED_FUNCTION:
8858 /// I2 --> call OUTLINED_FUNCTION       I1
8859 /// I3                                  I2
8860 ///                                     I3
8861 ///                                     ret
8862 ///
8863 /// * Call construction overhead: 1 (call instruction)
8864 /// * Frame construction overhead: 1 (return instruction)
8865 ///
8866 /// \p MachineOutlinerTailCall implies that the function is being tail called.
8867 /// A jump is emitted instead of a call, and the return is already present in
8868 /// the outlined sequence. That is,
8869 ///
8870 /// I1                                 OUTLINED_FUNCTION:
8871 /// I2 --> jmp OUTLINED_FUNCTION       I1
8872 /// ret                                I2
8873 ///                                    ret
8874 ///
8875 /// * Call construction overhead: 1 (jump instruction)
8876 /// * Frame construction overhead: 0 (don't need to return)
8877 ///
8878 enum MachineOutlinerClass {
8879   MachineOutlinerDefault,
8880   MachineOutlinerTailCall
8881 };
8882 
8883 outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
8884     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
8885   unsigned SequenceSize =
8886       std::accumulate(RepeatedSequenceLocs[0].front(),
8887                       std::next(RepeatedSequenceLocs[0].back()), 0,
8888                       [](unsigned Sum, const MachineInstr &MI) {
8889                         // FIXME: x86 doesn't implement getInstSizeInBytes, so
8890                         // we can't tell the cost.  Just assume each instruction
8891                         // is one byte.
8892                         if (MI.isDebugInstr() || MI.isKill())
8893                           return Sum;
8894                         return Sum + 1;
8895                       });
8896 
8897   // Check whether CFI instructions are present; if they are, count how many
8898   // CFI instructions appear in the candidate.
8899   unsigned CFICount = 0;
8900   MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front();
8901   for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx();
8902        Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) {
8905     if (MBBI->isCFIInstruction())
8908       CFICount++;
8910     MBBI++;
8911   }
8912 
8913   // We compare the number of found CFI instructions to the number of CFI
8914   // instructions in the parent function for each candidate.  We must check this
8915   // since if we outline one of the CFI instructions in a function, we have to
8916   // outline them all for correctness. If we do not, the address offsets will be
8917   // incorrect between the two sections of the program.
8918   for (outliner::Candidate &C : RepeatedSequenceLocs) {
8919     const std::vector<MCCFIInstruction> &CFIInstructions =
8920         C.getMF()->getFrameInstructions();
8921 
8922     if (CFICount > 0 && CFICount != CFIInstructions.size())
8923       return outliner::OutlinedFunction();
8924   }
8925 
8926   // FIXME: Use real size in bytes for call and ret instructions.
8927   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
8928     for (outliner::Candidate &C : RepeatedSequenceLocs)
8929       C.setCallInfo(MachineOutlinerTailCall, 1);
8930 
8931     return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
8932                                       0, // Number of bytes to emit frame.
8933                                       MachineOutlinerTailCall // Type of frame.
8934     );
8935   }
8936 
8937   if (CFICount > 0)
8938     return outliner::OutlinedFunction();
8939 
8940   for (outliner::Candidate &C : RepeatedSequenceLocs)
8941     C.setCallInfo(MachineOutlinerDefault, 1);
8942 
8943   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
8944                                     MachineOutlinerDefault);
8945 }
8946 
8947 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
8948                                            bool OutlineFromLinkOnceODRs) const {
8949   const Function &F = MF.getFunction();
8950 
8951   // Does the function use a red zone? If it does, then we can't risk messing
8952   // with the stack.
8953   if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
8954     // It could have a red zone. If it does, then we don't want to touch it.
8955     const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8956     if (!X86FI || X86FI->getUsesRedZone())
8957       return false;
8958   }
8959 
8960   // If we *don't* want to outline from things that could potentially be deduped
8961   // then return false.
8962   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
8963     return false;
8964 
8965   // This function is viable for outlining, so return true.
8966   return true;
8967 }
8968 
8969 outliner::InstrType
8970 X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
8971   MachineInstr &MI = *MIT;
8972   // Don't allow debug values to impact outlining type.
8973   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
8974     return outliner::InstrType::Invisible;
8975 
8976   // At this point, KILL instructions don't really tell us much so we can go
8977   // ahead and skip over them.
8978   if (MI.isKill())
8979     return outliner::InstrType::Invisible;
8980 
8981   // Is this a tail call? If yes, we can outline as a tail call.
8982   if (isTailCall(MI))
8983     return outliner::InstrType::Legal;
8984 
8985   // Is this the terminator of a basic block?
8986   if (MI.isTerminator() || MI.isReturn()) {
8987 
8988     // Does its parent have any successors in its MachineFunction?
8989     if (MI.getParent()->succ_empty())
8990       return outliner::InstrType::Legal;
8991 
8992     // It does, so we can't tail call it.
8993     return outliner::InstrType::Illegal;
8994   }
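  // (Illustrative: a bare return ending a block with no successors can be
  // folded into an outlined tail call, whereas a terminator in a block that
  // still has successors cannot.)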
8995 
8996   // Don't outline anything that modifies or reads from the stack pointer.
8997   //
8998   // FIXME: There are instructions which are being manually built without
8999   // explicit uses/defs so we also have to check the MCInstrDesc. We should be
9000   // able to remove the extra checks once those are fixed up. For example,
9001   // sometimes we might get something like %rax = POP64r 1. This won't be
9002   // caught by modifiesRegister or readsRegister even though the instruction
9003   // really ought to be formed so that modifiesRegister/readsRegister would
9004   // catch it.
9005   if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
9006       MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
9007       MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
9008     return outliner::InstrType::Illegal;
9009 
9010   // Outlined calls change the instruction pointer, so don't read from it.
9011   if (MI.readsRegister(X86::RIP, &RI) ||
9012       MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
9013       MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
9014     return outliner::InstrType::Illegal;
9015 
9016   // Positions can't safely be outlined.
9017   if (MI.isPosition())
9018     return outliner::InstrType::Illegal;
9019 
9020   // Make sure none of the operands of this instruction do anything tricky.
9021   for (const MachineOperand &MOP : MI.operands())
9022     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
9023         MOP.isTargetIndex())
9024       return outliner::InstrType::Illegal;
9025 
9026   return outliner::InstrType::Legal;
9027 }
9028 
9029 void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
9030                                           MachineFunction &MF,
9031                                           const outliner::OutlinedFunction &OF)
9032                                           const {
9033   // If we're a tail call, we already have a return, so don't do anything.
9034   if (OF.FrameConstructionID == MachineOutlinerTailCall)
9035     return;
9036 
9037   // We're a normal call, so our sequence doesn't have a return instruction.
9038   // Add it in.
9039   MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
9040   MBB.insert(MBB.end(), retq);
9041 }
9042 
9043 MachineBasicBlock::iterator
9044 X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
9045                                  MachineBasicBlock::iterator &It,
9046                                  MachineFunction &MF,
9047                                  const outliner::Candidate &C) const {
9048   // Is it a tail call?
9049   if (C.CallConstructionID == MachineOutlinerTailCall) {
9050     // Yes, just insert a JMP.
9051     It = MBB.insert(It,
9052                   BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
9053                       .addGlobalAddress(M.getNamedValue(MF.getName())));
9054   } else {
9055     // No, insert a call.
9056     It = MBB.insert(It,
9057                   BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
9058                       .addGlobalAddress(M.getNamedValue(MF.getName())));
9059   }
9060 
9061   return It;
9062 }
9063 
9064 #define GET_INSTRINFO_HELPERS
9065 #include "X86GenInstrInfo.inc"
9066