xref: /freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp (revision a90b9d0159070121c221b966469c3e36d912bf82)
1 //===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the SystemZ implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SystemZInstrInfo.h"
14 #include "MCTargetDesc/SystemZMCTargetDesc.h"
15 #include "SystemZ.h"
16 #include "SystemZInstrBuilder.h"
17 #include "SystemZSubtarget.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/LiveInterval.h"
20 #include "llvm/CodeGen/LiveIntervals.h"
21 #include "llvm/CodeGen/LivePhysRegs.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/SlotIndexes.h"
31 #include "llvm/CodeGen/StackMaps.h"
32 #include "llvm/CodeGen/TargetInstrInfo.h"
33 #include "llvm/CodeGen/TargetSubtargetInfo.h"
34 #include "llvm/CodeGen/VirtRegMap.h"
35 #include "llvm/MC/MCInstrDesc.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/Support/BranchProbability.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Target/TargetMachine.h"
41 #include <cassert>
42 #include <cstdint>
43 #include <iterator>
44 
45 using namespace llvm;
46 
47 #define GET_INSTRINFO_CTOR_DTOR
48 #define GET_INSTRMAP_INFO
49 #include "SystemZGenInstrInfo.inc"
50 
51 #define DEBUG_TYPE "systemz-II"
52 
53 // Return a mask with Count low bits set.
54 static uint64_t allOnes(unsigned int Count) {
55   return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
56 }
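// Illustrative values only (not part of the original source):
//   allOnes(0)  == 0x0
//   allOnes(1)  == 0x1
//   allOnes(16) == 0xffff
//   allOnes(64) == 0xffffffffffffffff
// The two-step shift avoids the undefined behavior that a single
// "uint64_t(1) << 64" would have when Count is 64.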
57 
58 // Pin the vtable to this file.
59 void SystemZInstrInfo::anchor() {}
60 
61 SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
62     : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
63       RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
64       STI(sti) {}
65 
66 // MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
67 // each having the opcode given by NewOpcode.
68 void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
69                                  unsigned NewOpcode) const {
70   MachineBasicBlock *MBB = MI->getParent();
71   MachineFunction &MF = *MBB->getParent();
72 
73   // Get two load or store instructions.  Use the original instruction for
74   // one of them and create a clone for the other.
75   MachineInstr *HighPartMI = MF.CloneMachineInstr(&*MI);
76   MachineInstr *LowPartMI = &*MI;
77   MBB->insert(LowPartMI, HighPartMI);
78 
79   // Set up the two 64-bit registers and remember super reg and its flags.
80   MachineOperand &HighRegOp = HighPartMI->getOperand(0);
81   MachineOperand &LowRegOp = LowPartMI->getOperand(0);
82   Register Reg128 = LowRegOp.getReg();
83   unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
84   unsigned Reg128Undef  = getUndefRegState(LowRegOp.isUndef());
85   HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
86   LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
87 
88   // The address in the first (high) instruction is already correct.
89   // Adjust the offset in the second (low) instruction.
90   MachineOperand &HighOffsetOp = HighPartMI->getOperand(2);
91   MachineOperand &LowOffsetOp = LowPartMI->getOperand(2);
92   LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
93 
94   // Set the opcodes.
95   unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
96   unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
97   assert(HighOpcode && LowOpcode && "Both offsets should be in range");
98   HighPartMI->setDesc(get(HighOpcode));
99   LowPartMI->setDesc(get(LowOpcode));
100 
101   MachineInstr *FirstMI = HighPartMI;
102   if (MI->mayStore()) {
103     FirstMI->getOperand(0).setIsKill(false);
104     // Add implicit uses of the super register in case one of the subregs is
105     // undefined. We could track liveness and skip storing an undefined
106     // subreg, but this is hopefully rare (discovered with llvm-stress).
107     // If Reg128 was killed, set kill flag on MI.
108     unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
109     MachineInstrBuilder(MF, HighPartMI).addReg(Reg128, Reg128UndefImpl);
110     MachineInstrBuilder(MF, LowPartMI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
111   } else {
112     // If HighPartMI clobbers any of the address registers, it needs to come
113     // after LowPartMI.
114     auto overlapsAddressReg = [&](Register Reg) -> bool {
115       return RI.regsOverlap(Reg, MI->getOperand(1).getReg()) ||
116              RI.regsOverlap(Reg, MI->getOperand(3).getReg());
117     };
118     if (overlapsAddressReg(HighRegOp.getReg())) {
119       assert(!overlapsAddressReg(LowRegOp.getReg()) &&
120              "Both loads clobber address!");
121       MBB->splice(HighPartMI, MBB, LowPartMI);
122       FirstMI = LowPartMI;
123     }
124   }
125 
126   // Clear the kill flags on the address registers in the first instruction.
127   FirstMI->getOperand(1).setIsKill(false);
128   FirstMI->getOperand(3).setIsKill(false);
129 }
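// A hedged illustration of the split (register names are examples only):
//   L128 %r0q, 0(%r2)
// becomes two 64-bit loads of the subregisters, with the low half at
// offset +8:
//   LG %r0d, 0(%r2)    ; subreg_h64
//   LG %r1d, 8(%r2)    ; subreg_l64
// If the high-part load would clobber one of the address registers, the
// two instructions are swapped as done above; for stores, implicit uses
// of the full 128-bit register are added instead.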
130 
131 // Split ADJDYNALLOC instruction MI.
132 void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
133   MachineBasicBlock *MBB = MI->getParent();
134   MachineFunction &MF = *MBB->getParent();
135   MachineFrameInfo &MFFrame = MF.getFrameInfo();
136   MachineOperand &OffsetMO = MI->getOperand(2);
137   SystemZCallingConventionRegisters *Regs = STI.getSpecialRegisters();
138 
139   uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
140                      Regs->getCallFrameSize() +
141                      Regs->getStackPointerBias() +
142                      OffsetMO.getImm());
143   unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
144   assert(NewOpcode && "No support for huge argument lists yet");
145   MI->setDesc(get(NewOpcode));
146   OffsetMO.setImm(Offset);
147 }
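// Hedged example, assuming the ELF ABI values (160-byte call frame,
// zero stack-pointer bias): with a 32-byte maximum call frame and an
// ADJDYNALLOC immediate of 8, the pseudo becomes
//   LA %r2, 200(%r15)        ; 32 + 160 + 0 + 8 = 200
// getOpcodeForOffset switches to LAY when the combined offset no longer
// fits LA's unsigned 12-bit displacement.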
148 
149 // MI is an RI-style pseudo instruction.  Replace it with LowOpcode
150 // if the first operand is a low GR32 and HighOpcode if the first operand
151 // is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
152 // and HighOpcode takes an unsigned 32-bit operand.  In those cases,
153 // MI has the same kind of operand as LowOpcode, so needs to be converted
154 // if HighOpcode is used.
155 void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
156                                       unsigned HighOpcode,
157                                       bool ConvertHigh) const {
158   Register Reg = MI.getOperand(0).getReg();
159   bool IsHigh = SystemZ::isHighReg(Reg);
160   MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
161   if (IsHigh && ConvertHigh)
162     MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
163 }
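// For example, LHIMux is expanded below with LowOpcode = LHI,
// HighOpcode = IIHF and ConvertHigh = true (register names illustrative):
//   LHIMux %r2l, -1  ->  LHI  %r2l, -1
//   LHIMux %r2h, -1  ->  IIHF %r2h, 4294967295   ; signed imm reinterpreted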
164 
165 // MI is a three-operand RIE-style pseudo instruction.  Replace it with
166 // LowOpcodeK if the registers are both low GR32s, otherwise use a move
167 // followed by HighOpcode or LowOpcode, depending on whether the target
168 // is a high or low GR32.
169 void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
170                                        unsigned LowOpcodeK,
171                                        unsigned HighOpcode) const {
172   Register DestReg = MI.getOperand(0).getReg();
173   Register SrcReg = MI.getOperand(1).getReg();
174   bool DestIsHigh = SystemZ::isHighReg(DestReg);
175   bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
176   if (!DestIsHigh && !SrcIsHigh)
177     MI.setDesc(get(LowOpcodeK));
178   else {
179     if (DestReg != SrcReg) {
180       emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
181                     SystemZ::LR, 32, MI.getOperand(1).isKill(),
182                     MI.getOperand(1).isUndef());
183       MI.getOperand(1).setReg(DestReg);
184     }
185     MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
186     MI.tieOperands(0, 1);
187   }
188 }
189 
190 // MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
191 // if the first operand is a low GR32 and HighOpcode if the first operand
192 // is a high GR32.
193 void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
194                                        unsigned HighOpcode) const {
195   Register Reg = MI.getOperand(0).getReg();
196   unsigned Opcode = getOpcodeForOffset(
197       SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
198       MI.getOperand(2).getImm());
199   MI.setDesc(get(Opcode));
200 }
201 
202 // MI is a load-on-condition pseudo instruction with a single register
203 // (source or destination) operand.  Replace it with LowOpcode if the
204 // register is a low GR32 and HighOpcode if the register is a high GR32.
205 void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
206                                        unsigned HighOpcode) const {
207   Register Reg = MI.getOperand(0).getReg();
208   unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
209   MI.setDesc(get(Opcode));
210 }
211 
212 // MI is an RR-style pseudo instruction that zero-extends the low Size bits
213 // of one GRX32 into another.  Replace it with LowOpcode if both operands
214 // are low registers, otherwise use RISB[LH]G.
215 void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
216                                         unsigned Size) const {
217   MachineInstrBuilder MIB =
218     emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
219                MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
220                Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
221 
222   // Keep the remaining operands as-is.
223   for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
224     MIB.add(MO);
225 
226   MI.eraseFromParent();
227 }
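// For example, LLCRMux is expanded below with LowOpcode = LLCR and
// Size = 8: two low registers simply use LLCR, while any high-register
// involvement goes through the RISB[LH]G form built by emitGRX32Move.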
228 
229 void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
230   MachineBasicBlock *MBB = MI->getParent();
231   MachineFunction &MF = *MBB->getParent();
232   const Register Reg64 = MI->getOperand(0).getReg();
233   const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
234 
  235   // EAR can only load the low subregister, so use a shift for %a0 to produce
236   // the GR containing %a0 and %a1.
237 
238   // ear <reg>, %a0
239   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
240     .addReg(SystemZ::A0)
241     .addReg(Reg64, RegState::ImplicitDefine);
242 
243   // sllg <reg>, <reg>, 32
244   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
245     .addReg(Reg64)
246     .addReg(0)
247     .addImm(32);
248 
249   // ear <reg>, %a1
250   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
251     .addReg(SystemZ::A1);
252 
253   // lg <reg>, 40(<reg>)
254   MI->setDesc(get(SystemZ::LG));
255   MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
256 }
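// The access registers %a0 and %a1 together hold the 64-bit thread
// pointer, which is why the two EARs plus the SLLG are needed; the final
// LG then reads the stack-protector canary, which (on the usual s390x
// TLS layout) lives at offset 40 from that pointer.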
257 
258 // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
259 // DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
260 // are low registers, otherwise use RISB[LH]G.  Size is the number of bits
261 // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
262 // KillSrc is true if this move is the last use of SrcReg.
263 MachineInstrBuilder
264 SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
265                                 MachineBasicBlock::iterator MBBI,
266                                 const DebugLoc &DL, unsigned DestReg,
267                                 unsigned SrcReg, unsigned LowLowOpcode,
268                                 unsigned Size, bool KillSrc,
269                                 bool UndefSrc) const {
270   unsigned Opcode;
271   bool DestIsHigh = SystemZ::isHighReg(DestReg);
272   bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
273   if (DestIsHigh && SrcIsHigh)
274     Opcode = SystemZ::RISBHH;
275   else if (DestIsHigh && !SrcIsHigh)
276     Opcode = SystemZ::RISBHL;
277   else if (!DestIsHigh && SrcIsHigh)
278     Opcode = SystemZ::RISBLH;
279   else {
280     return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
281       .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
282   }
283   unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
284   return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
285     .addReg(DestReg, RegState::Undef)
286     .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
287     .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
288 }
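// Hedged example: zero-extending the low 8 bits of a low GR32 into a
// high GR32 (Size = 8) produces, with illustrative register names,
//   RISBHL %r2h, %r3l, 24, 159, 32
// where 24 = 32 - Size, 159 = 128 + 31 (the 128 requests that the
// remaining bits be zeroed) and the rotate of 32 moves data between the
// two register halves.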
289 
290 MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
291                                                        bool NewMI,
292                                                        unsigned OpIdx1,
293                                                        unsigned OpIdx2) const {
294   auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
295     if (NewMI)
296       return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
297     return MI;
298   };
299 
300   switch (MI.getOpcode()) {
301   case SystemZ::SELRMux:
302   case SystemZ::SELFHR:
303   case SystemZ::SELR:
304   case SystemZ::SELGR:
305   case SystemZ::LOCRMux:
306   case SystemZ::LOCFHR:
307   case SystemZ::LOCR:
308   case SystemZ::LOCGR: {
309     auto &WorkingMI = cloneIfNew(MI);
310     // Invert condition.
311     unsigned CCValid = WorkingMI.getOperand(3).getImm();
312     unsigned CCMask = WorkingMI.getOperand(4).getImm();
313     WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
314     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
315                                                    OpIdx1, OpIdx2);
316   }
317   default:
318     return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
319   }
320 }
321 
322 // If MI is a simple load or store for a frame object, return the register
323 // it loads or stores and set FrameIndex to the index of the frame object.
324 // Return 0 otherwise.
325 //
326 // Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
327 static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
328                         unsigned Flag) {
329   const MCInstrDesc &MCID = MI.getDesc();
330   if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
331       MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
332     FrameIndex = MI.getOperand(1).getIndex();
333     return MI.getOperand(0).getReg();
334   }
335   return 0;
336 }
337 
338 unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
339                                                int &FrameIndex) const {
340   return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
341 }
342 
343 unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
344                                               int &FrameIndex) const {
345   return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
346 }
347 
348 bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
349                                        int &DestFrameIndex,
350                                        int &SrcFrameIndex) const {
351   // Check for MVC 0(Length,FI1),0(FI2)
352   const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
353   if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
354       MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
355       MI.getOperand(4).getImm() != 0)
356     return false;
357 
358   // Check that Length covers the full slots.
359   int64_t Length = MI.getOperand(2).getImm();
360   unsigned FI1 = MI.getOperand(0).getIndex();
361   unsigned FI2 = MI.getOperand(3).getIndex();
362   if (MFI.getObjectSize(FI1) != Length ||
363       MFI.getObjectSize(FI2) != Length)
364     return false;
365 
366   DestFrameIndex = FI1;
367   SrcFrameIndex = FI2;
368   return true;
369 }
370 
371 bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
372                                      MachineBasicBlock *&TBB,
373                                      MachineBasicBlock *&FBB,
374                                      SmallVectorImpl<MachineOperand> &Cond,
375                                      bool AllowModify) const {
376   // Most of the code and comments here are boilerplate.
377 
378   // Start from the bottom of the block and work up, examining the
379   // terminator instructions.
380   MachineBasicBlock::iterator I = MBB.end();
381   while (I != MBB.begin()) {
382     --I;
383     if (I->isDebugInstr())
384       continue;
385 
386     // Working from the bottom, when we see a non-terminator instruction, we're
387     // done.
388     if (!isUnpredicatedTerminator(*I))
389       break;
390 
391     // A terminator that isn't a branch can't easily be handled by this
392     // analysis.
393     if (!I->isBranch())
394       return true;
395 
396     // Can't handle indirect branches.
397     SystemZII::Branch Branch(getBranchInfo(*I));
398     if (!Branch.hasMBBTarget())
399       return true;
400 
401     // Punt on compound branches.
402     if (Branch.Type != SystemZII::BranchNormal)
403       return true;
404 
405     if (Branch.CCMask == SystemZ::CCMASK_ANY) {
406       // Handle unconditional branches.
407       if (!AllowModify) {
408         TBB = Branch.getMBBTarget();
409         continue;
410       }
411 
412       // If the block has any instructions after a JMP, delete them.
413       MBB.erase(std::next(I), MBB.end());
414 
415       Cond.clear();
416       FBB = nullptr;
417 
418       // Delete the JMP if it's equivalent to a fall-through.
419       if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
420         TBB = nullptr;
421         I->eraseFromParent();
422         I = MBB.end();
423         continue;
424       }
425 
      426       // TBB is used to indicate the unconditional destination.
427       TBB = Branch.getMBBTarget();
428       continue;
429     }
430 
431     // Working from the bottom, handle the first conditional branch.
432     if (Cond.empty()) {
433       // FIXME: add X86-style branch swap
434       FBB = TBB;
435       TBB = Branch.getMBBTarget();
436       Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
437       Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
438       continue;
439     }
440 
441     // Handle subsequent conditional branches.
442     assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
443 
444     // Only handle the case where all conditional branches branch to the same
445     // destination.
446     if (TBB != Branch.getMBBTarget())
447       return true;
448 
449     // If the conditions are the same, we can leave them alone.
450     unsigned OldCCValid = Cond[0].getImm();
451     unsigned OldCCMask = Cond[1].getImm();
452     if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
453       continue;
454 
455     // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
456     return false;
457   }
458 
459   return false;
460 }
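// Illustrative example of a block this analysis accepts: the terminators
//   BRC 14, 8, %bb.2     ; conditional branch on "equal"
//   J %bb.3
// produce TBB = %bb.2, FBB = %bb.3 and Cond = {14, 8}
// (CCValid = CCMASK_ICMP, CCMask = CCMASK_CMP_EQ).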
461 
462 unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
463                                         int *BytesRemoved) const {
464   assert(!BytesRemoved && "code size not handled");
465 
466   // Most of the code and comments here are boilerplate.
467   MachineBasicBlock::iterator I = MBB.end();
468   unsigned Count = 0;
469 
470   while (I != MBB.begin()) {
471     --I;
472     if (I->isDebugInstr())
473       continue;
474     if (!I->isBranch())
475       break;
476     if (!getBranchInfo(*I).hasMBBTarget())
477       break;
478     // Remove the branch.
479     I->eraseFromParent();
480     I = MBB.end();
481     ++Count;
482   }
483 
484   return Count;
485 }
486 
487 bool SystemZInstrInfo::
488 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
489   assert(Cond.size() == 2 && "Invalid condition");
490   Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
491   return false;
492 }
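// Hedged example with the SystemZ CC mask constants: for
// Cond = {CCMASK_ICMP (14), CCMASK_CMP_EQ (8)} the xor yields
// 14 ^ 8 == 6 == CCMASK_CMP_NE, so "equal" becomes "not equal" while
// bits outside CCValid stay clear.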
493 
494 unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
495                                         MachineBasicBlock *TBB,
496                                         MachineBasicBlock *FBB,
497                                         ArrayRef<MachineOperand> Cond,
498                                         const DebugLoc &DL,
499                                         int *BytesAdded) const {
500   // In this function we output 32-bit branches, which should always
501   // have enough range.  They can be shortened and relaxed by later code
502   // in the pipeline, if desired.
503 
504   // Shouldn't be a fall through.
505   assert(TBB && "insertBranch must not be told to insert a fallthrough");
506   assert((Cond.size() == 2 || Cond.size() == 0) &&
507          "SystemZ branch conditions have two components!");
508   assert(!BytesAdded && "code size not handled");
509 
510   if (Cond.empty()) {
511     // Unconditional branch?
512     assert(!FBB && "Unconditional branch with multiple successors!");
513     BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
514     return 1;
515   }
516 
517   // Conditional branch.
518   unsigned Count = 0;
519   unsigned CCValid = Cond[0].getImm();
520   unsigned CCMask = Cond[1].getImm();
521   BuildMI(&MBB, DL, get(SystemZ::BRC))
522     .addImm(CCValid).addImm(CCMask).addMBB(TBB);
523   ++Count;
524 
525   if (FBB) {
526     // Two-way conditional branch. Insert the second branch.
527     BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
528     ++Count;
529   }
530   return Count;
531 }
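// For example (illustrative blocks), a two-way branch with
// Cond = {14, 8}, TBB = %bb.1 and FBB = %bb.2 is emitted as
//   BRC 14, 8, %bb.1
//   J %bb.2
// and the function returns 2.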
532 
533 bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
534                                       Register &SrcReg2, int64_t &Mask,
535                                       int64_t &Value) const {
536   assert(MI.isCompare() && "Caller should have checked for a comparison");
537 
538   if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
539       MI.getOperand(1).isImm()) {
540     SrcReg = MI.getOperand(0).getReg();
541     SrcReg2 = 0;
542     Value = MI.getOperand(1).getImm();
543     Mask = ~0;
544     return true;
545   }
546 
547   return false;
548 }
549 
550 bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
551                                        ArrayRef<MachineOperand> Pred,
552                                        Register DstReg, Register TrueReg,
553                                        Register FalseReg, int &CondCycles,
554                                        int &TrueCycles,
555                                        int &FalseCycles) const {
556   // Not all subtargets have LOCR instructions.
557   if (!STI.hasLoadStoreOnCond())
558     return false;
559   if (Pred.size() != 2)
560     return false;
561 
562   // Check register classes.
563   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
564   const TargetRegisterClass *RC =
565     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
566   if (!RC)
567     return false;
568 
569   // We have LOCR instructions for 32 and 64 bit general purpose registers.
570   if ((STI.hasLoadStoreOnCond2() &&
571        SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
572       SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
573       SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
574     CondCycles = 2;
575     TrueCycles = 2;
576     FalseCycles = 2;
577     return true;
578   }
579 
580   // Can't do anything else.
581   return false;
582 }
583 
584 void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
585                                     MachineBasicBlock::iterator I,
586                                     const DebugLoc &DL, Register DstReg,
587                                     ArrayRef<MachineOperand> Pred,
588                                     Register TrueReg,
589                                     Register FalseReg) const {
590   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
591   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
592 
593   assert(Pred.size() == 2 && "Invalid condition");
594   unsigned CCValid = Pred[0].getImm();
595   unsigned CCMask = Pred[1].getImm();
596 
597   unsigned Opc;
598   if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
599     if (STI.hasMiscellaneousExtensions3())
600       Opc = SystemZ::SELRMux;
601     else if (STI.hasLoadStoreOnCond2())
602       Opc = SystemZ::LOCRMux;
603     else {
604       Opc = SystemZ::LOCR;
605       MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
606       Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
607       Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
608       BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
609       BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
610       TrueReg = TReg;
611       FalseReg = FReg;
612     }
613   } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
614     if (STI.hasMiscellaneousExtensions3())
615       Opc = SystemZ::SELGR;
616     else
617       Opc = SystemZ::LOCGR;
618   } else
619     llvm_unreachable("Invalid register class");
620 
621   BuildMI(MBB, I, DL, get(Opc), DstReg)
622     .addReg(FalseReg).addReg(TrueReg)
623     .addImm(CCValid).addImm(CCMask);
624 }
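// Hedged example: on a subtarget with load/store-on-condition but
// without SELR, selecting %t over %f on "equal" emits (illustrative
// registers)
//   %dst = LOCR %f, %t, 14, 8
// i.e. the destination starts as the false value and is conditionally
// replaced by the true value.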
625 
626 bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
627                                      Register Reg,
628                                      MachineRegisterInfo *MRI) const {
629   unsigned DefOpc = DefMI.getOpcode();
630   if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
631       DefOpc != SystemZ::LGHI)
632     return false;
633   if (DefMI.getOperand(0).getReg() != Reg)
634     return false;
635   int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
636 
637   unsigned UseOpc = UseMI.getOpcode();
638   unsigned NewUseOpc;
639   unsigned UseIdx;
640   int CommuteIdx = -1;
641   bool TieOps = false;
642   switch (UseOpc) {
643   case SystemZ::SELRMux:
644     TieOps = true;
645     [[fallthrough]];
646   case SystemZ::LOCRMux:
647     if (!STI.hasLoadStoreOnCond2())
648       return false;
649     NewUseOpc = SystemZ::LOCHIMux;
650     if (UseMI.getOperand(2).getReg() == Reg)
651       UseIdx = 2;
652     else if (UseMI.getOperand(1).getReg() == Reg)
653       UseIdx = 2, CommuteIdx = 1;
654     else
655       return false;
656     break;
657   case SystemZ::SELGR:
658     TieOps = true;
659     [[fallthrough]];
660   case SystemZ::LOCGR:
661     if (!STI.hasLoadStoreOnCond2())
662       return false;
663     NewUseOpc = SystemZ::LOCGHI;
664     if (UseMI.getOperand(2).getReg() == Reg)
665       UseIdx = 2;
666     else if (UseMI.getOperand(1).getReg() == Reg)
667       UseIdx = 2, CommuteIdx = 1;
668     else
669       return false;
670     break;
671   default:
672     return false;
673   }
674 
675   if (CommuteIdx != -1)
676     if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
677       return false;
678 
679   bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
680   UseMI.setDesc(get(NewUseOpc));
681   if (TieOps)
682     UseMI.tieOperands(0, 1);
683   UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
684   if (DeleteDef)
685     DefMI.eraseFromParent();
686 
687   return true;
688 }
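// Hedged example (virtual register numbers are illustrative):
//   %1 = LGHI 42
//   %2 = LOCGR %0, %1, 14, 8
// folds to
//   %2 = LOCGHI %0, 42, 14, 8
// with the LGHI erased once %1 has no other non-debug uses.  If the
// constant feeds the other register operand, the use is commuted first,
// which inverts the condition mask as in commuteInstructionImpl.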
689 
690 bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
691   unsigned Opcode = MI.getOpcode();
692   if (Opcode == SystemZ::Return ||
693       Opcode == SystemZ::Return_XPLINK ||
694       Opcode == SystemZ::Trap ||
695       Opcode == SystemZ::CallJG ||
696       Opcode == SystemZ::CallBR)
697     return true;
698   return false;
699 }
700 
701 bool SystemZInstrInfo::
702 isProfitableToIfCvt(MachineBasicBlock &MBB,
703                     unsigned NumCycles, unsigned ExtraPredCycles,
704                     BranchProbability Probability) const {
705   // Avoid using conditional returns at the end of a loop (since then
706   // we'd need to emit an unconditional branch to the beginning anyway,
707   // making the loop body longer).  This doesn't apply for low-probability
708   // loops (e.g. compare-and-swap retry), so just decide based on branch
709   // probability instead of looping structure.
710   // However, since Compare and Trap instructions cost the same as a regular
711   // Compare instruction, we should allow the if conversion to convert this
712   // into a Conditional Compare regardless of the branch probability.
713   if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
714       MBB.succ_empty() && Probability < BranchProbability(1, 8))
715     return false;
716   // For now only convert single instructions.
717   return NumCycles == 1;
718 }
719 
720 bool SystemZInstrInfo::
721 isProfitableToIfCvt(MachineBasicBlock &TMBB,
722                     unsigned NumCyclesT, unsigned ExtraPredCyclesT,
723                     MachineBasicBlock &FMBB,
724                     unsigned NumCyclesF, unsigned ExtraPredCyclesF,
725                     BranchProbability Probability) const {
726   // For now avoid converting mutually-exclusive cases.
727   return false;
728 }
729 
730 bool SystemZInstrInfo::
731 isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
732                           BranchProbability Probability) const {
733   // For now only duplicate single instructions.
734   return NumCycles == 1;
735 }
736 
737 bool SystemZInstrInfo::PredicateInstruction(
738     MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
739   assert(Pred.size() == 2 && "Invalid condition");
740   unsigned CCValid = Pred[0].getImm();
741   unsigned CCMask = Pred[1].getImm();
742   assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
743   unsigned Opcode = MI.getOpcode();
744   if (Opcode == SystemZ::Trap) {
745     MI.setDesc(get(SystemZ::CondTrap));
746     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
747       .addImm(CCValid).addImm(CCMask)
748       .addReg(SystemZ::CC, RegState::Implicit);
749     return true;
750   }
751   if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
752     MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
753                                              : SystemZ::CondReturn_XPLINK));
754     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
755         .addImm(CCValid)
756         .addImm(CCMask)
757         .addReg(SystemZ::CC, RegState::Implicit);
758     return true;
759   }
760   if (Opcode == SystemZ::CallJG) {
761     MachineOperand FirstOp = MI.getOperand(0);
762     const uint32_t *RegMask = MI.getOperand(1).getRegMask();
763     MI.removeOperand(1);
764     MI.removeOperand(0);
765     MI.setDesc(get(SystemZ::CallBRCL));
766     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
767         .addImm(CCValid)
768         .addImm(CCMask)
769         .add(FirstOp)
770         .addRegMask(RegMask)
771         .addReg(SystemZ::CC, RegState::Implicit);
772     return true;
773   }
774   if (Opcode == SystemZ::CallBR) {
775     MachineOperand Target = MI.getOperand(0);
776     const uint32_t *RegMask = MI.getOperand(1).getRegMask();
777     MI.removeOperand(1);
778     MI.removeOperand(0);
779     MI.setDesc(get(SystemZ::CallBCR));
780     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
781       .addImm(CCValid).addImm(CCMask)
782       .add(Target)
783       .addRegMask(RegMask)
784       .addReg(SystemZ::CC, RegState::Implicit);
785     return true;
786   }
787   return false;
788 }
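// For example, predicating a plain return on "equal" rewrites
//   Return   ->   CondReturn 14, 8, implicit $cc
// and, analogously, Trap becomes CondTrap while CallJG/CallBR become the
// conditional CallBRCL/CallBCR forms with the CC operands prepended.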
789 
790 void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
791                                    MachineBasicBlock::iterator MBBI,
792                                    const DebugLoc &DL, MCRegister DestReg,
793                                    MCRegister SrcReg, bool KillSrc) const {
794   // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
795   // super register in case one of the subregs is undefined.
796   // This handles ADDR128 too.
797   if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
798     copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
799                 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
800     MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
801       .addReg(SrcReg, RegState::Implicit);
802     copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
803                 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
804     MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
805       .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
806     return;
807   }
808 
809   if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
810     emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
811                   false);
812     return;
813   }
814 
815   // Move 128-bit floating-point values between VR128 and FP128.
816   if (SystemZ::VR128BitRegClass.contains(DestReg) &&
817       SystemZ::FP128BitRegClass.contains(SrcReg)) {
818     MCRegister SrcRegHi =
819         RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
820                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
821     MCRegister SrcRegLo =
822         RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
823                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
824 
825     BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
826       .addReg(SrcRegHi, getKillRegState(KillSrc))
827       .addReg(SrcRegLo, getKillRegState(KillSrc));
828     return;
829   }
830   if (SystemZ::FP128BitRegClass.contains(DestReg) &&
831       SystemZ::VR128BitRegClass.contains(SrcReg)) {
832     MCRegister DestRegHi =
833         RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
834                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
835     MCRegister DestRegLo =
836         RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
837                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
838 
839     if (DestRegHi != SrcReg)
840       copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
841     BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
842       .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
843     return;
844   }
845 
846   // Move CC value from a GR32.
847   if (DestReg == SystemZ::CC) {
848     unsigned Opcode =
849       SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
850     BuildMI(MBB, MBBI, DL, get(Opcode))
851       .addReg(SrcReg, getKillRegState(KillSrc))
852       .addImm(3 << (SystemZ::IPM_CC - 16));
853     return;
854   }
855 
856   // Everything else needs only one instruction.
857   unsigned Opcode;
858   if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
859     Opcode = SystemZ::LGR;
860   else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
861     // For z13 we prefer LDR over LER to avoid partial register dependencies.
862     Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
863   else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
864     Opcode = SystemZ::LDR;
865   else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
866     Opcode = SystemZ::LXR;
867   else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
868     Opcode = SystemZ::VLR32;
869   else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
870     Opcode = SystemZ::VLR64;
871   else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
872     Opcode = SystemZ::VLR;
873   else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
874     Opcode = SystemZ::CPYA;
875   else
876     llvm_unreachable("Impossible reg-to-reg copy");
877 
878   BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
879     .addReg(SrcReg, getKillRegState(KillSrc));
880 }
881 
882 void SystemZInstrInfo::storeRegToStackSlot(
883     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
884     bool isKill, int FrameIdx, const TargetRegisterClass *RC,
885     const TargetRegisterInfo *TRI, Register VReg) const {
886   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
887 
888   // Callers may expect a single instruction, so keep 128-bit moves
889   // together for now and lower them after register allocation.
890   unsigned LoadOpcode, StoreOpcode;
891   getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
892   addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
893                         .addReg(SrcReg, getKillRegState(isKill)),
894                     FrameIdx);
895 }
896 
897 void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
898                                             MachineBasicBlock::iterator MBBI,
899                                             Register DestReg, int FrameIdx,
900                                             const TargetRegisterClass *RC,
901                                             const TargetRegisterInfo *TRI,
902                                             Register VReg) const {
903   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
904 
905   // Callers may expect a single instruction, so keep 128-bit moves
906   // together for now and lower them after register allocation.
907   unsigned LoadOpcode, StoreOpcode;
908   getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
909   addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
910                     FrameIdx);
911 }
912 
913 // Return true if MI is a simple load or store with a 12-bit displacement
914 // and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
915 static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
916   const MCInstrDesc &MCID = MI->getDesc();
917   return ((MCID.TSFlags & Flag) &&
918           isUInt<12>(MI->getOperand(2).getImm()) &&
919           MI->getOperand(3).getReg() == 0);
920 }
921 
922 namespace {
923 
924 struct LogicOp {
925   LogicOp() = default;
926   LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
927     : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
928 
929   explicit operator bool() const { return RegSize; }
930 
931   unsigned RegSize = 0;
932   unsigned ImmLSB = 0;
933   unsigned ImmSize = 0;
934 };
935 
936 } // end anonymous namespace
937 
938 static LogicOp interpretAndImmediate(unsigned Opcode) {
939   switch (Opcode) {
940   case SystemZ::NILMux: return LogicOp(32,  0, 16);
941   case SystemZ::NIHMux: return LogicOp(32, 16, 16);
942   case SystemZ::NILL64: return LogicOp(64,  0, 16);
943   case SystemZ::NILH64: return LogicOp(64, 16, 16);
944   case SystemZ::NIHL64: return LogicOp(64, 32, 16);
945   case SystemZ::NIHH64: return LogicOp(64, 48, 16);
946   case SystemZ::NIFMux: return LogicOp(32,  0, 32);
947   case SystemZ::NILF64: return LogicOp(64,  0, 32);
948   case SystemZ::NIHF64: return LogicOp(64, 32, 32);
949   default:              return LogicOp();
950   }
951 }
952 
953 static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
954   if (OldMI->registerDefIsDead(SystemZ::CC)) {
955     MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
956     if (CCDef != nullptr)
957       CCDef->setIsDead(true);
958   }
959 }
960 
961 static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
962                            MachineInstr::MIFlag Flag) {
963   if (OldMI->getFlag(Flag))
964     NewMI->setFlag(Flag);
965 }
966 
967 MachineInstr *
968 SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
969                                         LiveIntervals *LIS) const {
970   MachineBasicBlock *MBB = MI.getParent();
971 
972   // Try to convert an AND into an RISBG-type instruction.
973   // TODO: It might be beneficial to select RISBG and shorten to AND instead.
974   if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
975     uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
976     // AND IMMEDIATE leaves the other bits of the register unchanged.
977     Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
978     unsigned Start, End;
979     if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
980       unsigned NewOpcode;
981       if (And.RegSize == 64) {
982         NewOpcode = SystemZ::RISBG;
983         // Prefer RISBGN if available, since it does not clobber CC.
984         if (STI.hasMiscellaneousExtensions())
985           NewOpcode = SystemZ::RISBGN;
986       } else {
987         NewOpcode = SystemZ::RISBMux;
988         Start &= 31;
989         End &= 31;
990       }
991       MachineOperand &Dest = MI.getOperand(0);
992       MachineOperand &Src = MI.getOperand(1);
993       MachineInstrBuilder MIB =
994           BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
995               .add(Dest)
996               .addReg(0)
997               .addReg(Src.getReg(), getKillRegState(Src.isKill()),
998                       Src.getSubReg())
999               .addImm(Start)
1000               .addImm(End + 128)
1001               .addImm(0);
1002       if (LV) {
1003         unsigned NumOps = MI.getNumOperands();
1004         for (unsigned I = 1; I < NumOps; ++I) {
1005           MachineOperand &Op = MI.getOperand(I);
1006           if (Op.isReg() && Op.isKill())
1007             LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1008         }
1009       }
1010       if (LIS)
1011         LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1012       transferDeadCC(&MI, MIB);
1013       return MIB;
1014     }
1015   }
1016   return nullptr;
1017 }
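// Hedged example (registers illustrative): for
//   %2 = NILL64 %1, 0xfff0
// the effective 64-bit mask is 0xfffffffffffffff0, a single run of ones,
// so the AND can be rewritten without the tied-operand constraint as
//   RISBG %r2, %r3, 0, 187, 0      ; 187 = 59 + 128 (zero remaining bits)
// or as RISBGN on processors where it is available, leaving CC untouched.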
1018 
1019 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1020     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1021     MachineBasicBlock::iterator InsertPt, int FrameIndex,
1022     LiveIntervals *LIS, VirtRegMap *VRM) const {
1023   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1024   MachineRegisterInfo &MRI = MF.getRegInfo();
1025   const MachineFrameInfo &MFI = MF.getFrameInfo();
1026   unsigned Size = MFI.getObjectSize(FrameIndex);
1027   unsigned Opcode = MI.getOpcode();
1028 
1029   // Check CC liveness if new instruction introduces a dead def of CC.
1030   SlotIndex MISlot = SlotIndex();
1031   LiveRange *CCLiveRange = nullptr;
1032   bool CCLiveAtMI = true;
1033   if (LIS) {
1034     MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1035     auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
1036     assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
1037     CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
1038     CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1039   }
1040 
1041   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1042     if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1043         isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1044       // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1045       MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1046                                       MI.getDebugLoc(), get(SystemZ::AGSI))
1047         .addFrameIndex(FrameIndex)
1048         .addImm(0)
1049         .addImm(MI.getOperand(2).getImm());
1050       BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
1051       CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1052       return BuiltMI;
1053     }
1054     return nullptr;
1055   }
1056 
1057   // All other cases require a single operand.
1058   if (Ops.size() != 1)
1059     return nullptr;
1060 
1061   unsigned OpNum = Ops[0];
1062   assert(Size * 8 ==
1063            TRI->getRegSizeInBits(*MF.getRegInfo()
1064                                .getRegClass(MI.getOperand(OpNum).getReg())) &&
1065          "Invalid size combination");
1066 
1067   if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1068       isInt<8>(MI.getOperand(2).getImm())) {
1069     // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1070     Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1071     MachineInstr *BuiltMI =
1072         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1073             .addFrameIndex(FrameIndex)
1074             .addImm(0)
1075             .addImm(MI.getOperand(2).getImm());
1076     transferDeadCC(&MI, BuiltMI);
1077     transferMIFlag(&MI, BuiltMI, MachineInstr::NoSWrap);
1078     return BuiltMI;
1079   }
1080 
1081   if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1082        isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1083       (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1084        isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1085     // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1086     Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1087     MachineInstr *BuiltMI =
1088         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1089             .addFrameIndex(FrameIndex)
1090             .addImm(0)
1091             .addImm((int8_t)MI.getOperand(2).getImm());
1092     transferDeadCC(&MI, BuiltMI);
1093     return BuiltMI;
1094   }
1095 
1096   if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1097        isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1098       (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1099        isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1100     // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1101     Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1102     MachineInstr *BuiltMI =
1103         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1104             .addFrameIndex(FrameIndex)
1105             .addImm(0)
1106             .addImm((int8_t)-MI.getOperand(2).getImm());
1107     transferDeadCC(&MI, BuiltMI);
1108     return BuiltMI;
1109   }
1110 
1111   unsigned MemImmOpc = 0;
1112   switch (Opcode) {
1113   case SystemZ::LHIMux:
1114   case SystemZ::LHI:    MemImmOpc = SystemZ::MVHI;  break;
1115   case SystemZ::LGHI:   MemImmOpc = SystemZ::MVGHI; break;
1116   case SystemZ::CHIMux:
1117   case SystemZ::CHI:    MemImmOpc = SystemZ::CHSI;  break;
1118   case SystemZ::CGHI:   MemImmOpc = SystemZ::CGHSI; break;
1119   case SystemZ::CLFIMux:
1120   case SystemZ::CLFI:
1121     if (isUInt<16>(MI.getOperand(1).getImm()))
1122       MemImmOpc = SystemZ::CLFHSI;
1123     break;
1124   case SystemZ::CLGFI:
1125     if (isUInt<16>(MI.getOperand(1).getImm()))
1126       MemImmOpc = SystemZ::CLGHSI;
1127     break;
1128   default: break;
1129   }
1130   if (MemImmOpc)
1131     return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1132                    get(MemImmOpc))
1133                .addFrameIndex(FrameIndex)
1134                .addImm(0)
1135                .addImm(MI.getOperand(1).getImm());
1136 
1137   if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1138     bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1139     bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1140     // If we're spilling the destination of an LDGR or LGDR, store the
1141     // source register instead.
1142     if (OpNum == 0) {
1143       unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1144       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1145                      get(StoreOpcode))
1146           .add(MI.getOperand(1))
1147           .addFrameIndex(FrameIndex)
1148           .addImm(0)
1149           .addReg(0);
1150     }
1151     // If we're spilling the source of an LDGR or LGDR, load the
1152     // destination register instead.
1153     if (OpNum == 1) {
1154       unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1155       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1156                      get(LoadOpcode))
1157         .add(MI.getOperand(0))
1158         .addFrameIndex(FrameIndex)
1159         .addImm(0)
1160         .addReg(0);
1161     }
1162   }
1163 
1164   // Look for cases where the source of a simple store or the destination
1165   // of a simple load is being spilled.  Try to use MVC instead.
1166   //
1167   // Although MVC is in practice a fast choice in these cases, it is still
1168   // logically a bytewise copy.  This means that we cannot use it if the
1169   // load or store is volatile.  We also wouldn't be able to use MVC if
1170   // the two memories partially overlap, but that case cannot occur here,
1171   // because we know that one of the memories is a full frame index.
1172   //
1173   // For performance reasons, we also want to avoid using MVC if the addresses
1174   // might be equal.  We don't worry about that case here, because spill slot
1175   // coloring happens later, and because we have special code to remove
1176   // MVCs that turn out to be redundant.
1177   if (OpNum == 0 && MI.hasOneMemOperand()) {
1178     MachineMemOperand *MMO = *MI.memoperands_begin();
1179     if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1180       // Handle conversion of loads.
1181       if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
1182         return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1183                        get(SystemZ::MVC))
1184             .addFrameIndex(FrameIndex)
1185             .addImm(0)
1186             .addImm(Size)
1187             .add(MI.getOperand(1))
1188             .addImm(MI.getOperand(2).getImm())
1189             .addMemOperand(MMO);
1190       }
1191       // Handle conversion of stores.
1192       if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
1193         return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1194                        get(SystemZ::MVC))
1195             .add(MI.getOperand(1))
1196             .addImm(MI.getOperand(2).getImm())
1197             .addImm(Size)
1198             .addFrameIndex(FrameIndex)
1199             .addImm(0)
1200             .addMemOperand(MMO);
1201       }
1202     }
1203   }
1204 
1205   // If the spilled operand is the final one or the instruction is
1206   // commutable, try to change <INSN>R into <INSN>.  Don't introduce a def of
1207   // CC if it is live and MI does not define it.
1208   unsigned NumOps = MI.getNumExplicitOperands();
1209   int MemOpcode = SystemZ::getMemOpcode(Opcode);
1210   if (MemOpcode == -1 ||
1211       (CCLiveAtMI && !MI.definesRegister(SystemZ::CC) &&
1212        get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1213     return nullptr;
1214 
1215   // Check if all other vregs have a usable allocation in the case of vector
1216   // to FP conversion.
1217   const MCInstrDesc &MCID = MI.getDesc();
1218   for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1219     const MCOperandInfo &MCOI = MCID.operands()[I];
1220     if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1221       continue;
1222     const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1223     if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1224       Register Reg = MI.getOperand(I).getReg();
1225       Register PhysReg = Reg.isVirtual()
1226                              ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1227                              : Reg;
1228       if (!PhysReg ||
1229           !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1230             SystemZ::FP64BitRegClass.contains(PhysReg) ||
1231             SystemZ::VF128BitRegClass.contains(PhysReg)))
1232         return nullptr;
1233     }
1234   }
1235   // Fused multiply and add/sub need to have the same dst and accumulator reg.
1236   bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1237                     Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1238   if (FusedFPOp) {
1239     Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1240     Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1241     if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1242       return nullptr;
1243   }
1244 
1245   // Try to swap compare operands if possible.
1246   bool NeedsCommute = false;
1247   if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1248        MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1249        MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1250        MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1251       OpNum == 0 && prepareCompareSwapOperands(MI))
1252     NeedsCommute = true;
1253 
1254   bool CCOperands = false;
1255   if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1256       MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1257     assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1258            "LOCR/SELR instruction operands corrupt?");
1259     NumOps -= 2;
1260     CCOperands = true;
1261   }
1262 
1263   // See if this is a 3-address instruction that is convertible to 2-address
1264   // and suitable for folding below.  Only try this with virtual registers
1265   // and a provided VRM (during regalloc).
1266   if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1267     if (VRM == nullptr)
1268       return nullptr;
1269     else {
1270       Register DstReg = MI.getOperand(0).getReg();
1271       Register DstPhys =
1272           (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1273       Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1274                                     : ((OpNum == 1 && MI.isCommutable())
1275                                            ? MI.getOperand(2).getReg()
1276                                            : Register()));
1277       if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1278           SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1279         NeedsCommute = (OpNum == 1);
1280       else
1281         return nullptr;
1282     }
1283   }
1284 
1285   if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1286     const MCInstrDesc &MemDesc = get(MemOpcode);
1287     uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1288     assert(AccessBytes != 0 && "Size of access should be known");
1289     assert(AccessBytes <= Size && "Access outside the frame index");
1290     uint64_t Offset = Size - AccessBytes;
1291     MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1292                                       MI.getDebugLoc(), get(MemOpcode));
1293     if (MI.isCompare()) {
1294       assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1295       MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1296     }
1297     else if (FusedFPOp) {
1298       MIB.add(MI.getOperand(0));
1299       MIB.add(MI.getOperand(3));
1300       MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1301     }
1302     else {
1303       MIB.add(MI.getOperand(0));
1304       if (NeedsCommute)
1305         MIB.add(MI.getOperand(2));
1306       else
1307         for (unsigned I = 1; I < OpNum; ++I)
1308           MIB.add(MI.getOperand(I));
1309     }
1310     MIB.addFrameIndex(FrameIndex).addImm(Offset);
1311     if (MemDesc.TSFlags & SystemZII::HasIndex)
1312       MIB.addReg(0);
1313     if (CCOperands) {
1314       unsigned CCValid = MI.getOperand(NumOps).getImm();
1315       unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1316       MIB.addImm(CCValid);
1317       MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1318     }
1319     if (MIB->definesRegister(SystemZ::CC) &&
1320         (!MI.definesRegister(SystemZ::CC) ||
1321          MI.registerDefIsDead(SystemZ::CC))) {
1322       MIB->addRegisterDead(SystemZ::CC, TRI);
1323       if (CCLiveRange)
1324         CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1325     }
1326     // Constrain the register classes if converted from a vector opcode. The
1327     // allocated regs are in an FP reg-class per previous check above.
1328     for (const MachineOperand &MO : MIB->operands())
1329       if (MO.isReg() && MO.getReg().isVirtual()) {
1330         Register Reg = MO.getReg();
1331         if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1332           MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1333         else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1334           MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1335         else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1336           MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1337       }
1338 
1339     transferDeadCC(&MI, MIB);
1340     transferMIFlag(&MI, MIB, MachineInstr::NoSWrap);
1341     transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
1342     return MIB;
1343   }
1344 
1345   return nullptr;
1346 }
1347 
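// Folding of a loaded value (as opposed to a frame index) into an instruction
// is not implemented for SystemZ, so this overload conservatively bails out.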
1348 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1349     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1350     MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1351     LiveIntervals *LIS) const {
1352   return nullptr;
1353 }
1354 
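// Expand pseudo instructions that survive register allocation: 128-bit moves
// are split into two 64-bit instructions, and the "Mux" pseudos are lowered to
// the low- or high-half opcode depending on which GRX32 registers were
// allocated.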
1355 bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1356   switch (MI.getOpcode()) {
1357   case SystemZ::L128:
1358     splitMove(MI, SystemZ::LG);
1359     return true;
1360 
1361   case SystemZ::ST128:
1362     splitMove(MI, SystemZ::STG);
1363     return true;
1364 
1365   case SystemZ::LX:
1366     splitMove(MI, SystemZ::LD);
1367     return true;
1368 
1369   case SystemZ::STX:
1370     splitMove(MI, SystemZ::STD);
1371     return true;
1372 
1373   case SystemZ::LBMux:
1374     expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1375     return true;
1376 
1377   case SystemZ::LHMux:
1378     expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1379     return true;
1380 
1381   case SystemZ::LLCRMux:
1382     expandZExtPseudo(MI, SystemZ::LLCR, 8);
1383     return true;
1384 
1385   case SystemZ::LLHRMux:
1386     expandZExtPseudo(MI, SystemZ::LLHR, 16);
1387     return true;
1388 
1389   case SystemZ::LLCMux:
1390     expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1391     return true;
1392 
1393   case SystemZ::LLHMux:
1394     expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1395     return true;
1396 
1397   case SystemZ::LMux:
1398     expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1399     return true;
1400 
1401   case SystemZ::LOCMux:
1402     expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1403     return true;
1404 
1405   case SystemZ::LOCHIMux:
1406     expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1407     return true;
1408 
1409   case SystemZ::STCMux:
1410     expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1411     return true;
1412 
1413   case SystemZ::STHMux:
1414     expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1415     return true;
1416 
1417   case SystemZ::STMux:
1418     expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1419     return true;
1420 
1421   case SystemZ::STOCMux:
1422     expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1423     return true;
1424 
1425   case SystemZ::LHIMux:
1426     expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1427     return true;
1428 
1429   case SystemZ::IIFMux:
1430     expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1431     return true;
1432 
1433   case SystemZ::IILMux:
1434     expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1435     return true;
1436 
1437   case SystemZ::IIHMux:
1438     expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1439     return true;
1440 
1441   case SystemZ::NIFMux:
1442     expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1443     return true;
1444 
1445   case SystemZ::NILMux:
1446     expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1447     return true;
1448 
1449   case SystemZ::NIHMux:
1450     expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1451     return true;
1452 
1453   case SystemZ::OIFMux:
1454     expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1455     return true;
1456 
1457   case SystemZ::OILMux:
1458     expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1459     return true;
1460 
1461   case SystemZ::OIHMux:
1462     expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1463     return true;
1464 
1465   case SystemZ::XIFMux:
1466     expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1467     return true;
1468 
1469   case SystemZ::TMLMux:
1470     expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1471     return true;
1472 
1473   case SystemZ::TMHMux:
1474     expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1475     return true;
1476 
1477   case SystemZ::AHIMux:
1478     expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1479     return true;
1480 
1481   case SystemZ::AHIMuxK:
1482     expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1483     return true;
1484 
1485   case SystemZ::AFIMux:
1486     expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1487     return true;
1488 
1489   case SystemZ::CHIMux:
1490     expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1491     return true;
1492 
1493   case SystemZ::CFIMux:
1494     expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1495     return true;
1496 
1497   case SystemZ::CLFIMux:
1498     expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1499     return true;
1500 
1501   case SystemZ::CMux:
1502     expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1503     return true;
1504 
1505   case SystemZ::CLMux:
1506     expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1507     return true;
1508 
1509   case SystemZ::RISBMux: {
1510     bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1511     bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
1512     if (SrcIsHigh == DestIsHigh)
1513       MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1514     else {
1515       MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
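      // When the source and destination live in different 32-bit halves of the
      // 64-bit register, the rotate amount in operand 5 must be adjusted by 32
      // (mod 64), which the XOR below does.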
1516       MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1517     }
1518     return true;
1519   }
1520 
1521   case SystemZ::ADJDYNALLOC:
1522     splitAdjDynAlloc(MI);
1523     return true;
1524 
1525   case TargetOpcode::LOAD_STACK_GUARD:
1526     expandLoadStackGuard(&MI);
1527     return true;
1528 
1529   default:
1530     return false;
1531   }
1532 }
1533 
1534 unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1535   if (MI.isInlineAsm()) {
1536     const MachineFunction *MF = MI.getParent()->getParent();
1537     const char *AsmStr = MI.getOperand(0).getSymbolName();
1538     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1539   }
1540   else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1541     return PatchPointOpers(&MI).getNumPatchBytes();
1542   else if (MI.getOpcode() == SystemZ::STACKMAP)
1543     return MI.getOperand(1).getImm();
1544   else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1545     return 6;
1546 
1547   return MI.getDesc().getSize();
1548 }
1549 
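// The unconditional forms (BR, BI, J, JG) report CCMASK_ANY, while BRCT and
// BRCTG are branch-on-count instructions that branch while the decremented
// register is non-zero, hence CCMASK_CMP_NE.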
1550 SystemZII::Branch
1551 SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1552   switch (MI.getOpcode()) {
1553   case SystemZ::BR:
1554   case SystemZ::BI:
1555   case SystemZ::J:
1556   case SystemZ::JG:
1557     return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1558                              SystemZ::CCMASK_ANY, &MI.getOperand(0));
1559 
1560   case SystemZ::BRC:
1561   case SystemZ::BRCL:
1562     return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1563                              MI.getOperand(1).getImm(), &MI.getOperand(2));
1564 
1565   case SystemZ::BRCT:
1566   case SystemZ::BRCTH:
1567     return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1568                              SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1569 
1570   case SystemZ::BRCTG:
1571     return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1572                              SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1573 
1574   case SystemZ::CIJ:
1575   case SystemZ::CRJ:
1576     return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1577                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1578 
1579   case SystemZ::CLIJ:
1580   case SystemZ::CLRJ:
1581     return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1582                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1583 
1584   case SystemZ::CGIJ:
1585   case SystemZ::CGRJ:
1586     return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1587                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1588 
1589   case SystemZ::CLGIJ:
1590   case SystemZ::CLGRJ:
1591     return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1592                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1593 
1594   case SystemZ::INLINEASM_BR:
1595     // Asm goto is not analyzed, so pass nullptr as the branch target argument.
1596     return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1597 
1598   default:
1599     llvm_unreachable("Unrecognized branch opcode");
1600   }
1601 }
1602 
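// Note that the GRX32 and 128-bit classes return pseudo opcodes (LMux/STMux,
// L128/ST128, LX/STX) that are lowered or split in expandPostRAPseudo above.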
1603 void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1604                                            unsigned &LoadOpcode,
1605                                            unsigned &StoreOpcode) const {
1606   if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1607     LoadOpcode = SystemZ::L;
1608     StoreOpcode = SystemZ::ST;
1609   } else if (RC == &SystemZ::GRH32BitRegClass) {
1610     LoadOpcode = SystemZ::LFH;
1611     StoreOpcode = SystemZ::STFH;
1612   } else if (RC == &SystemZ::GRX32BitRegClass) {
1613     LoadOpcode = SystemZ::LMux;
1614     StoreOpcode = SystemZ::STMux;
1615   } else if (RC == &SystemZ::GR64BitRegClass ||
1616              RC == &SystemZ::ADDR64BitRegClass) {
1617     LoadOpcode = SystemZ::LG;
1618     StoreOpcode = SystemZ::STG;
1619   } else if (RC == &SystemZ::GR128BitRegClass ||
1620              RC == &SystemZ::ADDR128BitRegClass) {
1621     LoadOpcode = SystemZ::L128;
1622     StoreOpcode = SystemZ::ST128;
1623   } else if (RC == &SystemZ::FP32BitRegClass) {
1624     LoadOpcode = SystemZ::LE;
1625     StoreOpcode = SystemZ::STE;
1626   } else if (RC == &SystemZ::FP64BitRegClass) {
1627     LoadOpcode = SystemZ::LD;
1628     StoreOpcode = SystemZ::STD;
1629   } else if (RC == &SystemZ::FP128BitRegClass) {
1630     LoadOpcode = SystemZ::LX;
1631     StoreOpcode = SystemZ::STX;
1632   } else if (RC == &SystemZ::VR32BitRegClass) {
1633     LoadOpcode = SystemZ::VL32;
1634     StoreOpcode = SystemZ::VST32;
1635   } else if (RC == &SystemZ::VR64BitRegClass) {
1636     LoadOpcode = SystemZ::VL64;
1637     StoreOpcode = SystemZ::VST64;
1638   } else if (RC == &SystemZ::VF128BitRegClass ||
1639              RC == &SystemZ::VR128BitRegClass) {
1640     LoadOpcode = SystemZ::VL;
1641     StoreOpcode = SystemZ::VST;
1642   } else
1643     llvm_unreachable("Unsupported regclass to load or store");
1644 }
1645 
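// For example, L only encodes unsigned 12-bit displacements, so an offset such
// as -8 or 4104 switches it to the long-displacement form LY, whereas LG keeps
// its opcode because its displacement field is already a signed 20-bit one.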
1646 unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1647                                               int64_t Offset,
1648                                               const MachineInstr *MI) const {
1649   const MCInstrDesc &MCID = get(Opcode);
1650   int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1651   if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1652     // Get the instruction to use for unsigned 12-bit displacements.
1653     int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1654     if (Disp12Opcode >= 0)
1655       return Disp12Opcode;
1656 
1657     // All address-related instructions can use unsigned 12-bit
1658     // displacements.
1659     return Opcode;
1660   }
1661   if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1662     // Get the instruction to use for signed 20-bit displacements.
1663     int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1664     if (Disp20Opcode >= 0)
1665       return Disp20Opcode;
1666 
1667     // Check whether Opcode allows signed 20-bit displacements.
1668     if (MCID.TSFlags & SystemZII::Has20BitOffset)
1669       return Opcode;
1670 
1671     // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1672     if (MI && MI->getOperand(0).isReg()) {
1673       Register Reg = MI->getOperand(0).getReg();
1674       if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1675         switch (Opcode) {
1676         case SystemZ::VL32:
1677           return SystemZ::LEY;
1678         case SystemZ::VST32:
1679           return SystemZ::STEY;
1680         case SystemZ::VL64:
1681           return SystemZ::LDY;
1682         case SystemZ::VST64:
1683           return SystemZ::STDY;
1684         default: break;
1685         }
1686       }
1687     }
1688   }
1689   return 0;
1690 }
1691 
1692 bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
1693   const MCInstrDesc &MCID = get(Opcode);
1694   if (MCID.TSFlags & SystemZII::Has20BitOffset)
1695     return SystemZ::getDisp12Opcode(Opcode) >= 0;
1696   return SystemZ::getDisp20Opcode(Opcode) >= 0;
1697 }
1698 
1699 unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1700   switch (Opcode) {
1701   case SystemZ::L:      return SystemZ::LT;
1702   case SystemZ::LY:     return SystemZ::LT;
1703   case SystemZ::LG:     return SystemZ::LTG;
1704   case SystemZ::LGF:    return SystemZ::LTGF;
1705   case SystemZ::LR:     return SystemZ::LTR;
1706   case SystemZ::LGFR:   return SystemZ::LTGFR;
1707   case SystemZ::LGR:    return SystemZ::LTGR;
1708   case SystemZ::LCDFR:  return SystemZ::LCDBR;
1709   case SystemZ::LPDFR:  return SystemZ::LPDBR;
1710   case SystemZ::LNDFR:  return SystemZ::LNDBR;
1711   case SystemZ::LCDFR_32:  return SystemZ::LCEBR;
1712   case SystemZ::LPDFR_32:  return SystemZ::LPEBR;
1713   case SystemZ::LNDFR_32:  return SystemZ::LNEBR;
1714   // On zEC12 we prefer to use RISBGN.  But if there is a chance to
1715   // actually use the condition code, we may turn it back into RISBG.
1716   // Note that RISBG is not really a "load-and-test" instruction,
1717   // but sets the same condition code values, so is OK to use here.
1718   case SystemZ::RISBGN: return SystemZ::RISBG;
1719   default:              return 0;
1720   }
1721 }
1722 
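// For example, Mask = 0x00ff0000 is a single shifted run of ones with LSB = 16
// and Length = 8, giving Start = 63 - 23 = 40 and End = 63 - 16 = 47 in the
// RxSBG bit numbering, where bit 0 is the most significant bit.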
1723 bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1724                                    unsigned &Start, unsigned &End) const {
1725   // Reject trivial all-zero masks.
1726   Mask &= allOnes(BitSize);
1727   if (Mask == 0)
1728     return false;
1729 
1730   // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
1731   // the msb and End specifies the index of the lsb.
1732   unsigned LSB, Length;
1733   if (isShiftedMask_64(Mask, LSB, Length)) {
1734     Start = 63 - (LSB + Length - 1);
1735     End = 63 - LSB;
1736     return true;
1737   }
1738 
1739   // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
1740   // of the low 1s and End specifies the lsb of the high 1s.
1741   if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1742     assert(LSB > 0 && "Bottom bit must be set");
1743     assert(LSB + Length < BitSize && "Top bit must be set");
1744     Start = 63 - (LSB - 1);
1745     End = 63 - (LSB + Length);
1746     return true;
1747   }
1748 
1749   return false;
1750 }
1751 
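// For example, a CHI whose immediate fits in 8 signed bits followed by a
// conditional branch can be fused into a single CIJ (compare immediate and
// branch relative); without a suitable immediate the request returns 0.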
1752 unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
1753                                            SystemZII::FusedCompareType Type,
1754                                            const MachineInstr *MI) const {
1755   switch (Opcode) {
1756   case SystemZ::CHI:
1757   case SystemZ::CGHI:
1758     if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
1759       return 0;
1760     break;
1761   case SystemZ::CLFI:
1762   case SystemZ::CLGFI:
1763     if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
1764       return 0;
1765     break;
1766   case SystemZ::CL:
1767   case SystemZ::CLG:
1768     if (!STI.hasMiscellaneousExtensions())
1769       return 0;
1770     if (!(MI && MI->getOperand(3).getReg() == 0))
1771       return 0;
1772     break;
1773   }
1774   switch (Type) {
1775   case SystemZII::CompareAndBranch:
1776     switch (Opcode) {
1777     case SystemZ::CR:
1778       return SystemZ::CRJ;
1779     case SystemZ::CGR:
1780       return SystemZ::CGRJ;
1781     case SystemZ::CHI:
1782       return SystemZ::CIJ;
1783     case SystemZ::CGHI:
1784       return SystemZ::CGIJ;
1785     case SystemZ::CLR:
1786       return SystemZ::CLRJ;
1787     case SystemZ::CLGR:
1788       return SystemZ::CLGRJ;
1789     case SystemZ::CLFI:
1790       return SystemZ::CLIJ;
1791     case SystemZ::CLGFI:
1792       return SystemZ::CLGIJ;
1793     default:
1794       return 0;
1795     }
1796   case SystemZII::CompareAndReturn:
1797     switch (Opcode) {
1798     case SystemZ::CR:
1799       return SystemZ::CRBReturn;
1800     case SystemZ::CGR:
1801       return SystemZ::CGRBReturn;
1802     case SystemZ::CHI:
1803       return SystemZ::CIBReturn;
1804     case SystemZ::CGHI:
1805       return SystemZ::CGIBReturn;
1806     case SystemZ::CLR:
1807       return SystemZ::CLRBReturn;
1808     case SystemZ::CLGR:
1809       return SystemZ::CLGRBReturn;
1810     case SystemZ::CLFI:
1811       return SystemZ::CLIBReturn;
1812     case SystemZ::CLGFI:
1813       return SystemZ::CLGIBReturn;
1814     default:
1815       return 0;
1816     }
1817   case SystemZII::CompareAndSibcall:
1818     switch (Opcode) {
1819     case SystemZ::CR:
1820       return SystemZ::CRBCall;
1821     case SystemZ::CGR:
1822       return SystemZ::CGRBCall;
1823     case SystemZ::CHI:
1824       return SystemZ::CIBCall;
1825     case SystemZ::CGHI:
1826       return SystemZ::CGIBCall;
1827     case SystemZ::CLR:
1828       return SystemZ::CLRBCall;
1829     case SystemZ::CLGR:
1830       return SystemZ::CLGRBCall;
1831     case SystemZ::CLFI:
1832       return SystemZ::CLIBCall;
1833     case SystemZ::CLGFI:
1834       return SystemZ::CLGIBCall;
1835     default:
1836       return 0;
1837     }
1838   case SystemZII::CompareAndTrap:
1839     switch (Opcode) {
1840     case SystemZ::CR:
1841       return SystemZ::CRT;
1842     case SystemZ::CGR:
1843       return SystemZ::CGRT;
1844     case SystemZ::CHI:
1845       return SystemZ::CIT;
1846     case SystemZ::CGHI:
1847       return SystemZ::CGIT;
1848     case SystemZ::CLR:
1849       return SystemZ::CLRT;
1850     case SystemZ::CLGR:
1851       return SystemZ::CLGRT;
1852     case SystemZ::CLFI:
1853       return SystemZ::CLFIT;
1854     case SystemZ::CLGFI:
1855       return SystemZ::CLGIT;
1856     case SystemZ::CL:
1857       return SystemZ::CLT;
1858     case SystemZ::CLG:
1859       return SystemZ::CLGT;
1860     default:
1861       return 0;
1862     }
1863   }
1864   return 0;
1865 }
1866 
1867 bool SystemZInstrInfo::
1868 prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const {
1869   assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
1870          MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
1871          "Not a compare reg/reg.");
1872 
1873   MachineBasicBlock *MBB = MBBI->getParent();
1874   bool CCLive = true;
1875   SmallVector<MachineInstr *, 4> CCUsers;
1876   for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
1877     if (MI.readsRegister(SystemZ::CC)) {
1878       unsigned Flags = MI.getDesc().TSFlags;
1879       if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
1880         CCUsers.push_back(&MI);
1881       else
1882         return false;
1883     }
1884     if (MI.definesRegister(SystemZ::CC)) {
1885       CCLive = false;
1886       break;
1887     }
1888   }
1889   if (CCLive) {
1890     LivePhysRegs LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
1891     LiveRegs.addLiveOuts(*MBB);
1892     if (LiveRegs.contains(SystemZ::CC))
1893       return false;
1894   }
1895 
1896   // Update all CC users.
1897   for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
1898     unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
1899     unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
1900                            0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
1901     MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
1902     unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
1903     CCMaskMO.setImm(NewCCMask);
1904   }
1905 
1906   return true;
1907 }
1908 
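// Swap the LT and GT condition-code tests while leaving EQ and UO alone.  For
// example, reversing (CCMASK_CMP_LT | CCMASK_CMP_EQ), i.e. "<=", produces
// (CCMASK_CMP_GT | CCMASK_CMP_EQ), i.e. ">=", which is what a compare with
// swapped operands requires.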
1909 unsigned SystemZ::reverseCCMask(unsigned CCMask) {
1910   return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1911           (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1912           (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1913           (CCMask & SystemZ::CCMASK_CMP_UO));
1914 }
1915 
1916 MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) {
1917   MachineFunction &MF = *MBB->getParent();
1918   MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
1919   MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
1920   return NewMBB;
1921 }
1922 
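// Split MBB at MI: splitBlockAfter keeps MI in the original block and moves
// everything after it into the new block, while splitBlockBefore below moves
// MI itself as well.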
1923 MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
1924                                             MachineBasicBlock *MBB) {
1925   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1926   NewMBB->splice(NewMBB->begin(), MBB,
1927                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
1928   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1929   return NewMBB;
1930 }
1931 
1932 MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI,
1933                                              MachineBasicBlock *MBB) {
1934   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1935   NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
1936   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1937   return NewMBB;
1938 }
1939 
1940 unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
1941   if (!STI.hasLoadAndTrap())
1942     return 0;
1943   switch (Opcode) {
1944   case SystemZ::L:
1945   case SystemZ::LY:
1946     return SystemZ::LAT;
1947   case SystemZ::LG:
1948     return SystemZ::LGAT;
1949   case SystemZ::LFH:
1950     return SystemZ::LFHAT;
1951   case SystemZ::LLGF:
1952     return SystemZ::LLGFAT;
1953   case SystemZ::LLGT:
1954     return SystemZ::LLGTAT;
1955   }
1956   return 0;
1957 }
1958 
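// For example, Value = 0x7fff selects LGHI, Value = 0x12340000 selects LLILH
// (with the immediate shifted down to 0x1234), and Value = 0x12345678 selects
// LGFI; values matching none of these patterns are composed from an
// IIHF64/IILF64 pair, which is only done before register allocation.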
1959 void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
1960                                      MachineBasicBlock::iterator MBBI,
1961                                      unsigned Reg, uint64_t Value) const {
1962   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1963   unsigned Opcode = 0;
1964   if (isInt<16>(Value))
1965     Opcode = SystemZ::LGHI;
1966   else if (SystemZ::isImmLL(Value))
1967     Opcode = SystemZ::LLILL;
1968   else if (SystemZ::isImmLH(Value)) {
1969     Opcode = SystemZ::LLILH;
1970     Value >>= 16;
1971   }
1972   else if (isInt<32>(Value))
1973     Opcode = SystemZ::LGFI;
1974   if (Opcode) {
1975     BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
1976     return;
1977   }
1978 
1979   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1980   assert(MRI.isSSA() && "Huge values only handled before reg-alloc.");
1981   Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1982   Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1983   BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
1984   BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
1985     .addReg(Reg0).addImm(Value >> 32);
1986   BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
1987     .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
1988 }
1989 
1990 bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
1991                                          StringRef &ErrInfo) const {
1992   const MCInstrDesc &MCID = MI.getDesc();
1993   for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1994     if (I >= MCID.getNumOperands())
1995       break;
1996     const MachineOperand &Op = MI.getOperand(I);
1997     const MCOperandInfo &MCOI = MCID.operands()[I];
1998     // Addressing modes have register and immediate operands. Op should be a
1999     // register (or frame index) operand if MCOI.RegClass contains a valid
2000     // register class, or an immediate otherwise.
2001     if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
2002         ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
2003          (MCOI.RegClass == -1 && !Op.isImm()))) {
2004       ErrInfo = "Addressing mode operands corrupt!";
2005       return false;
2006     }
2007   }
2008 
2009   return true;
2010 }
2011 
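// For example, two 8-byte accesses to the same frame object at offsets 0 and 8
// are disjoint (0 + 8 <= 8), whereas an 8-byte access at offset 0 and a 4-byte
// access at offset 4 overlap and cannot be proven independent here.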
2012 bool SystemZInstrInfo::
2013 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2014                                 const MachineInstr &MIb) const {
2015 
2016   if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2017     return false;
2018 
2019   // If the memory operands show that both instructions use the same address
2020   // Value, check for non-overlapping offsets and widths. It is not clear
2021   // whether a register-based analysis would be an improvement.
2022 
2023   MachineMemOperand *MMOa = *MIa.memoperands_begin();
2024   MachineMemOperand *MMOb = *MIb.memoperands_begin();
2025   const Value *VALa = MMOa->getValue();
2026   const Value *VALb = MMOb->getValue();
2027   bool SameVal = (VALa && VALb && (VALa == VALb));
2028   if (!SameVal) {
2029     const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2030     const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2031     if (PSVa && PSVb && (PSVa == PSVb))
2032       SameVal = true;
2033   }
2034   if (SameVal) {
2035     int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2036     int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2037     int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2038     int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2039     int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2040     if (LowOffset + LowWidth <= HighOffset)
2041       return true;
2042   }
2043 
2044   return false;
2045 }
2046