//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(MF.getSubtarget<MipsSubtarget>()),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  Register Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
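  // The accumulator is copied one half at a time, so the scratch register
  // class below only needs to hold half of the destination register: dividing
  // the register width in bits by 16 gives the byte size of one half.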
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they are in an odd or even
  // register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register LoReg = I->getOperand(1).getReg();
    Register HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is
/// not available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    Register DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values regardless of whether they are in an odd or even
  // register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
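    // Pick the 32-bit word of the spilled double to reload: word N on
    // little-endian targets, the opposite word on big-endian targets.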
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
      &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (const CalleeSavedInfo &I : CSI) {
      int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
      Register Reg = I.getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo,
                              Register());
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.hasStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      Register VR = MF.getRegInfo().createVirtualRegister(RC);
      assert((Log2(MFI.getMaxAlign()) < 16) &&
             "Function's alignment size requirement is not supported.");
      int64_t MaxAlign = -(int64_t)MFI.getMaxAlign().value();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support Mips32r2 or later.
  // The epilogue relies on the use of the "ehb" to clear execution
  // hazards. Pre R2 Mips relies on an implementation defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC
  StringRef IntKind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

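    // Extract the requested interrupt priority level (the RIPL field, Cause
    // bits 15:10) into $k0; it is inserted into the Status register's IPL
    // field further below.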
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and Spill Status
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo,
                               Register());
    }
  }

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

StackOffset
MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return StackOffset::getFixed(MFI.getObjectOffset(FI) + MFI.getStackSize() -
                               getOffsetOfLocalArea() +
                               MFI.getOffsetAdjustment());
}

bool MipsSEFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (const CalleeSavedInfo &I : CSI) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and return address is taken, because it has already been added in
    // method MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and return address
    // is taken.
    Register Reg = I.getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      MBB.addLiveIn(Reg);

    // ISRs require HI/LO to be spilled into kernel registers to be then
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function &Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func.hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC, TRI,
                            Register());
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve the call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $ra and $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF)) {
    setAliasRegs(MF, SavedRegs, RA);
    setAliasRegs(MF, SavedRegs, FP);
  }
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI(MF);

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI(MF);

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // has 64-bit general-purpose registers, the slot should be 64 bits wide;
    // otherwise it should be 32 bits wide.
    const TargetRegisterClass &RC = STI.isGP64bit() ?
        Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlign(RC), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable
  // sized object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlign(RC), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}