//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
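/// These pseudos (accumulator/DSP loads, stores and copies, and the F64
/// build/extract pairs) are expanded from determineCalleeSaves() so that an
/// emergency spill slot can still be reserved when an expansion needs one.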
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  Register Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

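  // Move each half of the accumulator through a GPR temporary, then copy it
  // into the corresponding lo/hi sub-register of the destination.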
  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of being an odd/even register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register LoReg = I->getOperand(1).getReg();
    Register HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is not
/// available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    Register DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of being an odd/even register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
    &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a callee-saved
    // register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over list of callee-saved registers and emit .cfi_offset
    // directives.
    for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
           E = CSI.end(); I != E; ++I) {
      int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
      unsigned Reg = I->getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.hasStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      Register VR = MF.getRegInfo().createVirtualRegister(RC);
      assert((Log2(MFI.getMaxAlign()) < 16) &&
             "Function's alignment size requirement is not supported.");
      int64_t MaxAlign = -(int64_t)MFI.getMaxAlign().value();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support MIPS32r2 or later.
  // The epilogue relies on the use of the "ehb" to clear execution
  // hazards. Pre-R2 MIPS relies on an implementation-defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp-relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC does.
  StringRef IntKind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
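    // Read Cause into $k0 and extract bits 10..15 (the requested interrupt
    // priority level) so they can be inserted into Status further below.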
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and Spill Status
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo);
    }
  }

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC does.
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

StackOffset
MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return StackOffset::getFixed(MFI.getObjectOffset(FI) + MFI.getStackSize() -
                               getOffsetOfLocalArea() +
                               MFI.getOffsetAdjustment());
}

bool MipsSEFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and return address is taken, because it has already been added in
    // method MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and return address
    // is taken.
    unsigned Reg = CSI[i].getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      MBB.addLiveIn(Reg);

    // ISRs require HI/LO to be spilled into kernel registers to be then
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function &Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func.hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, IsKill,
                            CSI[i].getFrameIdx(), RC, TRI);
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve the call frame if the size of the maximum call frame fits into the
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
    !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $ra and $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF)) {
    setAliasRegs(MF, SavedRegs, RA);
    setAliasRegs(MF, SavedRegs, FP);
  }
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI(MF);

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI(MF);

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the target
    // has 64-bit general-purpose registers, it should be 64 bits; otherwise it
    // should be 32 bits.
    const TargetRegisterClass &RC = STI.isGP64bit() ?
        Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlign(RC), false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set the scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable
  // sized object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlign(RC), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}