//===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Sparc implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SparcInstrInfo.h"
#include "Sparc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "SparcGenInstrInfo.inc"

static cl::opt<unsigned> BPccDisplacementBits(
    "sparc-bpcc-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)"));

// Pin the vtable to this file.
void SparcInstrInfo::anchor() {}

SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
    : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
      Subtarget(ST) {}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
      MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
      MI.getOpcode() == SP::LDQFri) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
      MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
      MI.getOpcode() == SP::STQFri) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
  }
  return 0;
}

static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC) {
  switch (CC) {
  case SPCC::ICC_A:   return SPCC::ICC_N;
  case SPCC::ICC_N:   return SPCC::ICC_A;
  case SPCC::ICC_NE:  return SPCC::ICC_E;
  case SPCC::ICC_E:   return SPCC::ICC_NE;
  case SPCC::ICC_G:   return SPCC::ICC_LE;
  case SPCC::ICC_LE:  return SPCC::ICC_G;
  case SPCC::ICC_GE:  return SPCC::ICC_L;
  case SPCC::ICC_L:   return SPCC::ICC_GE;
  case SPCC::ICC_GU:  return SPCC::ICC_LEU;
  case SPCC::ICC_LEU: return SPCC::ICC_GU;
  case SPCC::ICC_CC:  return SPCC::ICC_CS;
  case SPCC::ICC_CS:  return SPCC::ICC_CC;
  case SPCC::ICC_POS: return SPCC::ICC_NEG;
  case SPCC::ICC_NEG: return SPCC::ICC_POS;
  case SPCC::ICC_VC:  return SPCC::ICC_VS;
  case SPCC::ICC_VS:  return SPCC::ICC_VC;

  case SPCC::FCC_A:   return SPCC::FCC_N;
  case SPCC::FCC_N:   return SPCC::FCC_A;
  case SPCC::FCC_U:   return SPCC::FCC_O;
  case SPCC::FCC_O:   return SPCC::FCC_U;
  case SPCC::FCC_G:   return SPCC::FCC_ULE;
  case SPCC::FCC_LE:  return SPCC::FCC_UG;
  case SPCC::FCC_UG:  return SPCC::FCC_LE;
  case SPCC::FCC_ULE: return SPCC::FCC_G;
  case SPCC::FCC_L:   return SPCC::FCC_UGE;
  case SPCC::FCC_GE:  return SPCC::FCC_UL;
  case SPCC::FCC_UL:  return SPCC::FCC_GE;
  case SPCC::FCC_UGE: return SPCC::FCC_L;
  case SPCC::FCC_LG:  return SPCC::FCC_UE;
  case SPCC::FCC_UE:  return SPCC::FCC_LG;
  case SPCC::FCC_NE:  return SPCC::FCC_E;
  case SPCC::FCC_E:   return SPCC::FCC_NE;

  case SPCC::CPCC_A:  return SPCC::CPCC_N;
  case SPCC::CPCC_N:  return SPCC::CPCC_A;
  case SPCC::CPCC_3:   [[fallthrough]];
  case SPCC::CPCC_2:   [[fallthrough]];
  case SPCC::CPCC_23:  [[fallthrough]];
  case SPCC::CPCC_1:   [[fallthrough]];
  case SPCC::CPCC_13:  [[fallthrough]];
  case SPCC::CPCC_12:  [[fallthrough]];
  case SPCC::CPCC_123: [[fallthrough]];
  case SPCC::CPCC_0:   [[fallthrough]];
  case SPCC::CPCC_03:  [[fallthrough]];
  case SPCC::CPCC_02:  [[fallthrough]];
  case SPCC::CPCC_023: [[fallthrough]];
  case SPCC::CPCC_01:  [[fallthrough]];
  case SPCC::CPCC_013: [[fallthrough]];
  case SPCC::CPCC_012:
    // "Opposite" code is not meaningful, as we don't know
    // what the CoProc condition means here. The cond-code will
    // only be used in inline assembler, so this code should
    // not be reached in a normal compilation pass.
    llvm_unreachable("Meaningless inversion of co-processor cond code");

  case SPCC::REG_BEGIN:
    llvm_unreachable("Use of reserved cond code");
  case SPCC::REG_Z:
    return SPCC::REG_NZ;
  case SPCC::REG_LEZ:
    return SPCC::REG_GZ;
  case SPCC::REG_LZ:
    return SPCC::REG_GEZ;
  case SPCC::REG_NZ:
    return SPCC::REG_Z;
  case SPCC::REG_GZ:
    return SPCC::REG_LEZ;
  case SPCC::REG_GEZ:
    return SPCC::REG_LZ;
  }
  llvm_unreachable("Invalid cond code");
}

static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }

static bool isI32CondBranchOpcode(int Opc) {
  return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
         Opc == SP::BPICCNT || Opc == SP::BPICCANT;
}

static bool isI64CondBranchOpcode(int Opc) {
  return Opc == SP::BPXCC || Opc == SP::BPXCCA || Opc == SP::BPXCCNT ||
         Opc == SP::BPXCCANT;
}

static bool isFCondBranchOpcode(int Opc) {
  return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 ||
         Opc == SP::FBCONDA_V9;
}

static bool isCondBranchOpcode(int Opc) {
  return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
         isFCondBranchOpcode(Opc);
}

static bool isIndirectBranchOpcode(int Opc) {
  return Opc == SP::BINDrr || Opc == SP::BINDri;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  unsigned Opc = LastInst->getOpcode();
  int64_t CC = LastInst->getOperand(1).getImm();

  // Push the branch opcode into Cond too so later in insertBranch
  // it can use the information to emit the correct SPARC branch opcode.
  Cond.push_back(MachineOperand::CreateImm(Opc));
  Cond.push_back(MachineOperand::CreateImm(CC));

  Target = LastInst->getOperand(0).getMBB();
}

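// Return the destination basic block of a branch; for every branch opcode
// handled below, the target block is carried in the first operand.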
MachineBasicBlock *
SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
    return MI.getOperand(0).getMBB();
  }
}

bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

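// Insert a branch sequence at the end of MBB. Cond is either empty (for an
// unconditional branch) or the {branch opcode, condition code} pair recorded
// by parseCondBranch. Each emitted branch is counted as 8 bytes, since
// getInstSizeInBytes includes the delay slot in a branch's size.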
unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                      MachineBasicBlock *TBB,
                                      MachineBasicBlock *FBB,
                                      ArrayRef<MachineOperand> Cond,
                                      const DebugLoc &DL,
                                      int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() <= 2) &&
         "Sparc branch conditions should have at most two components!");

  if (Cond.empty()) {
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  // Conditional branch
  unsigned Opc = Cond[0].getImm();
  unsigned CC = Cond[1].getImm();
  BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);

  if (!FBB) {
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded = 16;
  return 2;
}

unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  int Removed = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugInstr())
      continue;

    if (!isCondBranchOpcode(I->getOpcode()) &&
        !isUncondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    Removed += getInstSizeInBytes(*I);
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  if (BytesRemoved)
    *BytesRemoved = Removed;
  return Count;
}

bool SparcInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() <= 2);
  SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm());
  Cond[1].setImm(GetOppositeBranchCondition(CC));
  return false;
}

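// Return whether the given byte offset fits in the displacement field of
// BranchOpc. Branch displacements are encoded in words, hence the
// word-alignment assert and the comparison against Offset >> 2.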
bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
                                           int64_t Offset) const {
  assert((Offset & 0b11) == 0 && "Malformed branch offset");
  switch (BranchOpc) {
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
    return isIntN(22, Offset >> 2);

  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
    return isIntN(BPccDisplacementBits, Offset >> 2);
  }

  llvm_unreachable("Unknown branch instruction!");
}

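// Copy SrcReg into DestReg. Copies that no single instruction can perform
// (integer pairs, and double/quad FP copies on subtargets lacking the
// corresponding FMOV) are decomposed into moves of their sub-registers.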
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 const DebugLoc &DL, MCRegister DestReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  unsigned numSubRegs = 0;
  unsigned movOpc = 0;
  const unsigned *subRegIdx = nullptr;
  bool ExtraG0 = false;

  const unsigned DW_SubRegsIdx[] = {SP::sub_even, SP::sub_odd};
  const unsigned DFP_FP_SubRegsIdx[] = {SP::sub_even, SP::sub_odd};
  const unsigned QFP_DFP_SubRegsIdx[] = {SP::sub_even64, SP::sub_odd64};
  const unsigned QFP_FP_SubRegsIdx[] = {SP::sub_even, SP::sub_odd,
                                        SP::sub_odd64_then_sub_even,
                                        SP::sub_odd64_then_sub_odd};

  if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::ORrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
    subRegIdx = DW_SubRegsIdx;
    numSubRegs = 2;
    movOpc = SP::ORrr;
    ExtraG0 = true;
  } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Use two FMOVS instructions.
      subRegIdx = DFP_FP_SubRegsIdx;
      numSubRegs = 2;
      movOpc = SP::FMOVS;
    }
  } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      if (Subtarget.hasHardQuad()) {
        BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // Use two FMOVD instructions.
        subRegIdx = QFP_DFP_SubRegsIdx;
        numSubRegs = 2;
        movOpc = SP::FMOVD;
      }
    } else {
      // Use four FMOVS instructions.
      subRegIdx = QFP_FP_SubRegsIdx;
      numSubRegs = 4;
      movOpc = SP::FMOVS;
    }
  } else if (SP::ASRRegsRegClass.contains(DestReg) &&
             SP::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (SP::IntRegsRegClass.contains(DestReg) &&
             SP::ASRRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
    return;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstr *MovMI = nullptr;

  for (unsigned i = 0; i != numSubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
    Register Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
    assert(Dst && Src && "Bad sub-register");

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
    if (ExtraG0)
      MIB.addReg(SP::G0);
    MIB.addReg(Src);
    MovMI = MIB.getInstr();
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI);
}

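// Spill SrcReg to stack slot FI, picking the store opcode that matches the
// register class being spilled.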
void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool isKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &SP::I64RegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use STQFri irrespective of its legality. If STQ is not legal, it will
    // be lowered into two STDs in eliminateFrameIndex.
    BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
        .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  else
    llvm_unreachable("Can't store this register to stack slot");
}

void SparcInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DestReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &SP::I64RegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use LDQFri irrespective of its legality. If LDQ is not legal, it will
    // be lowered into two LDDs in eliminateFrameIndex.
    BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
        .addMemOperand(MMO);
  else
    llvm_unreachable("Can't load this register from stack slot");
}

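// Return the virtual register that holds the PIC base address, creating it
// on first use by emitting a GETPCX pseudo at the start of the function.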
Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
  Register GlobalBaseReg = SparcFI->getGlobalBaseReg();
  if (GlobalBaseReg)
    return GlobalBaseReg;

  // Insert the set of GlobalBaseReg into the first MBB of the function
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();

  const TargetRegisterClass *PtrRC =
      Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  GlobalBaseReg = RegInfo.createVirtualRegister(PtrRC);

  DebugLoc dl;

  BuildMI(FirstMBB, MBBI, dl, get(SP::GETPCX), GlobalBaseReg);
  SparcFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  if (MI.isInlineAsm()) {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }

  // If the instruction has a delay slot, be conservative and also include
  // it for sizing purposes. This is done so that the BranchRelaxation pass
  // will not mistakenly mark out-of-range branches as in-range.
  if (MI.hasDelaySlot())
    return get(Opcode).getSize() * 2;
  return get(Opcode).getSize();
}

bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::LOAD_STACK_GUARD: {
    assert(Subtarget.isTargetLinux() &&
           "Only Linux target is expected to contain LOAD_STACK_GUARD");
    // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
    const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
    MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addReg(SP::G7)
        .addImm(Offset);
    return true;
  }
  }
  return false;
}