//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterBankInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"

#define DEBUG_TYPE "mips-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  const TargetRegisterClass *
  getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
                           const RegisterBankInfo &RBI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
                                 MachineRegisterInfo &MRI) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  if (RegBank->getID() == Mips::FPRBRegBankID) {
    if (DstSize == 32)
      RC = &Mips::FGR32RegClass;
    else if (DstSize == 64)
      RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

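// For reference, a summary of the bank-to-class mapping implemented below
// (derived directly from the code):
//   GPRB, any supported size -> GPR32
//   FPRB, 32-bit             -> FGR32
//   FPRB, 64-bit             -> FGR64 when hasMips32r6() or isFP64bit(),
//                               AFGR64 otherwise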
const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
    unsigned OpSize, const RegisterBank &RB,
    const RegisterBankInfo &RBI) const {
  if (RB.getID() == Mips::GPRBRegBankID)
    return &Mips::GPR32RegClass;

  if (RB.getID() == Mips::FPRBRegBankID)
    return OpSize == 32
               ? &Mips::FGR32RegClass
               : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
                                                      : &Mips::AFGR64RegClass;

  llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
  return nullptr;
}

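// Illustrative examples of the strategy used by materialize32BitImm below
// (sketched in assembly-like form, derived from the cases it handles):
//   0x0000abcd -> ori   $dst, $zero, 0xabcd
//   0xabcd0000 -> lui   $dst, 0xabcd
//   0xfffffff0 -> addiu $dst, $zero, 0xfff0   (sign-extended 16-bit value)
//   0x12345678 -> lui   $tmp, 0x1234
//                 ori   $dst, $tmp, 0x5678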
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
                                                  MachineIRBuilder &B) const {
  assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
  // ORi zero-extends the immediate. Used for values with zeros in the high 16
  // bits.
  if (Imm.getHiBits(16).isNullValue()) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // LUi places the immediate in the high 16 bits and sets the low 16 bits to
  // zero.
  if (Imm.getLoBits(16).isNullValue()) {
    MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // ADDiu sign-extends the immediate. Used for values with ones in the high 17
  // bits.
  if (Imm.isSignedIntN(16)) {
    MachineInstr *Inst =
        B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
            .addImm(Imm.getLoBits(16).getLimitedValue());
    return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
  }
  // Values that cannot be materialized with a single immediate instruction.
  Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
  MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
                          .addImm(Imm.getHiBits(16).getLimitedValue());
  MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
                          .addImm(Imm.getLoBits(16).getLimitedValue());
  if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
    return false;
  if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
    return false;
  return true;
}

/// Returning the original opcode indicates that we failed to select a MIPS
/// load/store opcode.
unsigned
MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
                                               MachineRegisterInfo &MRI) const {
  const Register DestReg = I.getOperand(0).getReg();
  const unsigned RegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
  unsigned Opc = I.getOpcode();
  const bool isStore = Opc == TargetOpcode::G_STORE;
  if (RegBank == Mips::GPRBRegBankID) {
    if (isStore)
      switch (MemSizeInBytes) {
      case 4:
        return Mips::SW;
      case 2:
        return Mips::SH;
      case 1:
        return Mips::SB;
      default:
        return Opc;
      }
    else
      // An unspecified extending load is selected into a zero-extending load.
      switch (MemSizeInBytes) {
      case 4:
        return Mips::LW;
      case 2:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
      case 1:
        return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
      default:
        return Opc;
      }
  }

  if (RegBank == Mips::FPRBRegBankID) {
    switch (MemSizeInBytes) {
    case 4:
      return isStore ? Mips::SWC1 : Mips::LWC1;
    case 8:
      if (STI.isFP64bit())
        return isStore ? Mips::SDC164 : Mips::LDC164;
      else
        return isStore ? Mips::SDC1 : Mips::LDC1;
    case 16: {
      assert(STI.hasMSA() && "Vector instructions require target with MSA.");
      const unsigned VectorElementSizeInBytes =
          MRI.getType(DestReg).getElementType().getSizeInBytes();
      if (VectorElementSizeInBytes == 1)
        return isStore ? Mips::ST_B : Mips::LD_B;
      if (VectorElementSizeInBytes == 2)
        return isStore ? Mips::ST_H : Mips::LD_H;
      if (VectorElementSizeInBytes == 4)
        return isStore ? Mips::ST_W : Mips::LD_W;
      if (VectorElementSizeInBytes == 8)
        return isStore ? Mips::ST_D : Mips::LD_D;
      return Opc;
    }
    default:
      return Opc;
    }
  }
  return Opc;
}

bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  if (I.getOpcode() == Mips::G_MUL) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GEP: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRCOND: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
             .add(I.getOperand(0))
             .addUse(Mips::ZERO)
             .add(I.getOperand(1));
    break;
  }
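  // In outline, G_BRJT is lowered to the following sequence (the extra ADDu
  // with the global base register is only emitted under PIC):
  //   sll   $idx,  $index, log2(entry size)
  //   addu  $addr, $table, $idx
  //   lw    $dest, %lo(JTI)($addr)
  //   jr    $dest                        (PseudoIndirectBranch)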
  case G_BRJT: {
    unsigned EntrySize =
        MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
    if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
                               .addUse(MF.getInfo<MipsFunctionInfo>()
                                           ->getGlobalBaseRegForGlobalISel());
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    MachineInstr *Branch =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
            .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    const Register DestReg = I.getOperand(0).getReg();
    const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

    const TargetRegisterClass *DefRC = nullptr;
    if (Register::isPhysicalRegister(DestReg))
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(OpSize,
                                       *RBI.getRegBank(DestReg, MRI, TRI), RBI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_GEP + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_GEP) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(*I.memoperands_begin());
    break;
  }
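  // Division and remainder go through the HI/LO accumulator: PseudoSDIV or
  // PseudoUDIV defines an ACC64 register, and the result is then read back
  // with PseudoMFLO (quotient) or PseudoMFHI (remainder).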
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_IMPLICIT_DEF: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .add(I.getOperand(0));

    // Set the register class based on the register bank; there can be both
    // FPR and GPR implicit defs.
    MRI.setRegClass(MI->getOperand(0).getReg(),
                    getRegClassForTypeOnBank(
                        MRI.getType(I.getOperand(0).getReg()).getSizeInBits(),
                        *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI),
                        RBI));
    break;
  }
  case G_CONSTANT: {
    MachineIRBuilder B(I);
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
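  // FP constants have no direct load-immediate form here: the bit pattern is
  // first materialized in GPR32 registers and then moved to the FPU, with
  // MTC1 for 32-bit values and BuildPairF64(_64) for the two halves of a
  // 64-bit value.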
  case G_FCONSTANT: {
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      MachineInstrBuilder MTC1 =
          B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      MachineIRBuilder B(I);
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
                   : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
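  // Global addresses are formed in one of two ways (see the code below):
  // under PIC the address is loaded from the GOT, with an extra %lo add for
  // globals that have local linkage; otherwise it is built directly with a
  // lui/addiu %hi/%lo pair.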
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                .addReg(MF.getInfo<MipsFunctionInfo>()
                                            ->getGlobalBaseRegForGlobalISel())
                                .addGlobalAddress(GVal);
      // Global values that don't have local linkage are handled differently
      // when they are part of a call sequence. MipsCallLowering::lowerCall
      // creates the G_GLOBAL_VALUE instruction as part of the call sequence
      // and adds the MO_GOT_CALL flag when the callee doesn't have local
      // linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
      else
        LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
      LWGOT->addMemOperand(
          MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                      MachineMemOperand::MOLoad, 4, 4));
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               .addReg(MF.getInfo<MipsFunctionInfo>()
                           ->getGlobalBaseRegForGlobalISel())
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               .addMemOperand(
                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                           MachineMemOperand::MOLoad, 4, 4));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
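  // G_ICMP is expanded into SLT/SLTu/SLTiu plus, where needed, an XOR/XORi to
  // combine or negate the result. For example, an equality compare becomes
  // roughly:
  //   xor   $tmp, $lhs, $rhs
  //   sltiu $dst, $tmp, 1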
  case G_ICMP: {
    struct Instr {
      unsigned Opcode;
      Register Def, LHS, RHS;
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    SmallVector<struct Instr, 2> Instructions;
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    MachineIRBuilder B(I);
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // The default compare result in the GPR register will be `true`.
    // We move `false` (Mips::ZERO) into the GPR result when the fcmp gives
    // false, using MOVF_I. When the original predicate (Cond) is the logical
    // negation of MipsFCMPCondCode, the result is inverted, i.e. MOVT_I is
    // used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
                   : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
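  // G_VASTART is selected to a LEA_ADDiu of the function's varargs frame
  // index followed by an SW that stores that address into the va_list object
  // pointed to by operand 0.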
  case G_VASTART: {
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
                                                   MipsSubtarget &Subtarget,
                                                   MipsRegisterBankInfo &RBI) {
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm