//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;

  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
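  // The cache is filled in once, by the OpcodeCache constructor defined below,
  // so the selection code can simply read the fields.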
  struct OpcodeCache {
    unsigned ZEXT16;
    unsigned SEXT16;

    unsigned ZEXT8;
    unsigned SEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND;
    unsigned RSB;

    unsigned STORE32;
    unsigned LOAD32;

    unsigned STORE16;
    unsigned LOAD16;

    unsigned STORE8;
    unsigned LOAD8;

    unsigned ADDrr;
    unsigned ADDri;

    // Used for G_ICMP
    unsigned CMPrr;
    unsigned MOVi;
    unsigned MOVCCi;

    // Used for G_SELECT
    unsigned MOVCCr;

    unsigned TSTri;
    unsigned Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;

  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize
  // the cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
}

const unsigned zero_reg = 0;

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  if (Register::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
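  // The merge itself is done by rewriting the instruction into a VMOVDRR,
  // which moves the two GPR operands into the destination DPR.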
  Register VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  Register VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);
  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);

  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}

unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}

unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB->getOperand(0).getReg();
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB->getOperand(2).getReg();
  auto RHSReg = MIB->getOperand(3).getReg();
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
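    // This happens for predicates such as FCMP_ONE and FCMP_UEQ: the first
    // comparison writes into an intermediate register, and the second one ORs
    // its own result into the final result through the PrevRes operand of
    // insertComparison.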
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  unsigned Size = TM.getPointerSize(0);
  unsigned Alignment = 4;

  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            Size, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB->getOperand(0).getReg();
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addReg(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
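    // For RWPI code the static base lives in R9, so the global's address is
    // computed as R9 + the SB-relative offset materialized above.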
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB->getOperand(1).getReg();
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB->getOperand(0).getReg();
  auto TrueReg = MIB->getOperand(2).getReg();
  auto FalseReg = MIB->getOperand(3).getReg();
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

bool ARMInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        Register SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
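      // The VMOVRRD built below splits the D register into two GPRs; only the
      // low half ends up in DstReg, the high half goes into a scratch register.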
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    unsigned Alignment = Size;

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->RemoveOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    Register OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL: {
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  }
  case G_GEP:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.isAtomic()) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    Register Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    if (ValSize == 1 && NewOpc == Opcodes.STORE8) {
      // Before storing a 1-bit value, make sure to clear out any unneeded bits.
      Register OriginalValue = I.getOperand(0).getReg();

      Register ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass);
      I.getOperand(0).setReg(ValueToStore);

      auto InsertBefore = I.getIterator();
      auto AndI = BuildMI(MBB, InsertBefore, I.getDebugLoc(),
                          TII.get(Opcodes.AND))
                      .addDef(ValueToStore)
                      .addUse(OriginalValue)
                      .addImm(1)
                      .add(predOps(ARMCC::AL))
                      .add(condCodeOp());
      if (!constrainSelectedInstRegOperands(*AndI, TII, TRI, RBI))
        return false;
    }

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    Register DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}