//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

static cl::opt<bool>
PreferNoCSEL("prefer-no-csel", cl::Hidden,
             cl::desc("Prefer predicated Move to CSEL"),
             cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
MCInst Thumb2InstrInfo::getNop() const {
  return MCInstBuilder(ARM::tHINT).addImm(0).addImm(ARMCC::AL).addReg(0);
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  Register PredReg;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
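    // Scan backwards for the t2IT that predicated the removed tail. If the IT
    // immediately preceded the tail (Count is still 4), the tail was the only
    // instruction in its block and the IT can simply be deleted; otherwise
    // shorten the mask by moving its terminating 1 up so the block ends at
    // the last remaining predicated instruction.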
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the IT
    // block formation pass.
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  Register PredReg;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

MachineInstr *
Thumb2InstrInfo::optimizeSelect(MachineInstr &MI,
                                SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                bool PreferFalse) const {
  // Try the base optimizeSelect first, which uses canFoldIntoMOVCC to fold
  // the MOVCC into another instruction. If that fails, fall back to a CSEL
  // on 8.1-M.
  MachineInstr *RV = ARMBaseInstrInfo::optimizeSelect(MI, SeenMIs, PreferFalse);
  if (!RV && getSubtarget().hasV8_1MMainlineOps() && !PreferNoCSEL) {
    Register DestReg = MI.getOperand(0).getReg();

    if (!DestReg.isVirtual())
      return nullptr;

    MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                        get(ARM::t2CSEL), DestReg)
                                    .add(MI.getOperand(2))
                                    .add(MI.getOperand(1))
                                    .add(MI.getOperand(3));
    SeenMIs.insert(NewMI);
    return NewMI;
  }
  return RV;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, MCRegister DestReg,
                                  MCRegister SrcReg, bool KillSrc) const {
  // Delegate SPR, DPR, and QPR copies to the base class.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. That is not a
    // problem for gsub_0, but gsub_1 needs an extra constraint (it could
    // otherwise be sp).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its destination registers to be in rGPR. That is
    // not a problem for gsub_0, but gsub_1 needs an extra constraint (it
    // could otherwise be sp).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  Module &M = *MF.getFunction().getParent();

  if (M.getStackProtectorGuard() == "tls") {
    expandLoadStackGuardBase(MI, ARM::t2MRC, ARM::t2LDRi12);
    return;
  }

  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());

  if (MF.getSubtarget<ARMSubtarget>().isGVInGOT(GV))
    expandLoadStackGuardBase(MI, ARM::t2LDRLIT_ga_pcrel, ARM::t2LDRi12);
  else if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

MachineInstr *Thumb2InstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI,
                                                      unsigned OpIdx1,
                                                      unsigned OpIdx2) const {
  switch (MI.getOpcode()) {
  case ARM::MVE_VMAXNMAf16:
  case ARM::MVE_VMAXNMAf32:
  case ARM::MVE_VMINNMAf16:
  case ARM::MVE_VMINNMAf32:
    // Don't allow predicated instructions to be commuted.
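    // Under a VPT predicate, the inactive lanes of the result keep the value
    // of the tied accumulator input, so the two source operands are not
    // interchangeable.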
    if (getVPTInstrPredicate(MI) != ARMVCC::None)
      return nullptr;
  }
  return ARMBaseInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, Register DestReg,
                                  Register BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, Register PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP, but we do not know anything
        // about BaseReg. t2ADDrr is an invalid instruction if SP is used as
        // the second argument, but it is fine if SP is the first argument.
        // To be sure we do not generate an invalid encoding, put BaseReg
        // first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
           "Writing to SP, from other register.");

    // Try to use T1, as it is smaller.
    if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
      assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
      Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
          .addReg(BaseReg)
          .addImm(ThisVal / 4)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      break;
    }
    bool HasCCOut = true;
    int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
    bool ToSP = DestReg == ARM::SP;
    unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
    unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
    unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
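    // Updates that write SP use the dedicated *spImm pseudo-opcodes so that
    // only encodings which may legally target SP are selected.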
    Opc = isSub ? t2SUB : t2ADD;
    // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
    if (ImmIsT2SO != -1) {
      NumBytes = 0;
    } else if (ThisVal < 4096) {
      // Prefer T3 if we can do it in a single instruction: subw rd, rn, imm12
      // | subw sp, sp, imm12
      Opc = isSub ? t2SUBi12 : t2ADDi12;
      HasCCOut = false;
      NumBytes = 0;
    } else {
      // Use one T2 instruction to reduce NumBytes.
      // FIXME: Move this to ARMAddressingModes.h?
      unsigned RotAmt = countLeadingZeros(ThisVal);
      ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
      NumBytes &= ~ThisVal;
      assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
             "Bit extraction didn't work?");
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12: return ARM::t2LDRi8;
  case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12: return ARM::t2STRi8;
  case ARM::t2STRBi12: return ARM::t2STRBi8;
  case ARM::t2STRHi12: return ARM::t2STRHi8;
  case ARM::t2PLDi12: return ARM::t2PLDi8;
  case ARM::t2PLDWi12: return ARM::t2PLDWi8;
  case ARM::t2PLIi12: return ARM::t2PLIi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8: return ARM::t2LDRi12;
  case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8: return ARM::t2STRi12;
  case ARM::t2STRBi8: return ARM::t2STRBi12;
  case ARM::t2STRHi8: return ARM::t2STRHi12;
  case ARM::t2PLDi8: return ARM::t2PLDi12;
  case ARM::t2PLDWi8: return ARM::t2PLDWi12;
  case ARM::t2PLIi8: return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs: return ARM::t2LDRi12;
  case ARM::t2LDRHs: return ARM::t2LDRHi12;
  case ARM::t2LDRBs: return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs: return ARM::t2STRi12;
  case ARM::t2STRBs: return ARM::t2STRBi12;
  case ARM::t2STRHs: return ARM::t2STRHi12;
  case ARM::t2PLDs: return ARM::t2PLDi12;
  case ARM::t2PLDWs: return ARM::t2PLDWi12;
  case ARM::t2PLIs: return ARM::t2PLIi12;
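
  // These forms already take an immediate offset; return them unchanged.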
  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               Register FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME: mode for thumb2?

  const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
  if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    Register PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
                              : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
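    // A Thumb-2 modified immediate can encode an 8-bit value rotated so that
    // its most significant bit lands on any bit position, so the 8 bits
    // starting at the leading one of Offset always form an encodable value.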
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8neg ||
        AddrMode == ARMII::AddrModeT2_i12) {
      // The i8 form supports only negative offsets and the i12 form only
      // positive ones, so pick the opcode based on the sign of Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default: NumBits = 7; OffsetMask = 0x0; break;
      }
      // The MCInst operand expects an already-scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning in NDEBUG builds
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      NumBits = 8 + 2;
      // The MCInst operand expects an already-scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into the instruction. We need to make
    // sure the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct.
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in as much as we can to
    // simplify what remains.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = isSub ? -Offset : Offset;
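  // Report success only if the offset was folded completely and the chosen
  // frame register is actually usable by this instruction; any residue is
  // left in Offset for the caller to materialize.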
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           Register &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            Register &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}

void llvm::recomputeVPTBlockMask(MachineInstr &Instr) {
  assert(isVPTOpcode(Instr.getOpcode()) && "Not a VPST or VPT Instruction!");

  MachineOperand &MaskOp = Instr.getOperand(0);
  assert(MaskOp.isImm() && "Operand 0 is not the block mask of the VPT/VPST?!");

  MachineBasicBlock::iterator Iter = ++Instr.getIterator(),
                              End = Instr.getParent()->end();

  while (Iter != End && Iter->isDebugInstr())
    ++Iter;

  // Verify that the instruction after the VPT/VPST is predicated (it should
  // be), and skip it.
  assert(Iter != End && "Expected some instructions in any VPT block");
  assert(
      getVPTInstrPredicate(*Iter) == ARMVCC::Then &&
      "VPT/VPST should be followed by an instruction with a 'then' predicate!");
  ++Iter;

  // Iterate over the predicated instructions, updating the BlockMask as we go.
  ARM::PredBlockMask BlockMask = ARM::PredBlockMask::T;
  while (Iter != End) {
    if (Iter->isDebugInstr()) {
      ++Iter;
      continue;
    }
    ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*Iter);
    if (Pred == ARMVCC::None)
      break;
    BlockMask = expandPredBlockMask(BlockMask, Pred);
    ++Iter;
  }

  // Rewrite the BlockMask.
  MaskOp.setImm((int64_t)(BlockMask));
}