//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the
    // IT block formation pass.
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, unsigned DestReg,
                                  unsigned SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(
          SrcReg, &ARM::GPRPair_with_gsub_1_in_GPRwithAPSRnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(
          DestReg, &ARM::GPRPair_with_gsub_1_in_GPRwithAPSRnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (TargetRegisterInfo::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not
        // know anything about BaseReg. t2ADDrr is an invalid
        // instruction if SP is used as the second argument, but
        // is fine if SP is the first argument. To be sure we
        // do not generate an invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    bool HasCCOut = true;
    if (BaseReg == ARM::SP) {
      // sub sp, sp, #imm7
      if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
        assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
        Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
        BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
            .addReg(BaseReg)
            .addImm(ThisVal / 4)
            .setMIFlags(MIFlags)
            .add(predOps(ARMCC::AL));
        NumBytes = 0;
        continue;
      }

      // sub rd, sp, so_imm
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    } else {
      assert(DestReg != ARM::SP && BaseReg != ARM::SP);
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else if (ThisVal < 4096) {
        Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
        HasCCOut = false;
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12:   return ARM::t2LDRi8;
  case ARM::t2LDRHi12:  return ARM::t2LDRHi8;
  case ARM::t2LDRBi12:  return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12:   return ARM::t2STRi8;
  case ARM::t2STRBi12:  return ARM::t2STRBi8;
  case ARM::t2STRHi12:  return ARM::t2STRHi8;
  case ARM::t2PLDi12:   return ARM::t2PLDi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8:   return ARM::t2LDRi12;
  case ARM::t2LDRHi8:  return ARM::t2LDRHi12;
  case ARM::t2LDRBi8:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8:   return ARM::t2STRi12;
  case ARM::t2STRBi8:  return ARM::t2STRBi12;
  case ARM::t2STRHi8:  return ARM::t2STRHi12;
  case ARM::t2PLDi8:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs:   return ARM::t2LDRi12;
  case ARM::t2LDRHs:  return ARM::t2LDRHi12;
  case ARM::t2LDRBs:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs:   return ARM::t2STRi12;
  case ARM::t2STRBs:  return ARM::t2STRBi12;
  case ARM::t2STRHs:  return ARM::t2STRHi12;
  case ARM::t2PLDs:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = Opcode != ARM::t2ADDri12;

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
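    // For example (illustrative values, not from the original source): with
    // Offset = 0x12345, countLeadingZeros gives 15, the rotated mask is
    // 0x1FE00, so ThisImmVal becomes 0x12200 (a valid Thumb-2 modified
    // immediate) and 0x145 is left in Offset for the caller to handle.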
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative, and i12 supports only positive, so
      // based on Offset sign convert Opcode to the appropriate
      // instruction.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default:                     NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning in NDEBUG builds
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into instruction.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in what we can to simplify.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            unsigned &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}