1 //===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the VE implementation of the TargetInstrInfo class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "VEInstrInfo.h" 14 #include "VE.h" 15 #include "VEMachineFunctionInfo.h" 16 #include "VESubtarget.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/SmallVector.h" 19 #include "llvm/CodeGen/MachineFrameInfo.h" 20 #include "llvm/CodeGen/MachineInstrBuilder.h" 21 #include "llvm/CodeGen/MachineMemOperand.h" 22 #include "llvm/CodeGen/MachineRegisterInfo.h" 23 #include "llvm/MC/TargetRegistry.h" 24 #include "llvm/Support/CommandLine.h" 25 #include "llvm/Support/Debug.h" 26 #include "llvm/Support/ErrorHandling.h" 27 28 #define DEBUG_TYPE "ve-instr-info" 29 30 using namespace llvm; 31 32 #define GET_INSTRINFO_CTOR_DTOR 33 #include "VEGenInstrInfo.inc" 34 35 // Pin the vtable to this file. 
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

// Returns true if CC is one of the integer condition codes.  All integer
// codes are ordered before VECC::CC_AF (the first floating-point code) in
// the enum, so a simple comparison suffices.
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

// Returns the condition code that tests the logical negation of CC.
// Note that for floating-point codes the negation must account for NaN,
// e.g. !(a > b) is (a <= b || isnan), hence CC_G <-> CC_LENAN.
static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a branch relative long always instruction as unconditional branch.
// For example, br.l.t and br.l.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE has other branch relative always instructions for word/double/float,
  // but we use only long branches in our lowering. So, check it here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat branch relative conditional as conditional branch instructions.
// For example, brgt.l.t and brle.s.nt.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat branch long always instructions as indirect branch.
// For example, b.l.t and b.l.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE has other branch always instructions for word/double/float, but
  // we use only long branches in our lowering. So, check it here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

// Decompose a conditional branch instruction (BRCF<k> CC, sy, sz, addr) into
// its target block and a 3-element condition vector {CC, sy, sz} as used by
// analyzeBranch/insertBranch.
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

// Analyze the branching code at the end of MBB per the TargetInstrInfo
// contract: returns false (success) with TBB/FBB/Cond filled in when the
// terminators form a recognized pattern, true when the block cannot be
// understood (e.g. indirect branches).
bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator left is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

// Insert branch code at the end of MBB. Cond is either empty (unconditional
// branch to TBB) or the 3-element {CC, lhs, rhs} vector produced by
// parseCondBranch. Returns the number of instructions inserted.
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three component!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  // Pick the opcode family (int/fp x 32/64-bit) from the condition code and
  // the register width; opc[0] is the imm-reg form, opc[1] the reg-reg form.
  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  // Two-way conditional branch: also emit the unconditional branch to FBB.
  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

// Remove the branch instructions at the end of MBB and return how many were
// removed. Stops at the first non-branch instruction.
unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

// Invert the branch condition produced by analyzeBranch by flipping the
// condition-code operand. Always succeeds (returns false).
bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

// Returns true if Reg aliases one of the 64-bit scalar registers %s0-%s63
// (i.e. it is an I32, I64, or F32 register, all of which share them).
static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

// Copy a register pair (F128 or VM512) by copying each sub-register with the
// given move instruction (VE::ORri for scalars, VE::ANDMmm for vector masks).
// Implicit def/kill of the full super-registers are attached to the last
// emitted instruction so liveness stays correct.
static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // generate "ORri, dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // generate "ANDM, dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

// Emit a register-to-register copy, selecting the instruction(s) by register
// class: scalar OR with 0, vector-register VOR (with a temporary vector
// length), mask ANDM, or a sub-register pair copy for VM512/F128.
void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate following instructions
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    // reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||     // I64
      MI.getOpcode() == VE::LDLSXrii ||  // I32
      MI.getOpcode() == VE::LDUrii ||    // F32
      MI.getOpcode() == VE::LDQrii ||    // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||   // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii   // VM512 (pseudo)
  ) {
    // Match only the plain [FI + 0 + 0] addressing form.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||     // I64
      MI.getOpcode() == VE::STLrii ||    // I32
      MI.getOpcode() == VE::STUrii ||    // F32
      MI.getOpcode() == VE::STQrii ||    // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||   // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii   // VM512 (pseudo)
  ) {
    // Match only the plain [FI + 0 + 0] addressing form.
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

// Spill SrcReg to stack slot FI, selecting the store opcode by register
// class.  A MachineMemOperand is attached so later passes can reason about
// the memory access.
void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI,
                                      Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

// Reload DestReg from stack slot FI, selecting the load opcode by register
// class (mirror of storeRegToStackSlot above).
void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       Register DestReg, int FI,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI,
                                       Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

// Try to fold the immediate defined by DefMI (an ORim or LEAzii) directly
// into UseMI, replacing a register operand with an immediate and switching
// UseMI to the matching ri/ir/rm opcode.  Returns true and may erase DefMI
// (when Reg has no other uses) on success.
bool VEInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "FoldImmediate\n");

  // First, check whether DefMI materializes a plain immediate.
  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General move small immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General move immediate instruction on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32. Current implementation requires
  //   EXTRACT_SUBREG, so input has following COPY and it avoids folding:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i32 = COPY %1.sub_i32
  //   %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  // Second, check whether UseMI is an instruction we know how to rewrite,
  // and record the candidate replacement opcodes.
  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND

  // Third, pick the new opcode and the operand index to turn into an
  // immediate, depending on which operand uses Reg and whether the
  // immediate fits in a simm7 or mimm encoding.
  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // This ImmVal matches the SImm7 slot, so change UseOpc to an
      // instruction that holds a simm7 slot.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, change UseOpc to an instruction that holds a mimm slot.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value matches the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value matches the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  // Finally, rewrite UseMI in place and erase DefMI if Reg is now dead.
  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

// Return (and lazily initialize) the global base register %s15 (%got),
// inserting a GETGOT pseudo at the top of the entry block on first use.
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

// Map a VM512 register VMPn to the even (upper) VM register of its pair.
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

// Map a VM512 register VMPn to the odd (lower) VM register of its pair.
static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    // Binary operations (ANDM/ORM/XORM/EQVM/NNDM): one instruction per half.
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    // Unary negation: only two operands.
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

// Copy the operands of a pseudo VFMK instruction MI onto MIB, substituting
// the upper or lower half of each VM512 operand as requested.
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

// Expand a pseudo VFMK operating on a VM512 register into a pair of real
// instructions operating on the upper and lower VM halves.
static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // replace to pvfmk.w.up and pvfmk.w.lo
  // replace to pvfmk.s.up and pvfmk.s.lo

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

// Expand target-specific post-RA pseudo instructions: stack pseudos and the
// VM512 pseudos (logical ops, LVM/SVM element access, VFMK).  Returns true
// if MI was expanded.
bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    // Element indices 0-3 address the lower VM half, 4-7 the upper half.
    Register VMX = VMXl;
    if (Imm >= 4) {
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    // Element indices 0-3 address the lower VM half, 4-7 the upper half.
    Register VMZ = VMZl;
    if (Imm >= 4) {
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      // Kill the whole VM512 source register, not just the half we read.
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

// Expand EXTEND_STACK: emit a stack-limit check and, when the new stack
// pointer is below the limit, a monitor call (syscall 0x13b "grow") that
// extends the stack.  Splits the current block and creates the CFG below.
bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create following instructions and multiple basic blocks.
  //
  // thisBB:
  //   brge.l.t %sp, %sl, sinkBB
  // syscallBB:
  //   ld      %s61, 0x18(, %tp)        // load param area
  //   or      %s62, 0, %s0             // spill the value of %s0
  //   lea     %s63, 0x13b              // syscall # of grow
  //   shm.l   %s63, 0x0(%s61)          // store syscall # at addr:0
  //   shm.l   %sl, 0x8(%s61)           // store old limit at addr:8
  //   shm.l   %sp, 0x10(%s61)          // store new limit at addr:16
  //   monc                             // call monitor
  //   or      %s0, 0, %s62             // restore the value of %s0
  // sinkBB:

  // Create new MBB
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create following instruction
  //
  //   dst = %sp + target specific frame + the size of parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of stack as described
  // in VEFrameLowering.cpp. So, we adjust it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also adds the size of parameter area.
1100 if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF)) 1101 NumBytes += MFI.getMaxCallFrameSize(); 1102 1103 BuildMI(*MBB, MI, DL, TII.get(VE::LEArii)) 1104 .addDef(MI.getOperand(0).getReg()) 1105 .addReg(VE::SX11) 1106 .addImm(0) 1107 .addImm(NumBytes); 1108 1109 MI.eraseFromParent(); // The pseudo instruction is gone now. 1110 return true; 1111 } 1112