//===- HexagonBitTracker.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonBitTracker.h"
#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

using namespace llvm;

using BT = BitTracker;

HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
                                   MachineRegisterInfo &mri,
                                   const HexagonInstrInfo &tii,
                                   MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map back the
  // virtual register to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
  unsigned InVirtReg, InPhysReg = 0;

  for (const Argument &Arg : MF.getFunction().args()) {
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
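    // Stop at the first argument that is not passed in a single 32- or
    // 64-bit register; past that point the physical-register mapping in
    // getNextPhysReg below is no longer reliable.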
    if (Width == 0 || Width > 64)
      break;
    if (Arg.hasAttribute(Attribute::ByVal))
      continue;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    if (Arg.hasAttribute(Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Arg.hasAttribute(Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}

BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  unsigned ID = RC.getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  const auto &HRI = static_cast<const HexagonRegisterInfo&>(TRI);
  bool IsSubLo = (Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
  switch (ID) {
    case Hexagon::DoubleRegsRegClassID:
    case Hexagon::HvxWRRegClassID:
    case Hexagon::HvxVQRRegClassID:
      return IsSubLo ? BT::BitMask(0, RW-1)
                     : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << printReg(Reg, &TRI, Sub) << " in reg class "
         << TRI.getRegClassName(&RC) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}

uint16_t HexagonEvaluator::getPhysRegBitWidth(unsigned Reg) const {
  assert(Register::isPhysicalRegister(Reg));

  using namespace Hexagon;
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
  if (HST.useHVXOps()) {
    for (auto &RC : {HvxVRRegClass, HvxWRRegClass, HvxQRRegClass,
                     HvxVQRRegClass})
      if (RC.contains(Reg))
        return TRI.getRegSizeInBits(RC);
  }
  // Default treatment for other physical registers.
  if (const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg))
    return TRI.getRegSizeInBits(*RC);

  llvm_unreachable(
      (Twine("Unhandled physical register") + TRI.getName(Reg)).str().c_str());
}

const TargetRegisterClass &HexagonEvaluator::composeWithSubRegIndex(
      const TargetRegisterClass &RC, unsigned Idx) const {
  if (Idx == 0)
    return RC;

#ifndef NDEBUG
  const auto &HRI = static_cast<const HexagonRegisterInfo&>(TRI);
  bool IsSubLo = (Idx == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
  bool IsSubHi = (Idx == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi));
  assert(IsSubLo != IsSubHi && "Must refer to either low or high subreg");
#endif

  switch (RC.getID()) {
    case Hexagon::DoubleRegsRegClassID:
      return Hexagon::IntRegsRegClass;
    case Hexagon::HvxWRRegClassID:
      return Hexagon::HvxVRRegClass;
    case Hexagon::HvxVQRRegClassID:
      return Hexagon::HvxWRRegClass;
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << "Reg class id: " << RC.getID() << " idx: " << Idx << '\n';
#endif
  llvm_unreachable("Unimplemented combination of reg class/subreg idx");
}

namespace {

class RegisterRefs {
  std::vector<BT::RegisterRef> Vector;

public:
  RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg())
        Vector[i] = BT::RegisterRef(MO);
      // For indices that don't correspond to registers, the entry will
      // remain constructed via the default constructor.
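      // A default-constructed RegisterRef has Reg == 0, which the
      // evaluation code below treats as "no register".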
    }
  }

  size_t size() const { return Vector.size(); }

  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert with bad argument.
    assert(n < Vector.size());
    return Vector[n];
  }
};

} // end anonymous namespace

bool HexagonEvaluator::evaluate(const MachineInstr &MI,
                                const CellMapType &Inputs,
                                CellMapType &Outputs) const {
  using namespace Hexagon;

  unsigned NumDefs = 0;

  // Sanity verification: there should not be any defs with subregisters.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  unsigned Opc = MI.getOpcode();

  if (MI.mayLoad()) {
    switch (Opc) {
      // These instructions may be marked as mayLoad, but they are generating
      // immediate values, so skip them.
      case CONST32:
      case CONST64:
        break;
      default:
        return evaluateLoad(MI, Inputs, Outputs);
    }
  }

  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
  if (MI.isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }

  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }

  RegisterRefs Reg(MI);
#define op(i) MI.getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI.getOperand(i).getImm()

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
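  // An immediate operand is materialized as a constant cell of width W;
  // an operand that is neither a register nor an immediate yields a cell
  // of unknown ("self") bits.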
  auto cop = [this, &Reg, &MI, &Inputs](unsigned N,
                                        uint16_t W) -> BT::RegisterCell {
    const MachineOperand &Op = MI.getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };

  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many
  // cases below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  // Register id of the 0th operand. It can be 0.
  unsigned Reg0 = Reg[0].Reg;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32:
    case CONST64:
      return rr0(eIMM(im(1), W0), Outputs);
    case PS_false:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case PS_true:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case PS_fi: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlign(FI).value() + std::abs(Off);
      unsigned L = countTrailingZeros(A);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }

    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, RW);
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(eINS(RC, eXTR(rc(1), 0, PW), 0), Outputs);
    }

    // Arithmetic:

    case A2_abs:
    case A2_absp:
      // TODO
      break;

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
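    // In the mnemonics handled here, "acc" denotes accumulation (the
    // computed value is added to operand 1, which is tied to the
    // destination), and "nac" denotes subtraction from it.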
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }
    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs"
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);

    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
    //case M2_mpysu_up:

    // Logical/bitwise:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_not:
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);

    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_vh:
    case S2_asl_i_vw:
      // TODO
      break;

    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
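      // The rounded value is computed in 64 bits as ((Rs >>s #u5) + 1) >>s 1.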
      assert(W0 == 32);
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_r_vh:
    case S2_asr_i_vw:
    case S2_asr_i_svw_trun:
      // TODO
      break;

    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
                            : RC[BX].is(1) ? BT::BitValue::Zero
                                           : BT::BitValue::self();
      return rr0(RC, Outputs);
    }

    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S4_extract:
    case S4_extractp:
    case S2_extractu:
    case S2_extractup: {
      uint16_t Wd = im(2), Of = im(3);
      assert(Wd <= W0);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with 0 bits.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short, need to extend it with 0s or sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
      if (Wd+Of > W0)
        Wd = W0-Of;
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }

    // Bit permutations:

    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
    case V6_vcombine:
      assert(W0 % 2 == 0);
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // Low half in the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // High half in the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(W0 == 64);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }

    // Mux:

    case C2_muxii:
    case C2_muxir:
    case C2_muxri:
    case C2_mux: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
    case C2_vmux:
      // TODO
      break;

    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);

    // Saturations

    case A2_satb:
      return rr0(eSXT(RegisterCell::self(0, W0).regify(Reg0), 8), Outputs);
    case A2_sath:
      return rr0(eSXT(RegisterCell::self(0, W0).regify(Reg0), 16), Outputs);
    case A2_satub:
      return rr0(eZXT(RegisterCell::self(0, W0).regify(Reg0), 8), Outputs);
    case A2_satuh:
      return rr0(eZXT(RegisterCell::self(0, W0).regify(Reg0), 16), Outputs);

    // Bit count:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
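      // (cl0/cl1 count leading zeros/ones; ct0/ct1 below count trailing
      // zeros/ones.)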
      return rr0(eCLB(rc(1), false/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), true/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), false/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), true/*bit*/, 32), Outputs);
    case S5_popcountp:
      // TODO
      break;

    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(1))
          All1 = false;
        if (!P1[i].is(0))
          continue;
        Has0 = true;
        break;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(0))
          All0 = false;
        if (!P1[i].is(1))
          continue;
        Has1 = true;
        break;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C2_bitsclr:
    case C2_bitsclri:
    case C2_bitsset:
    case C4_nbitsclr:
    case C4_nbitsclri:
    case C4_nbitsset:
      // TODO
      break;
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }

    default:
      // For instructions that define a single predicate register, store
      // the low 8 bits of the register only.
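      // The remaining bits are filled with known zeros, mirroring the
      // C2_tfrrp handling above.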
      if (unsigned DefR = getUniqueDefVReg(MI)) {
        if (MRI.getRegClass(DefR) == &Hexagon::PredRegsRegClass) {
          BT::RegisterRef PD(DefR, 0);
          uint16_t RW = getRegBitWidth(PD);
          uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
          RegisterCell RC = RegisterCell::self(DefR, RW);
          RC.fill(PW, RW, BT::BitValue::Zero);
          putCell(PD, RC, Outputs);
          return true;
        }
      }
      return MachineEvaluator::evaluate(MI, Inputs, Outputs);
  }
#undef im
#undef rc
#undef op
  return false;
}

bool HexagonEvaluator::evaluate(const MachineInstr &BI,
                                const CellMapType &Inputs,
                                BranchTargetList &Targets,
                                bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::analyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
  unsigned Opc = BI.getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfpt:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
      LLVM_FALLTHROUGH;
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch: if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI.getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI.getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
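    // The branch is never taken, so the only reachable successor is the
    // fall-through block.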
    FallsThru = true;
    return true;
  }

  Targets.insert(BI.getOperand(1).getMBB());
  FallsThru = false;
  return true;
}

unsigned HexagonEvaluator::getUniqueDefVReg(const MachineInstr &MI) const {
  unsigned DefReg = 0;
  for (const MachineOperand &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    if (DefReg != 0)
      return 0;
    DefReg = R;
  }
  return DefReg;
}

bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
                                    const CellMapType &Inputs,
                                    CellMapType &Outputs) const {
  using namespace Hexagon;

  if (TII.isPredicated(MI))
    return false;
  assert(MI.mayLoad() && "A load that mayn't?");
  unsigned Opc = MI.getOpcode();

  uint16_t BitNum;
  bool SignEx;

  switch (Opc) {
    default:
      return false;

#if 0
    // memb_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    // memh_fifo
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw2_pi:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    case L2_loadbsw4_pi:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw2_pi:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
    case L2_loadbzw4_pi:
#endif

    case L2_loadrbgp:
    case L2_loadrb_io:
    case L2_loadrb_pbr:
    case L2_loadrb_pci:
    case L2_loadrb_pcr:
    case L2_loadrb_pi:
    case PS_loadrbabs:
    case L4_loadrb_ap:
    case L4_loadrb_rr:
    case L4_loadrb_ur:
      BitNum = 8;
      SignEx = true;
      break;

    case L2_loadrubgp:
    case L2_loadrub_io:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadrub_pi:
    case PS_loadrubabs:
    case L4_loadrub_ap:
    case L4_loadrub_rr:
    case L4_loadrub_ur:
      BitNum = 8;
      SignEx = false;
      break;

    case L2_loadrhgp:
    case L2_loadrh_io:
    case L2_loadrh_pbr:
    case L2_loadrh_pci:
    case L2_loadrh_pcr:
    case L2_loadrh_pi:
    case PS_loadrhabs:
    case L4_loadrh_ap:
    case L4_loadrh_rr:
    case L4_loadrh_ur:
      BitNum = 16;
      SignEx = true;
      break;

    case L2_loadruhgp:
    case L2_loadruh_io:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadruh_pi:
    case L4_loadruh_rr:
    case PS_loadruhabs:
    case L4_loadruh_ap:
    case L4_loadruh_ur:
      BitNum = 16;
      SignEx = false;
      break;

    case L2_loadrigp:
    case L2_loadri_io:
    case L2_loadri_pbr:
    case L2_loadri_pci:
    case L2_loadri_pcr:
    case L2_loadri_pi:
    case L2_loadw_locked:
    case PS_loadriabs:
    case L4_loadri_ap:
    case L4_loadri_rr:
    case L4_loadri_ur:
    case LDriw_pred:
      BitNum = 32;
      SignEx = true;
      break;

    case L2_loadrdgp:
    case L2_loadrd_io:
    case L2_loadrd_pbr:
    case L2_loadrd_pci:
    case L2_loadrd_pcr:
    case L2_loadrd_pi:
    case L4_loadd_locked:
    case PS_loadrdabs:
    case L4_loadrd_ap:
    case L4_loadrd_rr:
    case L4_loadrd_ur:
      BitNum = 64;
      SignEx = true;
      break;
  }
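  // The loaded bits themselves are unknown: model them as "self" references
  // to the destination register, and make only the extension into the upper
  // bits explicit.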
  const MachineOperand &MD = MI.getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}

bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
                                          const CellMapType &Inputs,
                                          CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI.isCopy());

  RegisterRef RD = MI.getOperand(0);
  RegisterRef RS = MI.getOperand(1);
  assert(RD.Sub == 0);
  if (!Register::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a virtual
  // register, and make zero-/sign-extends possible (otherwise we would be
  // extending "self" bit values, which will have no effect, since "self"
  // values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}

unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;

  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}

unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  for (std::pair<unsigned,unsigned> P : MRI.liveins())
    if (P.first == PReg)
      return P.second;
  return 0;
}