//===- HexagonSubtarget.cpp - Hexagon Subtarget Information ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Hexagon specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <map>

using namespace llvm;

#define DEBUG_TYPE "hexagon-subtarget"

#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "HexagonGenSubtargetInfo.inc"

static cl::opt<bool> EnableBSBSched("enable-bsb-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false));

static cl::opt<bool> EnableDotCurSched("enable-cur-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable the scheduler to generate .cur"));

static cl::opt<bool> DisableHexagonMISched("disable-hexagon-misched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Disable Hexagon MI Scheduling"));

static cl::opt<bool> EnableSubregLiveness("hexagon-subreg-liveness",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable subregister liveness tracking for Hexagon"));

static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("If present, forces/disables the use of long calls"));

static cl::opt<bool> EnablePredicatedCalls("hexagon-pred-calls",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Consider calls to be predicable"));

static cl::opt<bool> SchedPredsCloser("sched-preds-closer",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> SchedRetvalOptimization("sched-retval-optimization",
  cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> EnableCheckBankConflict("hexagon-check-bank-conflict",
  cl::Hidden, cl::ZeroOrMore, cl::init(true),
  cl::desc("Enable checking for cache bank conflicts"));

HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
                                   StringRef FS, const TargetMachine &TM)
    : HexagonGenSubtargetInfo(TT, CPU, FS), OptLevel(TM.getOptLevel()),
      CPUString(Hexagon_MC::selectHexagonCPU(CPU)),
      InstrInfo(initializeSubtargetDependencies(CPU, FS)),
      RegInfo(getHwMode()), TLInfo(TM, *this),
      InstrItins(getInstrItineraryForCPU(CPUString)) {
  // Beware of the default constructor of InstrItineraryData: it will
  // reset all members to 0.
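  // If the CPU string had not produced a valid itinerary, Itineraries would
  // still be null here, so catch that before any scheduling query uses it.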
  assert(InstrItins.Itineraries != nullptr && "InstrItins not initialized");
}

HexagonSubtarget &
HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
  static std::map<StringRef, Hexagon::ArchEnum> CpuTable{
      {"generic", Hexagon::ArchEnum::V60},
      {"hexagonv5", Hexagon::ArchEnum::V5},
      {"hexagonv55", Hexagon::ArchEnum::V55},
      {"hexagonv60", Hexagon::ArchEnum::V60},
      {"hexagonv62", Hexagon::ArchEnum::V62},
      {"hexagonv65", Hexagon::ArchEnum::V65},
      {"hexagonv66", Hexagon::ArchEnum::V66},
  };

  auto FoundIt = CpuTable.find(CPUString);
  if (FoundIt != CpuTable.end())
    HexagonArchVersion = FoundIt->second;
  else
    llvm_unreachable("Unrecognized Hexagon processor version");

  UseHVX128BOps = false;
  UseHVX64BOps = false;
  UseLongCalls = false;

  UseBSBScheduling = hasV60Ops() && EnableBSBSched;

  ParseSubtargetFeatures(CPUString, FS);

  if (OverrideLongCalls.getPosition())
    UseLongCalls = OverrideLongCalls;

  FeatureBitset Features = getFeatureBits();
  if (HexagonDisableDuplex)
    setFeatureBits(Features.reset(Hexagon::FeatureDuplex));
  setFeatureBits(Hexagon_MC::completeHVXFeatures(Features));

  return *this;
}

void HexagonSubtarget::UsrOverflowMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    if (!SU.isInstr())
      continue;
    SmallVector<SDep, 4> Erase;
    for (auto &D : SU.Preds)
      if (D.getKind() == SDep::Output && D.getReg() == Hexagon::USR_OVF)
        Erase.push_back(D);
    for (auto &E : Erase)
      SU.removePred(E);
  }
}

void HexagonSubtarget::HVXMemLatencyMutation::apply(ScheduleDAGInstrs *DAG) {
  for (SUnit &SU : DAG->SUnits) {
    // Update the latency of chain edges between v60 vector load or store
    // instructions to be 1. These instructions cannot be scheduled in the
    // same packet.
    MachineInstr &MI1 = *SU.getInstr();
    auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
    bool IsStoreMI1 = MI1.mayStore();
    bool IsLoadMI1 = MI1.mayLoad();
    if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
      continue;
    for (SDep &SI : SU.Succs) {
      if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
        continue;
      MachineInstr &MI2 = *SI.getSUnit()->getInstr();
      if (!QII->isHVXVec(MI2))
        continue;
      if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
        SI.setLatency(1);
        SU.setHeightDirty();
        // Change the dependence in the opposite direction too.
        for (SDep &PI : SI.getSUnit()->Preds) {
          if (PI.getSUnit() != &SU || PI.getKind() != SDep::Order)
            continue;
          PI.setLatency(1);
          SI.getSUnit()->setDepthDirty();
        }
      }
    }
  }
}

// Check if a call and subsequent A2_tfrpi instructions should maintain
// scheduling affinity. We are looking for the TFRI to be consumed in
// the next instruction. This should help reduce the instances of
// double register pairs being allocated and scheduled before a call
// when not used until after the call. This situation is exacerbated
// by the fact that we allocate the pair from the callee-saved list,
// leading to excess spills and restores.
bool HexagonSubtarget::CallMutation::shouldTFRICallBind(
    const HexagonInstrInfo &HII, const SUnit &Inst1,
    const SUnit &Inst2) const {
  if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
    return false;

  // The following instruction types are 64-bit operations.
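  // These are the classes whose instructions can consume the register pair
  // defined by the A2_tfrpi in the very next instruction.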
  unsigned Type = HII.getType(*Inst2.getInstr());
  return Type == HexagonII::TypeS_2op || Type == HexagonII::TypeS_3op ||
         Type == HexagonII::TypeALU64 || Type == HexagonII::TypeM;
}

void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
  SUnit *LastSequentialCall = nullptr;
  // Map from virtual register to physical register from the copy.
  DenseMap<unsigned, unsigned> VRegHoldingReg;
  // Map from the physical register to the instruction that uses the virtual
  // register. This is used to create the barrier edge.
  DenseMap<unsigned, SUnit *> LastVRegUse;
  auto &TRI = *DAG->MF.getSubtarget().getRegisterInfo();
  auto &HII = *DAG->MF.getSubtarget<HexagonSubtarget>().getInstrInfo();

  // Currently we only catch the situation when a compare gets scheduled
  // before the preceding call.
  for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {
    // Remember the call.
    if (DAG->SUnits[su].getInstr()->isCall())
      LastSequentialCall = &DAG->SUnits[su];
    // Look for a compare that defines a predicate.
    else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)
      DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));
    // Look for call and tfri* instructions.
    else if (SchedPredsCloser && LastSequentialCall && su > 1 && su < e-1 &&
             shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
      DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));
    // Prevent redundant register copies due to reads and writes of physical
    // registers. The original motivation for this was the code generated
    // between two calls, which was caused by both the return value and the
    // argument for the next call being in %r0.
    // Example:
    //   1: <call1>
    //   2: %vreg = COPY %r0
    //   3: <use of %vreg>
    //   4: %r0 = ...
    //   5: <call2>
    // The scheduler would often swap 3 and 4, so an additional register is
    // needed. This code inserts a Barrier dependence between 3 and 4 to
    // prevent this.
    // The code below checks for all physical registers, not just R0/D0/V0.
    else if (SchedRetvalOptimization) {
      const MachineInstr *MI = DAG->SUnits[su].getInstr();
      if (MI->isCopy() &&
          Register::isPhysicalRegister(MI->getOperand(1).getReg())) {
        // %vregX = COPY %r0
        VRegHoldingReg[MI->getOperand(0).getReg()] = MI->getOperand(1).getReg();
        LastVRegUse.erase(MI->getOperand(1).getReg());
      } else {
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg())
            continue;
          if (MO.isUse() && !MI->isCopy() &&
              VRegHoldingReg.count(MO.getReg())) {
            // <use of %vregX>
            LastVRegUse[VRegHoldingReg[MO.getReg()]] = &DAG->SUnits[su];
          } else if (MO.isDef() && Register::isPhysicalRegister(MO.getReg())) {
            for (MCRegAliasIterator AI(MO.getReg(), &TRI, true); AI.isValid();
                 ++AI) {
              if (LastVRegUse.count(*AI) &&
                  LastVRegUse[*AI] != &DAG->SUnits[su])
                // %r0 = ...
                DAG->addEdge(&DAG->SUnits[su],
                             SDep(LastVRegUse[*AI], SDep::Barrier));
              LastVRegUse.erase(*AI);
            }
          }
        }
      }
    }
  }
}

void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
  if (!EnableCheckBankConflict)
    return;

  const auto &HII = static_cast<const HexagonInstrInfo&>(*DAG->TII);

  // Create artificial edges between loads that could likely cause a bank
  // conflict. Since such loads would normally not have any dependency
  // between them, we cannot rely on existing edges.
  for (unsigned i = 0, e = DAG->SUnits.size(); i != e; ++i) {
    SUnit &S0 = DAG->SUnits[i];
    MachineInstr &L0 = *S0.getInstr();
    if (!L0.mayLoad() || L0.mayStore() ||
        HII.getAddrMode(L0) != HexagonII::BaseImmOffset)
      continue;
    int64_t Offset0;
    unsigned Size0;
    MachineOperand *BaseOp0 = HII.getBaseAndOffset(L0, Offset0, Size0);
    // If the access size is longer than the L1 cache line, skip the check.
    if (BaseOp0 == nullptr || !BaseOp0->isReg() || Size0 >= 32)
      continue;
    // Scan only up to 32 instructions ahead (to avoid n^2 complexity).
    for (unsigned j = i+1, m = std::min(i+32, e); j != m; ++j) {
      SUnit &S1 = DAG->SUnits[j];
      MachineInstr &L1 = *S1.getInstr();
      if (!L1.mayLoad() || L1.mayStore() ||
          HII.getAddrMode(L1) != HexagonII::BaseImmOffset)
        continue;
      int64_t Offset1;
      unsigned Size1;
      MachineOperand *BaseOp1 = HII.getBaseAndOffset(L1, Offset1, Size1);
      if (BaseOp1 == nullptr || !BaseOp1->isReg() || Size1 >= 32 ||
          BaseOp0->getReg() != BaseOp1->getReg())
        continue;
      // Check bits 3 and 4 of the offset: if they differ, a bank conflict
      // is unlikely.
      if (((Offset0 ^ Offset1) & 0x18) != 0)
        continue;
      // Bits 3 and 4 are the same: add an artificial edge and set extra
      // latency.
      SDep A(&S0, SDep::Artificial);
      A.setLatency(1);
      S1.addPred(A, true);
    }
  }
}

/// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
bool HexagonSubtarget::useAA() const {
  return OptLevel != CodeGenOpt::None;
}

/// Perform target specific adjustments to the latency of a schedule
/// dependency.
void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
                                             SDep &Dep) const {
  if (!Src->isInstr() || !Dst->isInstr())
    return;

  MachineInstr *SrcInst = Src->getInstr();
  MachineInstr *DstInst = Dst->getInstr();
  const HexagonInstrInfo *QII = getInstrInfo();

  // Instructions with .new operands have zero latency.
  SmallSet<SUnit *, 4> ExclSrc;
  SmallSet<SUnit *, 4> ExclDst;
  if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }

  if (!hasV60Ops())
    return;

  // Set the latency for a copy to zero since we hope that it will get
  // removed.
  if (DstInst->isCopy())
    Dep.setLatency(0);

  // If it's a REG_SEQUENCE/COPY, use its destination instruction to determine
  // the correct latency.
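  // The latency is instead computed from the original source to the single
  // consumer of the COPY/REG_SEQUENCE result, since the copy is expected to
  // be folded away.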
  if ((DstInst->isRegSequence() || DstInst->isCopy()) && Dst->NumSuccs == 1) {
    Register DReg = DstInst->getOperand(0).getReg();
    MachineInstr *DDst = Dst->Succs[0].getSUnit()->getInstr();
    unsigned UseIdx = -1;
    for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) {
      const MachineOperand &MO = DDst->getOperand(OpNum);
      if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == DReg) {
        UseIdx = OpNum;
        break;
      }
    }
    int DLatency = InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
                                               0, *DDst, UseIdx);
    DLatency = std::max(DLatency, 0);
    Dep.setLatency((unsigned)DLatency);
  }

  // Try to schedule uses near definitions to generate .cur.
  ExclSrc.clear();
  ExclDst.clear();
  if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
      isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
    Dep.setLatency(0);
    return;
  }

  updateLatency(*SrcInst, *DstInst, Dep);
}

void HexagonSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(std::make_unique<UsrOverflowMutation>());
  Mutations.push_back(std::make_unique<HVXMemLatencyMutation>());
  Mutations.push_back(std::make_unique<BankConflictMutation>());
}

void HexagonSubtarget::getSMSMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(std::make_unique<UsrOverflowMutation>());
  Mutations.push_back(std::make_unique<HVXMemLatencyMutation>());
}

// Pin the vtable to this file.
void HexagonSubtarget::anchor() {}

bool HexagonSubtarget::enableMachineScheduler() const {
  if (DisableHexagonMISched.getNumOccurrences())
    return !DisableHexagonMISched;
  return true;
}

bool HexagonSubtarget::usePredicatedCalls() const {
  return EnablePredicatedCalls;
}

void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
                                     MachineInstr &DstInst, SDep &Dep) const {
  if (Dep.isArtificial()) {
    Dep.setLatency(1);
    return;
  }

  if (!hasV60Ops())
    return;

  auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());

  // BSB scheduling.
  if (QII.isHVXVec(SrcInst) || useBSBScheduling())
    Dep.setLatency((Dep.getLatency() + 1) >> 1);
}

void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
  MachineInstr *SrcI = Src->getInstr();
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    unsigned DepR = I.getReg();
    int DefIdx = -1;
    for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = SrcI->getOperand(OpNum);
      if (MO.isReg() && MO.isDef() && MO.getReg() == DepR)
        DefIdx = OpNum;
    }
    assert(DefIdx >= 0 && "Def Reg not found in Src MI");
    MachineInstr *DstI = Dst->getInstr();
    SDep T = I;
    for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
      const MachineOperand &MO = DstI->getOperand(OpNum);
      if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
        int Latency = InstrInfo.getOperandLatency(&InstrItins, *SrcI,
                                                  DefIdx, *DstI, OpNum);

        // For some instructions (e.g. COPY), we might end up with a negative
        // latency as they don't have any Itinerary class associated with
        // them.
        Latency = std::max(Latency, 0);

        I.setLatency(Latency);
        updateLatency(*SrcI, *DstI, I);
      }
    }

    // Update the latency of the opposite edge too.
    T.setSUnit(Src);
    auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
    assert(F != Dst->Preds.end());
    F->setLatency(I.getLatency());
  }
}

/// Change the latency between the two SUnits.
void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
      const {
  for (auto &I : Src->Succs) {
    if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
      continue;
    SDep T = I;
    I.setLatency(Lat);

    // Update the latency of the opposite edge too.
    T.setSUnit(Src);
    auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
    assert(F != Dst->Preds.end());
    F->setLatency(Lat);
  }
}

/// If the SUnit has a zero latency edge, return the other SUnit.
static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
  for (auto &I : Deps)
    if (I.isAssignedRegDep() && I.getLatency() == 0 &&
        !I.getSUnit()->getInstr()->isPseudo())
      return I.getSUnit();
  return nullptr;
}

// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
      const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
      SmallSet<SUnit*, 4> &ExclDst) const {
  MachineInstr &SrcInst = *Src->getInstr();
  MachineInstr &DstInst = *Dst->getInstr();

  // Ignore Boundary SU nodes as these have null instructions.
  if (Dst->isBoundaryNode())
    return false;

  if (SrcInst.isPHI() || DstInst.isPHI())
    return false;

  if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
      !TII->canExecuteInBundle(SrcInst, DstInst))
    return false;

  // The architecture doesn't allow three dependent instructions in the same
  // packet. So, if the destination has a zero latency successor, then it's
  // not a candidate for a zero latency predecessor.
  if (getZeroLatency(Dst, Dst->Succs) != nullptr)
    return false;

  // Check if the Dst instruction is the best candidate first.
  SUnit *Best = nullptr;
  SUnit *DstBest = nullptr;
  SUnit *SrcBest = getZeroLatency(Dst, Dst->Preds);
  if (SrcBest == nullptr || Src->NodeNum >= SrcBest->NodeNum) {
    // Check that Src doesn't have a better candidate.
    DstBest = getZeroLatency(Src, Src->Succs);
    if (DstBest == nullptr || Dst->NodeNum <= DstBest->NodeNum)
      Best = Dst;
  }
  if (Best != Dst)
    return false;

  // The caller frequently adds the same dependence twice. If so, then
  // return true for this case too.
  if ((Src == SrcBest && Dst == DstBest) ||
      (SrcBest == nullptr && Dst == DstBest) ||
      (Src == SrcBest && DstBest == nullptr))
    return true;

  // Reassign the latency for the previous bests, which requires setting
  // the dependence edge in both directions.
  if (SrcBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(SrcBest, Dst, 1);
    else
      restoreLatency(SrcBest, Dst);
  }
  if (DstBest != nullptr) {
    if (!hasV60Ops())
      changeLatency(Src, DstBest, 1);
    else
      restoreLatency(Src, DstBest);
  }

  // Attempt to find another opportunity for zero latency in a different
  // dependence.
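  // Whichever previous best was displaced above gets a chance to pair with a
  // different partner below.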
  if (SrcBest && DstBest)
    // If there is an edge from SrcBest to DstBest, then try to change that
    // to 0 now.
    changeLatency(SrcBest, DstBest, 0);
  else if (DstBest) {
    // Check if the previous best destination instruction has a new zero
    // latency dependence opportunity.
    ExclSrc.insert(Src);
    for (auto &I : DstBest->Preds)
      if (ExclSrc.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
        changeLatency(I.getSUnit(), DstBest, 0);
  } else if (SrcBest) {
    // Check if the previous best source instruction has a new zero latency
    // dependence opportunity.
    ExclDst.insert(Dst);
    for (auto &I : SrcBest->Succs)
      if (ExclDst.count(I.getSUnit()) == 0 &&
          isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
        changeLatency(SrcBest, I.getSUnit(), 0);
  }

  return true;
}

unsigned HexagonSubtarget::getL1CacheLineSize() const {
  return 32;
}

unsigned HexagonSubtarget::getL1PrefetchDistance() const {
  return 32;
}

bool HexagonSubtarget::enableSubRegLiveness() const {
  return EnableSubregLiveness;
}