Lines Matching +full:de +full:- +full:asserting

1 //====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
17 /// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
20 //===----------------------------------------------------------------------===//
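For context, the gadget class this pass mitigates looks roughly like the following (hypothetical victim code, not from this file): a bounds check the branch predictor may speculate past, followed by a load whose address depends on the out-of-bounds value, leaving a cache footprint the attacker can later probe.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical Spectre v1 gadget: `idx` is attacker-controlled.
    uint8_t victim(size_t idx, const uint8_t *arr, size_t len,
                   const uint8_t *probe) {
      if (idx < len)                  // May be predicted taken when idx >= len.
        return probe[arr[idx] * 512]; // Speculative OOB read; the dependent
                                      // probe[] access encodes arr[idx] in the
                                      // cache, where it can be timed later.
      return 0;
    }

SLH's answer, visible throughout the fragments below, is to track a "predicate state" (all-zeros on the architecturally correct path, all-ones when misspeculating) and fold it into addresses and loaded values so that nothing secret-dependent survives a mispredicted path.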
63 #define PASS_KEY "x86-slh"
71 "Number of post-load register values hardened");
78 "x86-speculative-load-hardening",
83 PASS_KEY "-lfence",
90 PASS_KEY "-post-load",
97 PASS_KEY "-fence-call-and-ret",
103 PASS_KEY "-ip",
109 HardenLoads(PASS_KEY "-loads",
115 PASS_KEY "-indirect",
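The cl::opt declarations above surface as llc flags. SLH itself is enabled per function (clang's -mspeculative-load-hardening sets the corresponding attribute), after which the -x86-slh-* knobs tune the strategy. A plausible invocation:

    clang -O2 -mspeculative-load-hardening -S -emit-llvm test.c -o test.ll
    llc -x86-slh-lfence -x86-slh-indirect=0 test.ll   # force the blunt lfence strategy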
235 // don't know what layout-successor relationships the successor has and we in splitEdge()
241 assert(Br->getOperand(0).getMBB() == &Succ && in splitEdge()
243 Br->getOperand(0).setMBB(&NewMBB); in splitEdge()
264 TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc()); in splitEdge()
270 "A non-branch successor must have been a layout successor before " in splitEdge()
311 // Inherit live-ins from the successor in splitEdge()
323 /// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
325 /// a single predecessor. This makes CFG-updating extremely complex, so here we
345 // that these are stored as a vector making this element-wise removal in canonicalizePHIOperands()
350 // removal algorithm here. There should be a better way, but the use-def in canonicalizePHIOperands()
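The canonicalization this FIXME motivates amounts to folding duplicate predecessor entries out of each PHI. A minimal sketch of the element-wise removal (assuming `MI` is a PHI; after the def at operand 0, operands come in (value, predecessor-MBB) pairs):

    SmallPtrSet<MachineBasicBlock *, 4> SeenPreds;
    for (unsigned OpIdx = 1; OpIdx < MI.getNumOperands(); OpIdx += 2) {
      MachineBasicBlock *Pred = MI.getOperand(OpIdx + 1).getMBB();
      if (!SeenPreds.insert(Pred).second) {
        // Duplicate entry for Pred: drop this (value, MBB) pair. Each
        // removal shifts the tail of the operand vector down, which is
        // the cost the comment above laments.
        MI.removeOperand(OpIdx + 1);
        MI.removeOperand(OpIdx);
        OpIdx -= 2; // Re-examine this index after the shift.
      }
    }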
411 TII = Subtarget->getInstrInfo(); in runOnMachineFunction()
412 TRI = Subtarget->getRegisterInfo(); in runOnMachineFunction()
414 // FIXME: Support for 32-bit. in runOnMachineFunction()
444 // The poison value is required to be an all-ones value for many aspects of in runOnMachineFunction()
446 const int PoisonVal = -1; in runOnMachineFunction()
447 PS->PoisonReg = MRI->createVirtualRegister(PS->RC); in runOnMachineFunction()
448 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg) in runOnMachineFunction()
453 // get a full fence-based mitigation, inject that fence. in runOnMachineFunction()
456 // incoming misspeculation from the caller. This helps two-fold: the caller in runOnMachineFunction()
461 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE)); in runOnMachineFunction()
476 PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc); in runOnMachineFunction()
480 PS->InitialReg = MRI->createVirtualRegister(PS->RC); in runOnMachineFunction()
481 Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass); in runOnMachineFunction()
482 auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0), in runOnMachineFunction()
486 ZeroI->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr); in runOnMachineFunction()
487 assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() && in runOnMachineFunction()
489 ZeroEFLAGSDefOp->setIsDead(true); in runOnMachineFunction()
490 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG), in runOnMachineFunction()
491 PS->InitialReg) in runOnMachineFunction()
504 PS->SSA.Initialize(PS->InitialReg); in runOnMachineFunction()
505 PS->SSA.AddAvailableValue(&Entry, PS->InitialReg); in runOnMachineFunction()
512 // re-capture the predicate state from the throwing code. In the Itanium ABI, in runOnMachineFunction()
516 // FIXME: Handle non-Itanium ABI EH models. in runOnMachineFunction()
524 PS->SSA.AddAvailableValue( in runOnMachineFunction()
548 for (MachineOperand &Op : CMovI->operands()) { in runOnMachineFunction()
549 if (!Op.isReg() || Op.getReg() != PS->InitialReg) in runOnMachineFunction()
552 PS->SSA.RewriteUse(Op); in runOnMachineFunction()
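Pulled together, the MachineSSAUpdater calls scattered through these fragments follow the standard pattern below (a sketch; `SomeMBB`, `UpdatedStateReg`, and `UseMI` are placeholders for the pass's bookkeeping):

    MachineSSAUpdater SSA(MF);
    SSA.Initialize(PS->InitialReg);                // Declare the tracked value.
    SSA.AddAvailableValue(&Entry, PS->InitialReg); // Seed the entry block.
    // Any block that recomputes the predicate state publishes its version:
    SSA.AddAvailableValue(&SomeMBB, UpdatedStateReg);
    // Uses still naming InitialReg are rewired to whichever definition
    // reaches them, with PHIs materialized on demand:
    for (MachineOperand &Op : UseMI.operands())
      if (Op.isReg() && Op.getReg() == PS->InitialReg)
        SSA.RewriteUse(Op);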
561 /// potentially mis-predicted control flow construct.
565 /// practical for any real-world users.
579 if (TermIt == MBB.end() || !TermIt->isBranch()) in hardenEdgesWithLFENCE()
582 // Add all the non-EH-pad successors to the blocks we want to harden. We in hardenEdgesWithLFENCE()
586 if (!SuccMBB->isEHPad()) in hardenEdgesWithLFENCE()
591 auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin()); in hardenEdgesWithLFENCE()
592 BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE)); in hardenEdgesWithLFENCE()
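Consolidated, the lfence strategy is simply: make a fence the first real instruction of every block entered over a conditional edge, so execution stalls until the branch resolves. Mirroring the fragments above (with `SuccMBB` a non-EH-pad successor):

    auto InsertPt = SuccMBB->SkipPHIsAndLabels(SuccMBB->begin());
    BuildMI(*SuccMBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
    // Resulting shape, per hardened successor:
    //   .LBB0_1:                 ; reached via a conditional edge
    //     lfence                 ; no speculative load runs past this point
    //     <original block body>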
614 // any unconditional non-indirect branch, and track all conditional edges in collectBlockCondInfo()
617 // condition code in order to inject a "no-op" cmov into that successor in collectBlockCondInfo()
638 // If we see a non-branch terminator, we can't handle anything so bail. in collectBlockCondInfo()
708 // Compute the non-conditional successor as either the target of any in tracePredStateThroughCFG()
711 UncondBr ? (UncondBr->getOpcode() == X86::JMP_1 in tracePredStateThroughCFG()
712 ? UncondBr->getOperand(0).getMBB() in tracePredStateThroughCFG()
721 ++SuccCounts[CondBr->getOperand(0).getMBB()]; in tracePredStateThroughCFG()
742 assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) && in tracePredStateThroughCFG()
748 unsigned CurStateReg = PS->InitialReg; in tracePredStateThroughCFG()
751 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8; in tracePredStateThroughCFG()
754 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC); in tracePredStateThroughCFG()
758 TII->get(CMovOp), UpdatedStateReg) in tracePredStateThroughCFG()
760 .addReg(PS->PoisonReg) in tracePredStateThroughCFG()
763 // live-in, mark them as killed. in tracePredStateThroughCFG()
765 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr) in tracePredStateThroughCFG()
766 ->setIsKill(true); in tracePredStateThroughCFG()
769 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); in tracePredStateThroughCFG()
774 if (CurStateReg == PS->InitialReg) in tracePredStateThroughCFG()
783 PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg); in tracePredStateThroughCFG()
788 MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB(); in tracePredStateThroughCFG()
802 --SuccCount; in tracePredStateThroughCFG()
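Assembled from the pieces above, each checked edge receives a conditional move keyed to the condition under which the edge is architecturally impossible: a no-op when we belong here, a fold of the all-ones poison into the state when we were steered here by a misprediction. A sketch mirroring the fragments (`Cond` stands for that impossible condition code):

    unsigned CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
    Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
    auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
                         TII->get(CMovOp), UpdatedStateReg)
                     .addReg(CurStateReg)   // Keep the state if EFLAGS agree
                     .addReg(PS->PoisonReg) // that we belong in this block...
                     .addImm(Cond);         // ...else select all-ones poison.
    // e.g. after `jne .LBB0_2`, block .LBB0_2 begins with:
    //   cmoveq %poison, %state   ; ZF set means the jne was mispredicted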
909 Register Reg = MRI->createVirtualRegister(UnfoldedRC); in unfoldCallAndJumpLoads()
914 TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true, in unfoldCallAndJumpLoads()
931 NewMI->dump(); in unfoldCallAndJumpLoads()
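The point of the unfolding above: a call or jump that reads its target straight from memory gives us no register to harden, so the load is split out first. A sketch (signature as used in the fragment; the before/after is a hypothetical example):

    SmallVector<MachineInstr *, 2> NewMIs;
    Register Reg = MRI->createVirtualRegister(UnfoldedRC);
    if (!TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
                                  /*UnfoldStore*/ false, NewMIs))
      report_fatal_error("Unable to unfold load from call/jump");
    // e.g.  callq *8(%rdi)   becomes:
    //       movq 8(%rdi), %rax   ; load now visible and hardenable
    //       callq *%rax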
954 /// blocks speculation. This mitigation can replace these retpoline-style
965 // this avoids us having to re-implement the PHI construction logic. in tracePredStateThroughIndirectBranches()
967 TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass)); in tracePredStateThroughIndirectBranches()
982 while (MII != MBB.instr_rend() && MII->isDebugInstr()) in tracePredStateThroughIndirectBranches()
988 // No terminator or non-branch terminator. in tracePredStateThroughIndirectBranches()
1016 "Support for 16-bit indirect branches is not implemented."); in tracePredStateThroughIndirectBranches()
1019 "Support for 32-bit indirect branches is not implemented."); in tracePredStateThroughIndirectBranches()
1067 // than asserting. in tracePredStateThroughIndirectBranches()
1074 // reaching here, and the inserted block will handle the EFLAGS-based in tracePredStateThroughIndirectBranches()
1077 "Cannot check within a block that already has live-in EFLAGS!"); in tracePredStateThroughIndirectBranches()
1079 // We can't handle having non-indirect edges into this block unless this is in tracePredStateThroughIndirectBranches()
1092 if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) { in tracePredStateThroughIndirectBranches()
1093 return Succ->isEHPad() || Succ == &MBB; in tracePredStateThroughIndirectBranches()
1098 Pred->dump(); in tracePredStateThroughIndirectBranches()
1108 auto InsertPt = Pred->getFirstTerminator(); in tracePredStateThroughIndirectBranches()
1109 Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass); in tracePredStateThroughIndirectBranches()
1111 !Subtarget->isPositionIndependent()) { in tracePredStateThroughIndirectBranches()
1114 TII->get(X86::MOV64ri32), TargetReg) in tracePredStateThroughIndirectBranches()
1118 LLVM_DEBUG(dbgs() << " Inserting mov: "; AddrI->dump(); in tracePredStateThroughIndirectBranches()
1121 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r), in tracePredStateThroughIndirectBranches()
1130 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); in tracePredStateThroughIndirectBranches()
1150 !Subtarget->isPositionIndependent()) { in tracePredStateThroughIndirectBranches()
1152 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32)) in tracePredStateThroughIndirectBranches()
1157 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n"); in tracePredStateThroughIndirectBranches()
1160 Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass); in tracePredStateThroughIndirectBranches()
1162 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg) in tracePredStateThroughIndirectBranches()
1170 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); dbgs() << "\n"); in tracePredStateThroughIndirectBranches()
1171 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr)) in tracePredStateThroughIndirectBranches()
1176 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n"); in tracePredStateThroughIndirectBranches()
1180 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8; in tracePredStateThroughIndirectBranches()
1182 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC); in tracePredStateThroughIndirectBranches()
1184 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg) in tracePredStateThroughIndirectBranches()
1185 .addReg(PS->InitialReg) in tracePredStateThroughIndirectBranches()
1186 .addReg(PS->PoisonReg) in tracePredStateThroughIndirectBranches()
1188 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr) in tracePredStateThroughIndirectBranches()
1189 ->setIsKill(true); in tracePredStateThroughIndirectBranches()
1191 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n"); in tracePredStateThroughIndirectBranches()
1196 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg); in tracePredStateThroughIndirectBranches()
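Put together, every target of an indirect branch re-establishes trust on entry. Indirect predecessors contribute the register their branch actually jumped through (merged via the TargetAddrSSA updater above); direct predecessors, as the fragments show, materialize the block's own address so the check passes trivially for them. The block then compares the merged value against its own address. In outline (AT&T syntax, hypothetical labels):

    // In a direct predecessor (PIC form; movq $.LBB1_5 if non-PIC):
    //   leaq .LBB1_5(%rip), %r11
    // At the top of .LBB1_5, against the SSA-merged target value %r11:
    //   leaq .LBB1_5(%rip), %rax   ; own address (cmpq $.LBB1_5 if non-PIC)
    //   cmpq %rax, %r11
    //   cmovneq %poison, %state    ; arrived here, but the branch's target
    //                              ; was elsewhere => misprediction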
1208 return !DefOp->isDead(); in isEFLAGSDefLive()
1216 // live-in, and then seeing if that def is in turn used. in isEFLAGSLive()
1221 if (DefOp->isDead()) in isEFLAGSLive()
1243 /// it through the instructions within each basic block, and for non-returning
1254 /// strategies may interact -- later hardening may change what strategy we wish
1274 // Track the set of load-dependent registers through the basic block. Because in tracePredStateThroughBlocksAndHarden()
1287 // as that often successfully re-uses hardened addresses and minimizes in tracePredStateThroughBlocksAndHarden()
1336 // If we have at least one (non-frame-index, non-RIP) register operand, in tracePredStateThroughBlocksAndHarden()
1337 // and neither operand is load-dependent, we need to check the load. in tracePredStateThroughBlocksAndHarden()
1357 // If post-load hardening is enabled, this load is compatible with in tracePredStateThroughBlocksAndHarden()
1358 // post-load hardening, and we aren't already going to harden one of the in tracePredStateThroughBlocksAndHarden()
1359 // address registers, queue it up to be hardened post-load. Notably, in tracePredStateThroughBlocksAndHarden()
1374 // operands as being address-hardened. in tracePredStateThroughBlocksAndHarden()
1386 // Now re-walk the instructions in the basic block, and apply whichever in tracePredStateThroughBlocksAndHarden()
1389 // which we will do post-load hardening and can defer it in certain in tracePredStateThroughBlocksAndHarden()
1413 assert(!MI.isCall() && "Must not try to post-load harden a call!"); in tracePredStateThroughBlocksAndHarden()
1415 // If this is a data-invariant load and there is no EFLAGS in tracePredStateThroughBlocksAndHarden()
1440 // Mark the resulting hardened register as such so we don't re-harden. in tracePredStateThroughBlocksAndHarden()
1481 // Currently, we only track data-dependent loads within a basic block. in tracePredStateThroughBlocksAndHarden()
1497 // FIXME: Hard coding this to a 32-bit register class seems weird, but matches in saveEFLAGS()
1499 Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass); in saveEFLAGS()
1502 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS); in saveEFLAGS()
1515 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg); in restoreEFLAGS()
1521 /// a way that won't form non-canonical pointers and also will be preserved
1526 Register TmpReg = MRI->createVirtualRegister(PS->RC); in mergePredStateIntoSP()
1528 // to stay canonical on 64-bit. We should compute this somehow and support in mergePredStateIntoSP()
1529 // 32-bit as part of that. in mergePredStateIntoSP()
1530 auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg) in mergePredStateIntoSP()
1533 ShiftI->addRegisterDead(X86::EFLAGS, TRI); in mergePredStateIntoSP()
1535 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP) in mergePredStateIntoSP()
1538 OrI->addRegisterDead(X86::EFLAGS, TRI); in mergePredStateIntoSP()
1546 Register PredStateReg = MRI->createVirtualRegister(PS->RC); in extractPredStateFromSP()
1547 Register TmpReg = MRI->createVirtualRegister(PS->RC); in extractPredStateFromSP()
1552 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg) in extractPredStateFromSP()
1555 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg) in extractPredStateFromSP()
1557 .addImm(TRI->getRegSizeInBits(*PS->RC) - 1); in extractPredStateFromSP()
1558 ShiftI->addRegisterDead(X86::EFLAGS, TRI); in extractPredStateFromSP()
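These two helpers are duals used around call and return boundaries, where no ordinary register survives: the predicate state rides through the high bits of RSP. A sketch of the round trip (shift amounts per the fragments: 47 to merge, 63 to extract):

    // mergePredStateIntoSP:
    //   shlq $47, %state     ; all-ones state now occupies bits 47..63
    //   orq  %state, %rsp    ; zero state: RSP untouched; poisoned state:
    //                        ; RSP becomes non-canonical (bit 63 set)
    // ...call / ret...
    // extractPredStateFromSP:
    //   movq %rsp, %tmp
    //   sarq $63, %tmp       ; smear bit 63: valid RSP -> 0, poisoned
    //                        ; RSP -> all-ones, recovering the -1 poison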
1571 // live-in, and then seeing if that def is in turn used. in hardenLoadAddr()
1592 // For both RIP-relative addressed loads or absolute loads, we cannot in hardenLoadAddr()
1597 // dynamic address being the base plus -1 because we can't mutate the in hardenLoadAddr()
1598 // segment register here. This allows the signed 32-bit offset to point at in hardenLoadAddr()
1599 // valid segment-relative addresses and load them successfully. in hardenLoadAddr()
1602 << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base") in hardenLoadAddr()
1612 HardenOpRegs.front()->getReg() != IndexMO.getReg())) in hardenLoadAddr()
1618 HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) && in hardenLoadAddr()
1624 auto It = AddrRegToHardenedReg.find(Op->getReg()); in hardenLoadAddr()
1630 Op->setReg(It->second); in hardenLoadAddr()
1638 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB); in hardenLoadAddr()
1646 if (EFLAGSLive && !Subtarget->hasBMI2()) { in hardenLoadAddr()
1652 Register OpReg = Op->getReg(); in hardenLoadAddr()
1653 auto *OpRC = MRI->getRegClass(OpReg); in hardenLoadAddr()
1654 Register TmpReg = MRI->createVirtualRegister(OpRC); in hardenLoadAddr()
1658 if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) || in hardenLoadAddr()
1659 OpRC->hasSuperClassEq(&X86::VR256RegClass))) { in hardenLoadAddr()
1660 assert(Subtarget->hasAVX2() && "AVX2-specific register classes!"); in hardenLoadAddr()
1661 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass); in hardenLoadAddr()
1664 // FIXME: We could skip this at the cost of longer encodings with AVX-512 in hardenLoadAddr()
1666 Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass); in hardenLoadAddr()
1668 BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg) in hardenLoadAddr()
1672 LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n"); in hardenLoadAddr()
1675 Register VBStateReg = MRI->createVirtualRegister(OpRC); in hardenLoadAddr()
1677 TII->get(Is128Bit ? X86::VPBROADCASTQrr in hardenLoadAddr()
1683 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump(); in hardenLoadAddr()
1689 TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg) in hardenLoadAddr()
1694 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n"); in hardenLoadAddr()
1695 } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) || in hardenLoadAddr()
1696 OpRC->hasSuperClassEq(&X86::VR256XRegClass) || in hardenLoadAddr()
1697 OpRC->hasSuperClassEq(&X86::VR512RegClass)) { in hardenLoadAddr()
1698 assert(Subtarget->hasAVX512() && "AVX512-specific register classes!"); in hardenLoadAddr()
1699 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass); in hardenLoadAddr()
1700 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass); in hardenLoadAddr()
1702 assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!"); in hardenLoadAddr()
1705 Register VStateReg = MRI->createVirtualRegister(OpRC); in hardenLoadAddr()
1710 BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg) in hardenLoadAddr()
1714 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump(); in hardenLoadAddr()
1720 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg) in hardenLoadAddr()
1725 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n"); in hardenLoadAddr()
1727 // FIXME: Need to support GR32 here for 32-bit code. in hardenLoadAddr()
1728 assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) && in hardenLoadAddr()
1733 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg) in hardenLoadAddr()
1736 OrI->addRegisterDead(X86::EFLAGS, TRI); in hardenLoadAddr()
1738 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n"); in hardenLoadAddr()
1743 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg) in hardenLoadAddr()
1748 LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump(); in hardenLoadAddr()
1754 assert(!AddrRegToHardenedReg.count(Op->getReg()) && in hardenLoadAddr()
1756 AddrRegToHardenedReg[Op->getReg()] = TmpReg; in hardenLoadAddr()
1757 Op->setReg(TmpReg); in hardenLoadAddr()
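The GPR path above is the essence of address hardening: OR the all-zeros/all-ones state into each attacker-influenceable address register, then rewrite the memory operand to use the hardened copy. The net effect:

    //   orq %state, %addr    ; correct path: state==0, addr unchanged
    //   movq (%addr), %rax   ; misspeculating: addr was OR-ed with -1, so
    //                        ; the load targets a fixed all-ones address and
    //                        ; can no longer read attacker-chosen memory
    // When EFLAGS are live and BMI2 is available, a flag-free
    // `shrx %state, %addr` is used instead (fragments above): a shift by 0
    // is a no-op, a shift by -1 (63 mod 64) destroys the address.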
1769 "Cannot get here with a non-invariant load!"); in sinkPostLoadHardenedInst()
1776 [&](MachineInstr &MI) -> std::optional<MachineInstr *> { in sinkPostLoadHardenedInst()
1783 for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) { in sinkPostLoadHardenedInst()
1788 // If we've already decided to harden a non-load, we must have sunk in sinkPostLoadHardenedInst()
1789 // some other post-load hardened instruction to it and it must itself in sinkPostLoadHardenedInst()
1790 // be data-invariant. in sinkPostLoadHardenedInst()
1861 auto *RC = MRI->getRegClass(Reg); in canHardenRegister()
1862 int RegBytes = TRI->getRegSizeInBits(*RC) / 8; in canHardenRegister()
1864 // We don't support post-load hardening of vectors. in canHardenRegister()
1874 // end up with both a NOREX and a REX-only register as operands to the hardening in canHardenRegister()
1886 return RC->hasSuperClassEq(GPRRegClasses[RegIdx]); in canHardenRegister()
1891 /// This is the low-level logic to fully harden a value sitting in a register
1908 auto *RC = MRI->getRegClass(Reg); in hardenValueInRegister()
1909 int Bytes = TRI->getRegSizeInBits(*RC) / 8; in hardenValueInRegister()
1910 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB); in hardenValueInRegister()
1914 // FIXME: Need to teach this about 32-bit mode. in hardenValueInRegister()
1918 Register NarrowStateReg = MRI->createVirtualRegister(RC); in hardenValueInRegister()
1919 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg) in hardenValueInRegister()
1928 Register NewReg = MRI->createVirtualRegister(RC); in hardenValueInRegister()
1931 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg) in hardenValueInRegister()
1934 OrI->addRegisterDead(X86::EFLAGS, TRI); in hardenValueInRegister()
1936 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n"); in hardenValueInRegister()
1946 /// We can harden a non-leaking load into a register without touching the
1959 auto *DefRC = MRI->getRegClass(OldDefReg); in hardenPostLoad()
1964 Register UnhardenedReg = MRI->createVirtualRegister(DefRC); in hardenPostLoad()
1975 MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg); in hardenPostLoad()
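Post-load hardening inverts the scheme: the load executes unmodified, and the loaded value is OR-ed with the state before any consumer sees it, so a misspeculated load can only ever yield all-ones. hardenPostLoad then renames the def so every user picks up the hardened copy, per the fragment above:

    //   movq (%rsi), %rax    ; load left untouched
    //   orq  %state, %rax    ; misspeculation turns the value into -1 before
    //                        ; any dependent (leaking) use can run
    // ...then all users of the old def are redirected:
    MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);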
2001 /// speculatively even during a BCBS-attacked return until the steering takes
2017 mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB)); in hardenReturnInstr()
2039 /// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
2043 /// advantage of the red-zone to load the return address from `-8(%rsp)` where it
2067 BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE)); in tracePredStateThroughCall()
2075 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB); in tracePredStateThroughCall()
2099 if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) || in tracePredStateThroughCall()
2107 // when a callee-saved register is used and the callee doesn't push it onto in tracePredStateThroughCall()
2119 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC); in tracePredStateThroughCall()
2121 !Subtarget->isPositionIndependent()) { in tracePredStateThroughCall()
2122 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg) in tracePredStateThroughCall()
2125 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg) in tracePredStateThroughCall()
2137 // If we didn't pre-compute the expected return address into a register, then in tracePredStateThroughCall()
2142 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC); in tracePredStateThroughCall()
2143 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg) in tracePredStateThroughCall()
2147 .addImm(/*Displacement*/ -8) // The stack pointer has been popped, so in tracePredStateThroughCall()
2148 // the return address is 8 bytes below it. in tracePredStateThroughCall()
2159 !Subtarget->isPositionIndependent()) { in tracePredStateThroughCall()
2162 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32)) in tracePredStateThroughCall()
2166 Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC); in tracePredStateThroughCall()
2167 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg) in tracePredStateThroughCall()
2173 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr)) in tracePredStateThroughCall()
2180 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8; in tracePredStateThroughCall()
2183 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC); in tracePredStateThroughCall()
2184 auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg) in tracePredStateThroughCall()
2186 .addReg(PS->PoisonReg) in tracePredStateThroughCall()
2188 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true); in tracePredStateThroughCall()
2190 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n"); in tracePredStateThroughCall()
2192 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg); in tracePredStateThroughCall()
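End to end, the call-boundary protection defends against RSB-underflow (ret2spec) steering: after the call returns, compare the address we actually resumed at against the architectural return address, and poison the state on mismatch. The red-zone variant, in outline (`.Lret` is a hypothetical label for the return point):

    //   callq callee
    // .Lret:
    //   movq -8(%rsp), %rcx     ; architectural return target, read from the
    //                           ; red zone below the already-popped RSP
    //   leaq .Lret(%rip), %rax  ; where we actually are
    //   cmpq %rax, %rcx
    //   cmovneq %poison, %state ; mispredicted `ret` => fold in all-ones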
2204 /// definitively treated as needing post-load hardening. While address hardening
2208 /// have an opportunity to post-load harden here, we just need to scan for cases