1*0b57cec5SDimitry Andric //===- HexagonFrameLowering.cpp - Define frame lowering -------------------===// 2*0b57cec5SDimitry Andric // 3*0b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4*0b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information. 5*0b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6*0b57cec5SDimitry Andric // 7*0b57cec5SDimitry Andric // 8*0b57cec5SDimitry Andric //===----------------------------------------------------------------------===// 9*0b57cec5SDimitry Andric 10*0b57cec5SDimitry Andric #include "HexagonFrameLowering.h" 11*0b57cec5SDimitry Andric #include "HexagonBlockRanges.h" 12*0b57cec5SDimitry Andric #include "HexagonInstrInfo.h" 13*0b57cec5SDimitry Andric #include "HexagonMachineFunctionInfo.h" 14*0b57cec5SDimitry Andric #include "HexagonRegisterInfo.h" 15*0b57cec5SDimitry Andric #include "HexagonSubtarget.h" 16*0b57cec5SDimitry Andric #include "HexagonTargetMachine.h" 17*0b57cec5SDimitry Andric #include "MCTargetDesc/HexagonBaseInfo.h" 18*0b57cec5SDimitry Andric #include "llvm/ADT/BitVector.h" 19*0b57cec5SDimitry Andric #include "llvm/ADT/DenseMap.h" 20*0b57cec5SDimitry Andric #include "llvm/ADT/None.h" 21*0b57cec5SDimitry Andric #include "llvm/ADT/Optional.h" 22*0b57cec5SDimitry Andric #include "llvm/ADT/PostOrderIterator.h" 23*0b57cec5SDimitry Andric #include "llvm/ADT/SetVector.h" 24*0b57cec5SDimitry Andric #include "llvm/ADT/SmallSet.h" 25*0b57cec5SDimitry Andric #include "llvm/ADT/SmallVector.h" 26*0b57cec5SDimitry Andric #include "llvm/CodeGen/LivePhysRegs.h" 27*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineBasicBlock.h" 28*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineDominators.h" 29*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFrameInfo.h" 30*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFunction.h" 31*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFunctionPass.h" 
32*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstr.h" 33*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstrBuilder.h" 34*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineMemOperand.h" 35*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineModuleInfo.h" 36*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineOperand.h" 37*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachinePostDominators.h" 38*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineRegisterInfo.h" 39*0b57cec5SDimitry Andric #include "llvm/CodeGen/RegisterScavenging.h" 40*0b57cec5SDimitry Andric #include "llvm/CodeGen/TargetRegisterInfo.h" 41*0b57cec5SDimitry Andric #include "llvm/IR/Attributes.h" 42*0b57cec5SDimitry Andric #include "llvm/IR/DebugLoc.h" 43*0b57cec5SDimitry Andric #include "llvm/IR/Function.h" 44*0b57cec5SDimitry Andric #include "llvm/MC/MCDwarf.h" 45*0b57cec5SDimitry Andric #include "llvm/MC/MCRegisterInfo.h" 46*0b57cec5SDimitry Andric #include "llvm/Pass.h" 47*0b57cec5SDimitry Andric #include "llvm/Support/CodeGen.h" 48*0b57cec5SDimitry Andric #include "llvm/Support/CommandLine.h" 49*0b57cec5SDimitry Andric #include "llvm/Support/Compiler.h" 50*0b57cec5SDimitry Andric #include "llvm/Support/Debug.h" 51*0b57cec5SDimitry Andric #include "llvm/Support/ErrorHandling.h" 52*0b57cec5SDimitry Andric #include "llvm/Support/MathExtras.h" 53*0b57cec5SDimitry Andric #include "llvm/Support/raw_ostream.h" 54*0b57cec5SDimitry Andric #include "llvm/Target/TargetMachine.h" 55*0b57cec5SDimitry Andric #include "llvm/Target/TargetOptions.h" 56*0b57cec5SDimitry Andric #include <algorithm> 57*0b57cec5SDimitry Andric #include <cassert> 58*0b57cec5SDimitry Andric #include <cstdint> 59*0b57cec5SDimitry Andric #include <iterator> 60*0b57cec5SDimitry Andric #include <limits> 61*0b57cec5SDimitry Andric #include <map> 62*0b57cec5SDimitry Andric #include <utility> 63*0b57cec5SDimitry Andric #include <vector> 64*0b57cec5SDimitry Andric 65*0b57cec5SDimitry Andric #define DEBUG_TYPE 
"hexagon-pei" 66*0b57cec5SDimitry Andric 67*0b57cec5SDimitry Andric // Hexagon stack frame layout as defined by the ABI: 68*0b57cec5SDimitry Andric // 69*0b57cec5SDimitry Andric // Incoming arguments 70*0b57cec5SDimitry Andric // passed via stack 71*0b57cec5SDimitry Andric // | 72*0b57cec5SDimitry Andric // | 73*0b57cec5SDimitry Andric // SP during function's FP during function's | 74*0b57cec5SDimitry Andric // +-- runtime (top of stack) runtime (bottom) --+ | 75*0b57cec5SDimitry Andric // | | | 76*0b57cec5SDimitry Andric // --++---------------------+------------------+-----------------++-+------- 77*0b57cec5SDimitry Andric // | parameter area for | variable-size | fixed-size |LR| arg 78*0b57cec5SDimitry Andric // | called functions | local objects | local objects |FP| 79*0b57cec5SDimitry Andric // --+----------------------+------------------+-----------------+--+------- 80*0b57cec5SDimitry Andric // <- size known -> <- size unknown -> <- size known -> 81*0b57cec5SDimitry Andric // 82*0b57cec5SDimitry Andric // Low address High address 83*0b57cec5SDimitry Andric // 84*0b57cec5SDimitry Andric // <--- stack growth 85*0b57cec5SDimitry Andric // 86*0b57cec5SDimitry Andric // 87*0b57cec5SDimitry Andric // - In any circumstances, the outgoing function arguments are always accessi- 88*0b57cec5SDimitry Andric // ble using the SP, and the incoming arguments are accessible using the FP. 89*0b57cec5SDimitry Andric // - If the local objects are not aligned, they can always be accessed using 90*0b57cec5SDimitry Andric // the FP. 91*0b57cec5SDimitry Andric // - If there are no variable-sized objects, the local objects can always be 92*0b57cec5SDimitry Andric // accessed using the SP, regardless whether they are aligned or not. (The 93*0b57cec5SDimitry Andric // alignment padding will be at the bottom of the stack (highest address), 94*0b57cec5SDimitry Andric // and so the offset with respect to the SP will be known at the compile- 95*0b57cec5SDimitry Andric // -time.) 
96*0b57cec5SDimitry Andric // 97*0b57cec5SDimitry Andric // The only complication occurs if there are both, local aligned objects, and 98*0b57cec5SDimitry Andric // dynamically allocated (variable-sized) objects. The alignment pad will be 99*0b57cec5SDimitry Andric // placed between the FP and the local objects, thus preventing the use of the 100*0b57cec5SDimitry Andric // FP to access the local objects. At the same time, the variable-sized objects 101*0b57cec5SDimitry Andric // will be between the SP and the local objects, thus introducing an unknown 102*0b57cec5SDimitry Andric // distance from the SP to the locals. 103*0b57cec5SDimitry Andric // 104*0b57cec5SDimitry Andric // To avoid this problem, a new register is created that holds the aligned 105*0b57cec5SDimitry Andric // address of the bottom of the stack, referred in the sources as AP (aligned 106*0b57cec5SDimitry Andric // pointer). The AP will be equal to "FP-p", where "p" is the smallest pad 107*0b57cec5SDimitry Andric // that aligns AP to the required boundary (a maximum of the alignments of 108*0b57cec5SDimitry Andric // all stack objects, fixed- and variable-sized). All local objects[1] will 109*0b57cec5SDimitry Andric // then use AP as the base pointer. 110*0b57cec5SDimitry Andric // [1] The exception is with "fixed" stack objects. "Fixed" stack objects get 111*0b57cec5SDimitry Andric // their name from being allocated at fixed locations on the stack, relative 112*0b57cec5SDimitry Andric // to the FP. In the presence of dynamic allocation and local alignment, such 113*0b57cec5SDimitry Andric // objects can only be accessed through the FP. 
114*0b57cec5SDimitry Andric // 115*0b57cec5SDimitry Andric // Illustration of the AP: 116*0b57cec5SDimitry Andric // FP --+ 117*0b57cec5SDimitry Andric // | 118*0b57cec5SDimitry Andric // ---------------+---------------------+-----+-----------------------++-+-- 119*0b57cec5SDimitry Andric // Rest of the | Local stack objects | Pad | Fixed stack objects |LR| 120*0b57cec5SDimitry Andric // stack frame | (aligned) | | (CSR, spills, etc.) |FP| 121*0b57cec5SDimitry Andric // ---------------+---------------------+-----+-----------------+-----+--+-- 122*0b57cec5SDimitry Andric // |<-- Multiple of the -->| 123*0b57cec5SDimitry Andric // stack alignment +-- AP 124*0b57cec5SDimitry Andric // 125*0b57cec5SDimitry Andric // The AP is set up at the beginning of the function. Since it is not a dedi- 126*0b57cec5SDimitry Andric // cated (reserved) register, it needs to be kept live throughout the function 127*0b57cec5SDimitry Andric // to be available as the base register for local object accesses. 128*0b57cec5SDimitry Andric // Normally, an address of a stack objects is obtained by a pseudo-instruction 129*0b57cec5SDimitry Andric // PS_fi. To access local objects with the AP register present, a different 130*0b57cec5SDimitry Andric // pseudo-instruction needs to be used: PS_fia. The PS_fia takes one extra 131*0b57cec5SDimitry Andric // argument compared to PS_fi: the first input register is the AP register. 132*0b57cec5SDimitry Andric // This keeps the register live between its definition and its uses. 133*0b57cec5SDimitry Andric 134*0b57cec5SDimitry Andric // The AP register is originally set up using pseudo-instruction PS_aligna: 135*0b57cec5SDimitry Andric // AP = PS_aligna A 136*0b57cec5SDimitry Andric // where 137*0b57cec5SDimitry Andric // A - required stack alignment 138*0b57cec5SDimitry Andric // The alignment value must be the maximum of all alignments required by 139*0b57cec5SDimitry Andric // any stack object. 
140*0b57cec5SDimitry Andric 141*0b57cec5SDimitry Andric // The dynamic allocation uses a pseudo-instruction PS_alloca: 142*0b57cec5SDimitry Andric // Rd = PS_alloca Rs, A 143*0b57cec5SDimitry Andric // where 144*0b57cec5SDimitry Andric // Rd - address of the allocated space 145*0b57cec5SDimitry Andric // Rs - minimum size (the actual allocated can be larger to accommodate 146*0b57cec5SDimitry Andric // alignment) 147*0b57cec5SDimitry Andric // A - required alignment 148*0b57cec5SDimitry Andric 149*0b57cec5SDimitry Andric using namespace llvm; 150*0b57cec5SDimitry Andric 151*0b57cec5SDimitry Andric static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret", 152*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target")); 153*0b57cec5SDimitry Andric 154*0b57cec5SDimitry Andric static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots", 155*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2), 156*0b57cec5SDimitry Andric cl::ZeroOrMore); 157*0b57cec5SDimitry Andric 158*0b57cec5SDimitry Andric static cl::opt<int> SpillFuncThreshold("spill-func-threshold", 159*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), 160*0b57cec5SDimitry Andric cl::init(6), cl::ZeroOrMore); 161*0b57cec5SDimitry Andric 162*0b57cec5SDimitry Andric static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os", 163*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Specify Os spill func threshold"), 164*0b57cec5SDimitry Andric cl::init(1), cl::ZeroOrMore); 165*0b57cec5SDimitry Andric 166*0b57cec5SDimitry Andric static cl::opt<bool> EnableStackOVFSanitizer("enable-stackovf-sanitizer", 167*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), 168*0b57cec5SDimitry Andric cl::init(false), cl::ZeroOrMore); 169*0b57cec5SDimitry Andric 170*0b57cec5SDimitry Andric static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame", 
171*0b57cec5SDimitry Andric cl::init(true), cl::Hidden, cl::ZeroOrMore, 172*0b57cec5SDimitry Andric cl::desc("Enable stack frame shrink wrapping")); 173*0b57cec5SDimitry Andric 174*0b57cec5SDimitry Andric static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit", 175*0b57cec5SDimitry Andric cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden, cl::ZeroOrMore, 176*0b57cec5SDimitry Andric cl::desc("Max count of stack frame shrink-wraps")); 177*0b57cec5SDimitry Andric 178*0b57cec5SDimitry Andric static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long", 179*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), 180*0b57cec5SDimitry Andric cl::init(false), cl::ZeroOrMore); 181*0b57cec5SDimitry Andric 182*0b57cec5SDimitry Andric static cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true), 183*0b57cec5SDimitry Andric cl::Hidden, cl::desc("Refrain from using FP whenever possible")); 184*0b57cec5SDimitry Andric 185*0b57cec5SDimitry Andric static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, 186*0b57cec5SDimitry Andric cl::init(true), cl::desc("Optimize spill slots")); 187*0b57cec5SDimitry Andric 188*0b57cec5SDimitry Andric #ifndef NDEBUG 189*0b57cec5SDimitry Andric static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden, 190*0b57cec5SDimitry Andric cl::init(std::numeric_limits<unsigned>::max())); 191*0b57cec5SDimitry Andric static unsigned SpillOptCount = 0; 192*0b57cec5SDimitry Andric #endif 193*0b57cec5SDimitry Andric 194*0b57cec5SDimitry Andric namespace llvm { 195*0b57cec5SDimitry Andric 196*0b57cec5SDimitry Andric void initializeHexagonCallFrameInformationPass(PassRegistry&); 197*0b57cec5SDimitry Andric FunctionPass *createHexagonCallFrameInformation(); 198*0b57cec5SDimitry Andric 199*0b57cec5SDimitry Andric } // end namespace llvm 200*0b57cec5SDimitry Andric 201*0b57cec5SDimitry Andric namespace { 202*0b57cec5SDimitry Andric 203*0b57cec5SDimitry Andric class 
HexagonCallFrameInformation : public MachineFunctionPass { 204*0b57cec5SDimitry Andric public: 205*0b57cec5SDimitry Andric static char ID; 206*0b57cec5SDimitry Andric 207*0b57cec5SDimitry Andric HexagonCallFrameInformation() : MachineFunctionPass(ID) { 208*0b57cec5SDimitry Andric PassRegistry &PR = *PassRegistry::getPassRegistry(); 209*0b57cec5SDimitry Andric initializeHexagonCallFrameInformationPass(PR); 210*0b57cec5SDimitry Andric } 211*0b57cec5SDimitry Andric 212*0b57cec5SDimitry Andric bool runOnMachineFunction(MachineFunction &MF) override; 213*0b57cec5SDimitry Andric 214*0b57cec5SDimitry Andric MachineFunctionProperties getRequiredProperties() const override { 215*0b57cec5SDimitry Andric return MachineFunctionProperties().set( 216*0b57cec5SDimitry Andric MachineFunctionProperties::Property::NoVRegs); 217*0b57cec5SDimitry Andric } 218*0b57cec5SDimitry Andric }; 219*0b57cec5SDimitry Andric 220*0b57cec5SDimitry Andric char HexagonCallFrameInformation::ID = 0; 221*0b57cec5SDimitry Andric 222*0b57cec5SDimitry Andric } // end anonymous namespace 223*0b57cec5SDimitry Andric 224*0b57cec5SDimitry Andric bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) { 225*0b57cec5SDimitry Andric auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering(); 226*0b57cec5SDimitry Andric bool NeedCFI = MF.getMMI().hasDebugInfo() || 227*0b57cec5SDimitry Andric MF.getFunction().needsUnwindTableEntry(); 228*0b57cec5SDimitry Andric 229*0b57cec5SDimitry Andric if (!NeedCFI) 230*0b57cec5SDimitry Andric return false; 231*0b57cec5SDimitry Andric HFI.insertCFIInstructions(MF); 232*0b57cec5SDimitry Andric return true; 233*0b57cec5SDimitry Andric } 234*0b57cec5SDimitry Andric 235*0b57cec5SDimitry Andric INITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi", 236*0b57cec5SDimitry Andric "Hexagon call frame information", false, false) 237*0b57cec5SDimitry Andric 238*0b57cec5SDimitry Andric FunctionPass *llvm::createHexagonCallFrameInformation() { 
239*0b57cec5SDimitry Andric return new HexagonCallFrameInformation(); 240*0b57cec5SDimitry Andric } 241*0b57cec5SDimitry Andric 242*0b57cec5SDimitry Andric /// Map a register pair Reg to the subregister that has the greater "number", 243*0b57cec5SDimitry Andric /// i.e. D3 (aka R7:6) will be mapped to R7, etc. 244*0b57cec5SDimitry Andric static unsigned getMax32BitSubRegister(unsigned Reg, 245*0b57cec5SDimitry Andric const TargetRegisterInfo &TRI, 246*0b57cec5SDimitry Andric bool hireg = true) { 247*0b57cec5SDimitry Andric if (Reg < Hexagon::D0 || Reg > Hexagon::D15) 248*0b57cec5SDimitry Andric return Reg; 249*0b57cec5SDimitry Andric 250*0b57cec5SDimitry Andric unsigned RegNo = 0; 251*0b57cec5SDimitry Andric for (MCSubRegIterator SubRegs(Reg, &TRI); SubRegs.isValid(); ++SubRegs) { 252*0b57cec5SDimitry Andric if (hireg) { 253*0b57cec5SDimitry Andric if (*SubRegs > RegNo) 254*0b57cec5SDimitry Andric RegNo = *SubRegs; 255*0b57cec5SDimitry Andric } else { 256*0b57cec5SDimitry Andric if (!RegNo || *SubRegs < RegNo) 257*0b57cec5SDimitry Andric RegNo = *SubRegs; 258*0b57cec5SDimitry Andric } 259*0b57cec5SDimitry Andric } 260*0b57cec5SDimitry Andric return RegNo; 261*0b57cec5SDimitry Andric } 262*0b57cec5SDimitry Andric 263*0b57cec5SDimitry Andric /// Returns the callee saved register with the largest id in the vector. 
264*0b57cec5SDimitry Andric static unsigned getMaxCalleeSavedReg(const std::vector<CalleeSavedInfo> &CSI, 265*0b57cec5SDimitry Andric const TargetRegisterInfo &TRI) { 266*0b57cec5SDimitry Andric static_assert(Hexagon::R1 > 0, 267*0b57cec5SDimitry Andric "Assume physical registers are encoded as positive integers"); 268*0b57cec5SDimitry Andric if (CSI.empty()) 269*0b57cec5SDimitry Andric return 0; 270*0b57cec5SDimitry Andric 271*0b57cec5SDimitry Andric unsigned Max = getMax32BitSubRegister(CSI[0].getReg(), TRI); 272*0b57cec5SDimitry Andric for (unsigned I = 1, E = CSI.size(); I < E; ++I) { 273*0b57cec5SDimitry Andric unsigned Reg = getMax32BitSubRegister(CSI[I].getReg(), TRI); 274*0b57cec5SDimitry Andric if (Reg > Max) 275*0b57cec5SDimitry Andric Max = Reg; 276*0b57cec5SDimitry Andric } 277*0b57cec5SDimitry Andric return Max; 278*0b57cec5SDimitry Andric } 279*0b57cec5SDimitry Andric 280*0b57cec5SDimitry Andric /// Checks if the basic block contains any instruction that needs a stack 281*0b57cec5SDimitry Andric /// frame to be already in place. 282*0b57cec5SDimitry Andric static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, 283*0b57cec5SDimitry Andric const HexagonRegisterInfo &HRI) { 284*0b57cec5SDimitry Andric for (auto &I : MBB) { 285*0b57cec5SDimitry Andric const MachineInstr *MI = &I; 286*0b57cec5SDimitry Andric if (MI->isCall()) 287*0b57cec5SDimitry Andric return true; 288*0b57cec5SDimitry Andric unsigned Opc = MI->getOpcode(); 289*0b57cec5SDimitry Andric switch (Opc) { 290*0b57cec5SDimitry Andric case Hexagon::PS_alloca: 291*0b57cec5SDimitry Andric case Hexagon::PS_aligna: 292*0b57cec5SDimitry Andric return true; 293*0b57cec5SDimitry Andric default: 294*0b57cec5SDimitry Andric break; 295*0b57cec5SDimitry Andric } 296*0b57cec5SDimitry Andric // Check individual operands. 
297*0b57cec5SDimitry Andric for (const MachineOperand &MO : MI->operands()) { 298*0b57cec5SDimitry Andric // While the presence of a frame index does not prove that a stack 299*0b57cec5SDimitry Andric // frame will be required, all frame indexes should be within alloc- 300*0b57cec5SDimitry Andric // frame/deallocframe. Otherwise, the code that translates a frame 301*0b57cec5SDimitry Andric // index into an offset would have to be aware of the placement of 302*0b57cec5SDimitry Andric // the frame creation/destruction instructions. 303*0b57cec5SDimitry Andric if (MO.isFI()) 304*0b57cec5SDimitry Andric return true; 305*0b57cec5SDimitry Andric if (MO.isReg()) { 306*0b57cec5SDimitry Andric unsigned R = MO.getReg(); 307*0b57cec5SDimitry Andric // Virtual registers will need scavenging, which then may require 308*0b57cec5SDimitry Andric // a stack slot. 309*0b57cec5SDimitry Andric if (TargetRegisterInfo::isVirtualRegister(R)) 310*0b57cec5SDimitry Andric return true; 311*0b57cec5SDimitry Andric for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S) 312*0b57cec5SDimitry Andric if (CSR[*S]) 313*0b57cec5SDimitry Andric return true; 314*0b57cec5SDimitry Andric continue; 315*0b57cec5SDimitry Andric } 316*0b57cec5SDimitry Andric if (MO.isRegMask()) { 317*0b57cec5SDimitry Andric // A regmask would normally have all callee-saved registers marked 318*0b57cec5SDimitry Andric // as preserved, so this check would not be needed, but in case of 319*0b57cec5SDimitry Andric // ever having other regmasks (for other calling conventions), 320*0b57cec5SDimitry Andric // make sure they would be processed correctly. 321*0b57cec5SDimitry Andric const uint32_t *BM = MO.getRegMask(); 322*0b57cec5SDimitry Andric for (int x = CSR.find_first(); x >= 0; x = CSR.find_next(x)) { 323*0b57cec5SDimitry Andric unsigned R = x; 324*0b57cec5SDimitry Andric // If this regmask does not preserve a CSR, a frame will be needed. 
325*0b57cec5SDimitry Andric if (!(BM[R/32] & (1u << (R%32)))) 326*0b57cec5SDimitry Andric return true; 327*0b57cec5SDimitry Andric } 328*0b57cec5SDimitry Andric } 329*0b57cec5SDimitry Andric } 330*0b57cec5SDimitry Andric } 331*0b57cec5SDimitry Andric return false; 332*0b57cec5SDimitry Andric } 333*0b57cec5SDimitry Andric 334*0b57cec5SDimitry Andric /// Returns true if MBB has a machine instructions that indicates a tail call 335*0b57cec5SDimitry Andric /// in the block. 336*0b57cec5SDimitry Andric static bool hasTailCall(const MachineBasicBlock &MBB) { 337*0b57cec5SDimitry Andric MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr(); 338*0b57cec5SDimitry Andric if (I == MBB.end()) 339*0b57cec5SDimitry Andric return false; 340*0b57cec5SDimitry Andric unsigned RetOpc = I->getOpcode(); 341*0b57cec5SDimitry Andric return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r; 342*0b57cec5SDimitry Andric } 343*0b57cec5SDimitry Andric 344*0b57cec5SDimitry Andric /// Returns true if MBB contains an instruction that returns. 345*0b57cec5SDimitry Andric static bool hasReturn(const MachineBasicBlock &MBB) { 346*0b57cec5SDimitry Andric for (auto I = MBB.getFirstTerminator(), E = MBB.end(); I != E; ++I) 347*0b57cec5SDimitry Andric if (I->isReturn()) 348*0b57cec5SDimitry Andric return true; 349*0b57cec5SDimitry Andric return false; 350*0b57cec5SDimitry Andric } 351*0b57cec5SDimitry Andric 352*0b57cec5SDimitry Andric /// Returns the "return" instruction from this block, or nullptr if there 353*0b57cec5SDimitry Andric /// isn't any. 
354*0b57cec5SDimitry Andric static MachineInstr *getReturn(MachineBasicBlock &MBB) { 355*0b57cec5SDimitry Andric for (auto &I : MBB) 356*0b57cec5SDimitry Andric if (I.isReturn()) 357*0b57cec5SDimitry Andric return &I; 358*0b57cec5SDimitry Andric return nullptr; 359*0b57cec5SDimitry Andric } 360*0b57cec5SDimitry Andric 361*0b57cec5SDimitry Andric static bool isRestoreCall(unsigned Opc) { 362*0b57cec5SDimitry Andric switch (Opc) { 363*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_RET_JMP_V4: 364*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC: 365*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT: 366*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC: 367*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT: 368*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC: 369*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4: 370*0b57cec5SDimitry Andric case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC: 371*0b57cec5SDimitry Andric return true; 372*0b57cec5SDimitry Andric } 373*0b57cec5SDimitry Andric return false; 374*0b57cec5SDimitry Andric } 375*0b57cec5SDimitry Andric 376*0b57cec5SDimitry Andric static inline bool isOptNone(const MachineFunction &MF) { 377*0b57cec5SDimitry Andric return MF.getFunction().hasOptNone() || 378*0b57cec5SDimitry Andric MF.getTarget().getOptLevel() == CodeGenOpt::None; 379*0b57cec5SDimitry Andric } 380*0b57cec5SDimitry Andric 381*0b57cec5SDimitry Andric static inline bool isOptSize(const MachineFunction &MF) { 382*0b57cec5SDimitry Andric const Function &F = MF.getFunction(); 383*0b57cec5SDimitry Andric return F.hasOptSize() && !F.hasMinSize(); 384*0b57cec5SDimitry Andric } 385*0b57cec5SDimitry Andric 386*0b57cec5SDimitry Andric static inline bool isMinSize(const MachineFunction &MF) { 387*0b57cec5SDimitry Andric return MF.getFunction().hasMinSize(); 388*0b57cec5SDimitry Andric } 
389*0b57cec5SDimitry Andric 390*0b57cec5SDimitry Andric /// Implements shrink-wrapping of the stack frame. By default, stack frame 391*0b57cec5SDimitry Andric /// is created in the function entry block, and is cleaned up in every block 392*0b57cec5SDimitry Andric /// that returns. This function finds alternate blocks: one for the frame 393*0b57cec5SDimitry Andric /// setup (prolog) and one for the cleanup (epilog). 394*0b57cec5SDimitry Andric void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF, 395*0b57cec5SDimitry Andric MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB) const { 396*0b57cec5SDimitry Andric static unsigned ShrinkCounter = 0; 397*0b57cec5SDimitry Andric 398*0b57cec5SDimitry Andric if (ShrinkLimit.getPosition()) { 399*0b57cec5SDimitry Andric if (ShrinkCounter >= ShrinkLimit) 400*0b57cec5SDimitry Andric return; 401*0b57cec5SDimitry Andric ShrinkCounter++; 402*0b57cec5SDimitry Andric } 403*0b57cec5SDimitry Andric 404*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 405*0b57cec5SDimitry Andric 406*0b57cec5SDimitry Andric MachineDominatorTree MDT; 407*0b57cec5SDimitry Andric MDT.runOnMachineFunction(MF); 408*0b57cec5SDimitry Andric MachinePostDominatorTree MPT; 409*0b57cec5SDimitry Andric MPT.runOnMachineFunction(MF); 410*0b57cec5SDimitry Andric 411*0b57cec5SDimitry Andric using UnsignedMap = DenseMap<unsigned, unsigned>; 412*0b57cec5SDimitry Andric using RPOTType = ReversePostOrderTraversal<const MachineFunction *>; 413*0b57cec5SDimitry Andric 414*0b57cec5SDimitry Andric UnsignedMap RPO; 415*0b57cec5SDimitry Andric RPOTType RPOT(&MF); 416*0b57cec5SDimitry Andric unsigned RPON = 0; 417*0b57cec5SDimitry Andric for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) 418*0b57cec5SDimitry Andric RPO[(*I)->getNumber()] = RPON++; 419*0b57cec5SDimitry Andric 420*0b57cec5SDimitry Andric // Don't process functions that have loops, at least for now. 
Placement 421*0b57cec5SDimitry Andric // of prolog and epilog must take loop structure into account. For simpli- 422*0b57cec5SDimitry Andric // city don't do it right now. 423*0b57cec5SDimitry Andric for (auto &I : MF) { 424*0b57cec5SDimitry Andric unsigned BN = RPO[I.getNumber()]; 425*0b57cec5SDimitry Andric for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) { 426*0b57cec5SDimitry Andric // If found a back-edge, return. 427*0b57cec5SDimitry Andric if (RPO[(*SI)->getNumber()] <= BN) 428*0b57cec5SDimitry Andric return; 429*0b57cec5SDimitry Andric } 430*0b57cec5SDimitry Andric } 431*0b57cec5SDimitry Andric 432*0b57cec5SDimitry Andric // Collect the set of blocks that need a stack frame to execute. Scan 433*0b57cec5SDimitry Andric // each block for uses/defs of callee-saved registers, calls, etc. 434*0b57cec5SDimitry Andric SmallVector<MachineBasicBlock*,16> SFBlocks; 435*0b57cec5SDimitry Andric BitVector CSR(Hexagon::NUM_TARGET_REGS); 436*0b57cec5SDimitry Andric for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P) 437*0b57cec5SDimitry Andric for (MCSubRegIterator S(*P, &HRI, true); S.isValid(); ++S) 438*0b57cec5SDimitry Andric CSR[*S] = true; 439*0b57cec5SDimitry Andric 440*0b57cec5SDimitry Andric for (auto &I : MF) 441*0b57cec5SDimitry Andric if (needsStackFrame(I, CSR, HRI)) 442*0b57cec5SDimitry Andric SFBlocks.push_back(&I); 443*0b57cec5SDimitry Andric 444*0b57cec5SDimitry Andric LLVM_DEBUG({ 445*0b57cec5SDimitry Andric dbgs() << "Blocks needing SF: {"; 446*0b57cec5SDimitry Andric for (auto &B : SFBlocks) 447*0b57cec5SDimitry Andric dbgs() << " " << printMBBReference(*B); 448*0b57cec5SDimitry Andric dbgs() << " }\n"; 449*0b57cec5SDimitry Andric }); 450*0b57cec5SDimitry Andric // No frame needed? 451*0b57cec5SDimitry Andric if (SFBlocks.empty()) 452*0b57cec5SDimitry Andric return; 453*0b57cec5SDimitry Andric 454*0b57cec5SDimitry Andric // Pick a common dominator and a common post-dominator. 
455*0b57cec5SDimitry Andric MachineBasicBlock *DomB = SFBlocks[0]; 456*0b57cec5SDimitry Andric for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) { 457*0b57cec5SDimitry Andric DomB = MDT.findNearestCommonDominator(DomB, SFBlocks[i]); 458*0b57cec5SDimitry Andric if (!DomB) 459*0b57cec5SDimitry Andric break; 460*0b57cec5SDimitry Andric } 461*0b57cec5SDimitry Andric MachineBasicBlock *PDomB = SFBlocks[0]; 462*0b57cec5SDimitry Andric for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) { 463*0b57cec5SDimitry Andric PDomB = MPT.findNearestCommonDominator(PDomB, SFBlocks[i]); 464*0b57cec5SDimitry Andric if (!PDomB) 465*0b57cec5SDimitry Andric break; 466*0b57cec5SDimitry Andric } 467*0b57cec5SDimitry Andric LLVM_DEBUG({ 468*0b57cec5SDimitry Andric dbgs() << "Computed dom block: "; 469*0b57cec5SDimitry Andric if (DomB) 470*0b57cec5SDimitry Andric dbgs() << printMBBReference(*DomB); 471*0b57cec5SDimitry Andric else 472*0b57cec5SDimitry Andric dbgs() << "<null>"; 473*0b57cec5SDimitry Andric dbgs() << ", computed pdom block: "; 474*0b57cec5SDimitry Andric if (PDomB) 475*0b57cec5SDimitry Andric dbgs() << printMBBReference(*PDomB); 476*0b57cec5SDimitry Andric else 477*0b57cec5SDimitry Andric dbgs() << "<null>"; 478*0b57cec5SDimitry Andric dbgs() << "\n"; 479*0b57cec5SDimitry Andric }); 480*0b57cec5SDimitry Andric if (!DomB || !PDomB) 481*0b57cec5SDimitry Andric return; 482*0b57cec5SDimitry Andric 483*0b57cec5SDimitry Andric // Make sure that DomB dominates PDomB and PDomB post-dominates DomB. 
484*0b57cec5SDimitry Andric if (!MDT.dominates(DomB, PDomB)) { 485*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n"); 486*0b57cec5SDimitry Andric return; 487*0b57cec5SDimitry Andric } 488*0b57cec5SDimitry Andric if (!MPT.dominates(PDomB, DomB)) { 489*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n"); 490*0b57cec5SDimitry Andric return; 491*0b57cec5SDimitry Andric } 492*0b57cec5SDimitry Andric 493*0b57cec5SDimitry Andric // Finally, everything seems right. 494*0b57cec5SDimitry Andric PrologB = DomB; 495*0b57cec5SDimitry Andric EpilogB = PDomB; 496*0b57cec5SDimitry Andric } 497*0b57cec5SDimitry Andric 498*0b57cec5SDimitry Andric /// Perform most of the PEI work here: 499*0b57cec5SDimitry Andric /// - saving/restoring of the callee-saved registers, 500*0b57cec5SDimitry Andric /// - stack frame creation and destruction. 501*0b57cec5SDimitry Andric /// Normally, this work is distributed among various functions, but doing it 502*0b57cec5SDimitry Andric /// in one place allows shrink-wrapping of the stack frame. 
503*0b57cec5SDimitry Andric void HexagonFrameLowering::emitPrologue(MachineFunction &MF, 504*0b57cec5SDimitry Andric MachineBasicBlock &MBB) const { 505*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 506*0b57cec5SDimitry Andric 507*0b57cec5SDimitry Andric MachineFrameInfo &MFI = MF.getFrameInfo(); 508*0b57cec5SDimitry Andric const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 509*0b57cec5SDimitry Andric 510*0b57cec5SDimitry Andric MachineBasicBlock *PrologB = &MF.front(), *EpilogB = nullptr; 511*0b57cec5SDimitry Andric if (EnableShrinkWrapping) 512*0b57cec5SDimitry Andric findShrunkPrologEpilog(MF, PrologB, EpilogB); 513*0b57cec5SDimitry Andric 514*0b57cec5SDimitry Andric bool PrologueStubs = false; 515*0b57cec5SDimitry Andric insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs); 516*0b57cec5SDimitry Andric insertPrologueInBlock(*PrologB, PrologueStubs); 517*0b57cec5SDimitry Andric updateEntryPaths(MF, *PrologB); 518*0b57cec5SDimitry Andric 519*0b57cec5SDimitry Andric if (EpilogB) { 520*0b57cec5SDimitry Andric insertCSRRestoresInBlock(*EpilogB, CSI, HRI); 521*0b57cec5SDimitry Andric insertEpilogueInBlock(*EpilogB); 522*0b57cec5SDimitry Andric } else { 523*0b57cec5SDimitry Andric for (auto &B : MF) 524*0b57cec5SDimitry Andric if (B.isReturnBlock()) 525*0b57cec5SDimitry Andric insertCSRRestoresInBlock(B, CSI, HRI); 526*0b57cec5SDimitry Andric 527*0b57cec5SDimitry Andric for (auto &B : MF) 528*0b57cec5SDimitry Andric if (B.isReturnBlock()) 529*0b57cec5SDimitry Andric insertEpilogueInBlock(B); 530*0b57cec5SDimitry Andric 531*0b57cec5SDimitry Andric for (auto &B : MF) { 532*0b57cec5SDimitry Andric if (B.empty()) 533*0b57cec5SDimitry Andric continue; 534*0b57cec5SDimitry Andric MachineInstr *RetI = getReturn(B); 535*0b57cec5SDimitry Andric if (!RetI || isRestoreCall(RetI->getOpcode())) 536*0b57cec5SDimitry Andric continue; 537*0b57cec5SDimitry Andric for (auto &R : CSI) 538*0b57cec5SDimitry Andric 
RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true)); 539*0b57cec5SDimitry Andric } 540*0b57cec5SDimitry Andric } 541*0b57cec5SDimitry Andric 542*0b57cec5SDimitry Andric if (EpilogB) { 543*0b57cec5SDimitry Andric // If there is an epilog block, it may not have a return instruction. 544*0b57cec5SDimitry Andric // In such case, we need to add the callee-saved registers as live-ins 545*0b57cec5SDimitry Andric // in all blocks on all paths from the epilog to any return block. 546*0b57cec5SDimitry Andric unsigned MaxBN = MF.getNumBlockIDs(); 547*0b57cec5SDimitry Andric BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1); 548*0b57cec5SDimitry Andric updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path); 549*0b57cec5SDimitry Andric } 550*0b57cec5SDimitry Andric } 551*0b57cec5SDimitry Andric 552*0b57cec5SDimitry Andric /// Returns true if the target can safely skip saving callee-saved registers 553*0b57cec5SDimitry Andric /// for noreturn nounwind functions. 554*0b57cec5SDimitry Andric bool HexagonFrameLowering::enableCalleeSaveSkip( 555*0b57cec5SDimitry Andric const MachineFunction &MF) const { 556*0b57cec5SDimitry Andric const auto &F = MF.getFunction(); 557*0b57cec5SDimitry Andric assert(F.hasFnAttribute(Attribute::NoReturn) && 558*0b57cec5SDimitry Andric F.getFunction().hasFnAttribute(Attribute::NoUnwind) && 559*0b57cec5SDimitry Andric !F.getFunction().hasFnAttribute(Attribute::UWTable)); 560*0b57cec5SDimitry Andric (void)F; 561*0b57cec5SDimitry Andric 562*0b57cec5SDimitry Andric // No need to save callee saved registers if the function does not return. 563*0b57cec5SDimitry Andric return MF.getSubtarget<HexagonSubtarget>().noreturnStackElim(); 564*0b57cec5SDimitry Andric } 565*0b57cec5SDimitry Andric 566*0b57cec5SDimitry Andric // Helper function used to determine when to eliminate the stack frame for 567*0b57cec5SDimitry Andric // functions marked as noreturn and when the noreturn-stack-elim options are 568*0b57cec5SDimitry Andric // specified. 
// When both these conditions are true, then a FP may not be needed
// if the function makes a call. It is very similar to enableCalleeSaveSkip,
// but it used to check if the allocframe can be eliminated as well.
static bool enableAllocFrameElim(const MachineFunction &MF) {
  const auto &F = MF.getFunction();
  const auto &MFI = MF.getFrameInfo();
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
  assert(!MFI.hasVarSizedObjects() &&
         !HST.getRegisterInfo()->needsStackRealignment(MF));
  return F.hasFnAttribute(Attribute::NoReturn) &&
         F.hasFnAttribute(Attribute::NoUnwind) &&
         !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
         MFI.getStackSize() == 0;
}

// Emit the prologue into MBB: finalize the frame size, expand dynamic
// allocas, and either emit allocframe (when an FP is used) or a plain
// SP decrement.
void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
      bool PrologueStubs) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  unsigned MaxAlign = std::max(MFI.getMaxAlignment(), getStackAlignment());

  // Calculate the total stack frame size.
  // Get the number of bytes to allocate from the FrameInfo.
  unsigned FrameSize = MFI.getStackSize();
  // Round up the max call frame size to the max alignment on the stack.
  unsigned MaxCFA = alignTo(MFI.getMaxCallFrameSize(), MaxAlign);
  MFI.setMaxCallFrameSize(MaxCFA);

  FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
  MFI.setStackSize(FrameSize);

  bool AlignStack = (MaxAlign > getStackAlignment());

  // Get the number of bytes to allocate from the FrameInfo.
  unsigned NumBytes = MFI.getStackSize();
  unsigned SP = HRI.getStackRegister();
  unsigned MaxCF = MFI.getMaxCallFrameSize();
  MachineBasicBlock::iterator InsertPt = MBB.begin();

  // Collect all PS_alloca pseudos in the whole function, then expand them.
  // NOTE(review): the loop variable below shadows the MBB parameter; the
  // scan is function-wide and intentionally not limited to the prolog block.
  SmallVector<MachineInstr *, 4> AdjustRegs;
  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == Hexagon::PS_alloca)
        AdjustRegs.push_back(&MI);

  for (auto MI : AdjustRegs) {
    assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
    expandAlloca(MI, HII, SP, MaxCF);
    MI->eraseFromParent();
  }

  DebugLoc dl = MBB.findDebugLoc(InsertPt);

  if (hasFP(MF)) {
    insertAllocframe(MBB, InsertPt, NumBytes);
    // Realign SP with an and-immediate when extra alignment is needed.
    if (AlignStack) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
          .addReg(SP)
          .addImm(-int64_t(MaxAlign));
    }
    // If the stack-checking is enabled, and we spilled the callee-saved
    // registers inline (i.e. did not use a spill function), then call
    // the stack checker directly.
    if (EnableStackOVFSanitizer && !PrologueStubs)
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
          .addExternalSymbol("__runtime_stack_check");
  } else if (NumBytes > 0) {
    // No FP: just decrement SP by the (8-byte aligned) frame size.
    assert(alignTo(NumBytes, 8) == NumBytes);
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addReg(SP)
        .addImm(-int(NumBytes));
  }
}

// Emit the epilogue into MBB: undo the stack adjustment, or emit
// deallocframe / dealloc_return, avoiding duplicates when a restoring
// library call already deallocates the frame.
void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();
  unsigned SP = HRI.getStackRegister();

  MachineBasicBlock::iterator InsertPt = MBB.getFirstTerminator();
  DebugLoc dl = MBB.findDebugLoc(InsertPt);

  if (!hasFP(MF)) {
    // No allocframe was emitted: just add the frame size back to SP.
    MachineFrameInfo &MFI = MF.getFrameInfo();
    if (unsigned NumBytes = MFI.getStackSize()) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
          .addReg(SP)
          .addImm(NumBytes);
    }
    return;
  }

  MachineInstr *RetI = getReturn(MBB);
  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

  // Handle EH_RETURN.
  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
        .addDef(Hexagon::D15)
        .addReg(Hexagon::R30);
    // R28 holds the EH stack adjustment to apply on top of SP.
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
        .addReg(SP)
        .addReg(Hexagon::R28);
    return;
  }

  // Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
  // frame instruction if we encounter it.
  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
    MachineBasicBlock::iterator It = RetI;
    ++It;
    // Delete all instructions after the RESTORE (except labels).
    while (It != MBB.end()) {
      if (!It->isLabel())
        It = MBB.erase(It);
      else
        ++It;
    }
    return;
  }

  // It is possible that the restoring code is a call to a library function.
  // All of the restore* functions include "deallocframe", so we need to make
  // sure that we don't add an extra one.
  bool NeedsDeallocframe = true;
  if (!MBB.empty() && InsertPt != MBB.begin()) {
    MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
    unsigned COpc = PrevIt->getOpcode();
    if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
        COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
      NeedsDeallocframe = false;
  }

  if (!NeedsDeallocframe)
    return;
  // If the returning instruction is PS_jmpret, replace it with dealloc_return,
  // otherwise just add deallocframe. The function could be returning via a
  // tail call.
  if (RetOpc != Hexagon::PS_jmpret || DisableDeallocRet) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
        .addDef(Hexagon::D15)
        .addReg(Hexagon::R30);
    return;
  }
  unsigned NewOpc = Hexagon::L4_return;
  MachineInstr *NewI = BuildMI(MBB, RetI, dl, HII.get(NewOpc))
      .addDef(Hexagon::D15)
      .addReg(Hexagon::R30);
  // Transfer the function live-out registers.
  NewI->copyImplicitOps(MF, *RetI);
  MBB.erase(RetI);
}

// Emit S2_allocframe at InsertPt. Since the allocframe immediate is
// limited (< ALLOCFRAME_MAX), larger frames use allocframe(#0) followed
// by an explicit SP decrement.
void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator InsertPt, unsigned NumBytes) const {
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  // Check for overflow.
  // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
  const unsigned int ALLOCFRAME_MAX = 16384;

  // Create a dummy memory operand to avoid allocframe from being treated as
  // a volatile memory reference.
  auto *MMO = MF.getMachineMemOperand(MachinePointerInfo::getStack(MF, 0),
                                      MachineMemOperand::MOStore, 4, 4);

  DebugLoc dl = MBB.findDebugLoc(InsertPt);
  unsigned SP = HRI.getStackRegister();

  if (NumBytes >= ALLOCFRAME_MAX) {
    // Emit allocframe(#0).
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
        .addDef(SP)
        .addReg(SP)
        .addImm(0)
        .addMemOperand(MMO);

    // Subtract the size from the stack pointer.
    // NOTE(review): this inner declaration shadows the SP defined above
    // with the same value; it is redundant but harmless.
    unsigned SP = HRI.getStackRegister();
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addReg(SP)
        .addImm(-int(NumBytes));
  } else {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
        .addDef(SP)
        .addReg(SP)
        .addImm(NumBytes)
        .addMemOperand(MMO);
  }
}

// Add the callee-saved registers as live-ins to every block on every path
// from the entry block up to (and including) the save block, using a
// worklist walk over successors.
void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
      MachineBasicBlock &SaveB) const {
  SetVector<unsigned> Worklist;

  MachineBasicBlock &EntryB = MF.front();
  Worklist.insert(EntryB.getNumber());

  unsigned SaveN = SaveB.getNumber();
  auto &CSI = MF.getFrameInfo().getCalleeSavedInfo();

  // Worklist grows while iterating; SetVector gives stable indexing and
  // de-duplication.
  for (unsigned i = 0; i < Worklist.size(); ++i) {
    unsigned BN = Worklist[i];
    MachineBasicBlock &MBB = *MF.getBlockNumbered(BN);
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    // Stop expanding at the save block; everything past it is handled
    // by the epilog-side bookkeeping.
    if (BN != SaveN)
      for (auto &SB : MBB.successors())
        Worklist.insert(SB->getNumber());
  }
}

// DFS from the restore block toward the returns: mark CSRs live-in on
// blocks that reach an exit, and add implicit CSR uses to reached return
// instructions. DoneT/DoneF memoize reachable/unreachable blocks; Path
// detects cycles. Returns true if an exit is reachable from MBB.
bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
      MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
      BitVector &Path) const {
  assert(MBB.getNumber() >= 0);
  unsigned BN = MBB.getNumber();
  if (Path[BN] || DoneF[BN])
    return false;
  if (DoneT[BN])
    return true;

  auto &CSI = MBB.getParent()->getFrameInfo().getCalleeSavedInfo();

  Path[BN] = true;
  bool ReachedExit = false;
  for (auto &SB : MBB.successors())
    ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (!MBB.empty() && MBB.back().isReturn()) {
    // Add implicit uses of all callee-saved registers to the reached
    // return instructions. This is to prevent the anti-dependency breaker
    // from renaming these registers.
    MachineInstr &RetI = MBB.back();
    if (!isRestoreCall(RetI.getOpcode()))
      for (auto &R : CSI)
        RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    ReachedExit = true;
  }

  // We don't want to add unnecessary live-ins to the restore block: since
  // the callee-saved registers are being defined in it, the entry of the
  // restore block cannot be on the path from the definitions to any exit.
  if (ReachedExit && &MBB != &RestoreB) {
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    DoneT[BN] = true;
  }
  if (!ReachedExit)
    DoneF[BN] = true;

  Path[BN] = false;
  return ReachedExit;
}

static Optional<MachineBasicBlock::iterator>
findCFILocation(MachineBasicBlock &B) {
  // The CFI instructions need to be inserted right after allocframe.
  // An exception to this is a situation where allocframe is bundled
  // with a call: then the CFI instructions need to be inserted before
  // the packet with the allocframe+call (in case the call throws an
  // exception).
  auto End = B.instr_end();

  for (MachineInstr &I : B) {
    MachineBasicBlock::iterator It = I.getIterator();
    if (!I.isBundle()) {
      if (I.getOpcode() == Hexagon::S2_allocframe)
        return std::next(It);
      continue;
    }
    // I is a bundle.
    bool HasCall = false, HasAllocFrame = false;
    auto T = It.getInstrIterator();
    // Scan the instructions inside the bundle.
    while (++T != End && T->isBundled()) {
      if (T->getOpcode() == Hexagon::S2_allocframe)
        HasAllocFrame = true;
      else if (T->isCall())
        HasCall = true;
    }
    if (HasAllocFrame)
      return HasCall ? It : std::next(It);
  }
  return None;
}

// Insert CFI instructions in every block that has an insertion point
// (i.e. contains an allocframe); see findCFILocation.
void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
  for (auto &B : MF) {
    auto At = findCFILocation(B);
    if (At.hasValue())
      insertCFIInstructionsAt(B, At.getValue());
  }
}

// Emit the CFI directives at At: define the CFA (relative to FP when one
// exists), then record the save locations of LR/FP and of each spilled
// callee-saved register.
void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator At) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  // If CFI instructions have debug information attached, something goes
  // wrong with the final assembly generation: the prolog_end is placed
  // in a wrong location.
  DebugLoc DL;
  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

  MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
  bool HasFP = hasFP(MF);

  if (HasFP) {
    unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
    unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

    // Define CFA via an offset from the value of FP.
    //
    //  -8   -4    0 (SP)
    // --+----+----+---------------------
    //   | FP | LR |   increasing addresses -->
    // --+----+----+---------------------
    //   |         +-- Old SP (before allocframe)
    //   +-- New FP (after allocframe)
    //
    // MCCFIInstruction::createDefCfa subtracts the offset from the register.
    // MCCFIInstruction::createOffset takes the offset without sign change.
    auto DefCfa = MCCFIInstruction::createDefCfa(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(DefCfa));
    // R31 (return addr) = CFA - 4
    auto OffR31 = MCCFIInstruction::createOffset(FrameLabel, DwRAReg, -4);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR31));
    // R30 (frame ptr) = CFA - 8
    auto OffR30 = MCCFIInstruction::createOffset(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR30));
  }

  // Registers (NoRegister-terminated) whose spill slots get CFI offsets,
  // if they appear in the callee-saved list.
  static unsigned int RegsToMove[] = {
    Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
    Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
    Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
    Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
    Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
    Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
    Hexagon::NoRegister
  };

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
    unsigned Reg = RegsToMove[i];
    auto IfR = [Reg] (const CalleeSavedInfo &C) -> bool {
      return C.getReg() == Reg;
    };
    auto F = find_if(CSI, IfR);
    if (F == CSI.end())
      continue;

    int64_t Offset;
    if (HasFP) {
      // If the function has a frame pointer (i.e. has an allocframe),
      // then the CFA has been defined in terms of FP. Any offsets in
      // the following CFI instructions have to be defined relative
      // to FP, which points to the bottom of the stack frame.
      // The function getFrameIndexReference can still choose to use SP
      // for the offset calculation, so we cannot simply call it here.
      // Instead, get the offset (relative to the FP) directly.
      Offset = MFI.getObjectOffset(F->getFrameIdx());
    } else {
      unsigned FrameReg;
      Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
    }
    // Subtract 8 to make room for R30 and R31, which are added above.
    Offset -= 8;

    if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
      // Single (32-bit) register: one cfi_offset.
      unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
      auto OffReg = MCCFIInstruction::createOffset(FrameLabel, DwarfReg,
                                                   Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffReg));
    } else {
      // Split the double regs into subregs, and generate appropriate
      // cfi_offsets.
      // The only reason, we are split double regs is, llvm-mc does not
      // understand paired registers for cfi_offset.
      // Eg .cfi_offset r1:0, -64

      unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
      unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
      unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
      unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
      auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg,
                                                  Offset+4);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffHi));
      auto OffLo = MCCFIInstruction::createOffset(FrameLabel, LoDwarfReg,
                                                  Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffLo));
    }
  }
}

// Decide whether this function needs a frame pointer (an allocframe).
bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    return false;

  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  bool HasExtraAlign = HRI.needsStackRealignment(MF);
  bool HasAlloca = MFI.hasVarSizedObjects();

  // Insert ALLOCFRAME if we need to or at -O0 for the debugger.  Think
  // that this shouldn't be required, but doing so now because gcc does and
  // gdb can't break at the start of the function without it.  Will remove if
  // this turns out to be a gdb bug.
  //
  if (MF.getTarget().getOptLevel() == CodeGenOpt::None)
    return true;

  // By default we want to use SP (since it's always there). FP requires
  // some setup (i.e. ALLOCFRAME).
  // Both, alloca and stack alignment modify the stack pointer by an
  // undetermined value, so we need to save it at the entry to the function
  // (i.e. use allocframe).
  if (HasAlloca || HasExtraAlign)
    return true;

  if (MFI.getStackSize() > 0) {
    // If FP-elimination is disabled, we have to use FP at this point.
    const TargetMachine &TM = MF.getTarget();
    if (TM.Options.DisableFramePointerElim(MF) || !EliminateFramePointer)
      return true;
    if (EnableStackOVFSanitizer)
      return true;
  }

  const auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  if ((MFI.hasCalls() && !enableAllocFrameElim(MF)) || HMFI.hasClobberLR())
    return true;

  return false;
}

// Kinds of runtime spill-library calls: save to memory, restore from
// memory, and restore-before-tailcall.
enum SpillKind {
  SK_ToMem,
  SK_FromMem,
  SK_FromMemTailcall
};

// Return the name of the runtime save/restore library function covering
// callee-saved registers r16..MaxReg for the given spill kind (optionally
// the stack-checking variant for saves).
static const char *getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType,
      bool Stkchk = false) {
  const char * V4SpillToMemoryFunctions[] = {
    "__save_r16_through_r17",
    "__save_r16_through_r19",
    "__save_r16_through_r21",
    "__save_r16_through_r23",
    "__save_r16_through_r25",
    "__save_r16_through_r27" };

  const char * V4SpillToMemoryStkchkFunctions[] = {
    "__save_r16_through_r17_stkchk",
    "__save_r16_through_r19_stkchk",
    "__save_r16_through_r21_stkchk",
    "__save_r16_through_r23_stkchk",
    "__save_r16_through_r25_stkchk",
    "__save_r16_through_r27_stkchk" };

  const char * V4SpillFromMemoryFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe",
    "__restore_r16_through_r19_and_deallocframe",
    "__restore_r16_through_r21_and_deallocframe",
    "__restore_r16_through_r23_and_deallocframe",
    "__restore_r16_through_r25_and_deallocframe",
    "__restore_r16_through_r27_and_deallocframe" };

  const char * V4SpillFromMemoryTailcallFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
    "__restore_r16_through_r27_and_deallocframe_before_tailcall"
  };

  const char **SpillFunc = nullptr;

  switch(SpillType) {
    case SK_ToMem:
      SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                         : V4SpillToMemoryFunctions;
      break;
    case SK_FromMem:
      SpillFunc = V4SpillFromMemoryFunctions;
      break;
    case SK_FromMemTailcall:
      SpillFunc = V4SpillFromMemoryTailcallFunctions;
      break;
  }
  assert(SpillFunc && "Unknown spill kind");

  // Spill all callee-saved registers up to the highest register used.
  switch (MaxReg) {
    case Hexagon::R17:
      return SpillFunc[0];
    case Hexagon::R19:
      return SpillFunc[1];
    case Hexagon::R21:
      return SpillFunc[2];
    case Hexagon::R23:
      return SpillFunc[3];
    case Hexagon::R25:
      return SpillFunc[4];
    case Hexagon::R27:
      return SpillFunc[5];
    default:
      llvm_unreachable("Unhandled maximum callee save register");
  }
  return nullptr;
}

int HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF,
      int FI, unsigned &FrameReg) const {
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  int Offset = MFI.getObjectOffset(FI);
  bool HasAlloca = MFI.hasVarSizedObjects();
  bool HasExtraAlign = HRI.needsStackRealignment(MF);
  bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOpt::None;

  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  unsigned FrameSize = MFI.getStackSize();
  unsigned SP = HRI.getStackRegister();
  unsigned FP = HRI.getFrameRegister();
  unsigned AP = HMFI.getStackAlignBasePhysReg();
  // It may happen that AP will be absent even HasAlloca && HasExtraAlign
  // is true. HasExtraAlign may be set because of vector spills, without
  // aligned locals or aligned outgoing function arguments. Since vector
  // spills will ultimately be "unaligned", it is safe to use FP as the
  // base register.
  // In fact, in such a scenario the stack is actually not required to be
  // aligned, although it may end up being aligned anyway, since this
  // particular case is not easily detectable. The alignment will be
  // unnecessary, but not incorrect.
  // Unfortunately there is no quick way to verify that the above is
  // indeed the case (and that it's not a result of an error), so just
  // assume that missing AP will be replaced by FP.
  // (A better fix would be to rematerialize AP from FP and always align
  // vector spills.)
1139*0b57cec5SDimitry Andric if (AP == 0) 1140*0b57cec5SDimitry Andric AP = FP; 1141*0b57cec5SDimitry Andric 1142*0b57cec5SDimitry Andric bool UseFP = false, UseAP = false; // Default: use SP (except at -O0). 1143*0b57cec5SDimitry Andric // Use FP at -O0, except when there are objects with extra alignment. 1144*0b57cec5SDimitry Andric // That additional alignment requirement may cause a pad to be inserted, 1145*0b57cec5SDimitry Andric // which will make it impossible to use FP to access objects located 1146*0b57cec5SDimitry Andric // past the pad. 1147*0b57cec5SDimitry Andric if (NoOpt && !HasExtraAlign) 1148*0b57cec5SDimitry Andric UseFP = true; 1149*0b57cec5SDimitry Andric if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) { 1150*0b57cec5SDimitry Andric // Fixed and preallocated objects will be located before any padding 1151*0b57cec5SDimitry Andric // so FP must be used to access them. 1152*0b57cec5SDimitry Andric UseFP |= (HasAlloca || HasExtraAlign); 1153*0b57cec5SDimitry Andric } else { 1154*0b57cec5SDimitry Andric if (HasAlloca) { 1155*0b57cec5SDimitry Andric if (HasExtraAlign) 1156*0b57cec5SDimitry Andric UseAP = true; 1157*0b57cec5SDimitry Andric else 1158*0b57cec5SDimitry Andric UseFP = true; 1159*0b57cec5SDimitry Andric } 1160*0b57cec5SDimitry Andric } 1161*0b57cec5SDimitry Andric 1162*0b57cec5SDimitry Andric // If FP was picked, then there had better be FP. 1163*0b57cec5SDimitry Andric bool HasFP = hasFP(MF); 1164*0b57cec5SDimitry Andric assert((HasFP || !UseFP) && "This function must have frame pointer"); 1165*0b57cec5SDimitry Andric 1166*0b57cec5SDimitry Andric // Having FP implies allocframe. Allocframe will store extra 8 bytes: 1167*0b57cec5SDimitry Andric // FP/LR. If the base register is used to access an object across these 1168*0b57cec5SDimitry Andric // 8 bytes, then the offset will need to be adjusted by 8. 
1169*0b57cec5SDimitry Andric // 1170*0b57cec5SDimitry Andric // After allocframe: 1171*0b57cec5SDimitry Andric // HexagonISelLowering adds 8 to ---+ 1172*0b57cec5SDimitry Andric // the offsets of all stack-based | 1173*0b57cec5SDimitry Andric // arguments (*) | 1174*0b57cec5SDimitry Andric // | 1175*0b57cec5SDimitry Andric // getObjectOffset < 0 0 8 getObjectOffset >= 8 1176*0b57cec5SDimitry Andric // ------------------------+-----+------------------------> increasing 1177*0b57cec5SDimitry Andric // <local objects> |FP/LR| <input arguments> addresses 1178*0b57cec5SDimitry Andric // -----------------+------+-----+------------------------> 1179*0b57cec5SDimitry Andric // | | 1180*0b57cec5SDimitry Andric // SP/AP point --+ +-- FP points here (**) 1181*0b57cec5SDimitry Andric // somewhere on 1182*0b57cec5SDimitry Andric // this side of FP/LR 1183*0b57cec5SDimitry Andric // 1184*0b57cec5SDimitry Andric // (*) See LowerFormalArguments. The FP/LR is assumed to be present. 1185*0b57cec5SDimitry Andric // (**) *FP == old-FP. FP+0..7 are the bytes of FP/LR. 1186*0b57cec5SDimitry Andric 1187*0b57cec5SDimitry Andric // The lowering assumes that FP/LR is present, and so the offsets of 1188*0b57cec5SDimitry Andric // the formal arguments start at 8. If FP/LR is not there we need to 1189*0b57cec5SDimitry Andric // reduce the offset by 8. 1190*0b57cec5SDimitry Andric if (Offset > 0 && !HasFP) 1191*0b57cec5SDimitry Andric Offset -= 8; 1192*0b57cec5SDimitry Andric 1193*0b57cec5SDimitry Andric if (UseFP) 1194*0b57cec5SDimitry Andric FrameReg = FP; 1195*0b57cec5SDimitry Andric else if (UseAP) 1196*0b57cec5SDimitry Andric FrameReg = AP; 1197*0b57cec5SDimitry Andric else 1198*0b57cec5SDimitry Andric FrameReg = SP; 1199*0b57cec5SDimitry Andric 1200*0b57cec5SDimitry Andric // Calculate the actual offset in the instruction. If there is no FP 1201*0b57cec5SDimitry Andric // (in other words, no allocframe), then SP will not be adjusted (i.e. 
1202*0b57cec5SDimitry Andric // there will be no SP -= FrameSize), so the frame size should not be 1203*0b57cec5SDimitry Andric // added to the calculated offset. 1204*0b57cec5SDimitry Andric int RealOffset = Offset; 1205*0b57cec5SDimitry Andric if (!UseFP && !UseAP) 1206*0b57cec5SDimitry Andric RealOffset = FrameSize+Offset; 1207*0b57cec5SDimitry Andric return RealOffset; 1208*0b57cec5SDimitry Andric } 1209*0b57cec5SDimitry Andric 1210*0b57cec5SDimitry Andric bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB, 1211*0b57cec5SDimitry Andric const CSIVect &CSI, const HexagonRegisterInfo &HRI, 1212*0b57cec5SDimitry Andric bool &PrologueStubs) const { 1213*0b57cec5SDimitry Andric if (CSI.empty()) 1214*0b57cec5SDimitry Andric return true; 1215*0b57cec5SDimitry Andric 1216*0b57cec5SDimitry Andric MachineBasicBlock::iterator MI = MBB.begin(); 1217*0b57cec5SDimitry Andric PrologueStubs = false; 1218*0b57cec5SDimitry Andric MachineFunction &MF = *MBB.getParent(); 1219*0b57cec5SDimitry Andric auto &HST = MF.getSubtarget<HexagonSubtarget>(); 1220*0b57cec5SDimitry Andric auto &HII = *HST.getInstrInfo(); 1221*0b57cec5SDimitry Andric 1222*0b57cec5SDimitry Andric if (useSpillFunction(MF, CSI)) { 1223*0b57cec5SDimitry Andric PrologueStubs = true; 1224*0b57cec5SDimitry Andric unsigned MaxReg = getMaxCalleeSavedReg(CSI, HRI); 1225*0b57cec5SDimitry Andric bool StkOvrFlowEnabled = EnableStackOVFSanitizer; 1226*0b57cec5SDimitry Andric const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem, 1227*0b57cec5SDimitry Andric StkOvrFlowEnabled); 1228*0b57cec5SDimitry Andric auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget()); 1229*0b57cec5SDimitry Andric bool IsPIC = HTM.isPositionIndependent(); 1230*0b57cec5SDimitry Andric bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong; 1231*0b57cec5SDimitry Andric 1232*0b57cec5SDimitry Andric // Call spill function. 1233*0b57cec5SDimitry Andric DebugLoc DL = MI != MBB.end() ? 
MI->getDebugLoc() : DebugLoc(); 1234*0b57cec5SDimitry Andric unsigned SpillOpc; 1235*0b57cec5SDimitry Andric if (StkOvrFlowEnabled) { 1236*0b57cec5SDimitry Andric if (LongCalls) 1237*0b57cec5SDimitry Andric SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC 1238*0b57cec5SDimitry Andric : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT; 1239*0b57cec5SDimitry Andric else 1240*0b57cec5SDimitry Andric SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC 1241*0b57cec5SDimitry Andric : Hexagon::SAVE_REGISTERS_CALL_V4STK; 1242*0b57cec5SDimitry Andric } else { 1243*0b57cec5SDimitry Andric if (LongCalls) 1244*0b57cec5SDimitry Andric SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC 1245*0b57cec5SDimitry Andric : Hexagon::SAVE_REGISTERS_CALL_V4_EXT; 1246*0b57cec5SDimitry Andric else 1247*0b57cec5SDimitry Andric SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC 1248*0b57cec5SDimitry Andric : Hexagon::SAVE_REGISTERS_CALL_V4; 1249*0b57cec5SDimitry Andric } 1250*0b57cec5SDimitry Andric 1251*0b57cec5SDimitry Andric MachineInstr *SaveRegsCall = 1252*0b57cec5SDimitry Andric BuildMI(MBB, MI, DL, HII.get(SpillOpc)) 1253*0b57cec5SDimitry Andric .addExternalSymbol(SpillFun); 1254*0b57cec5SDimitry Andric 1255*0b57cec5SDimitry Andric // Add callee-saved registers as use. 1256*0b57cec5SDimitry Andric addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true); 1257*0b57cec5SDimitry Andric // Add live in registers. 1258*0b57cec5SDimitry Andric for (unsigned I = 0; I < CSI.size(); ++I) 1259*0b57cec5SDimitry Andric MBB.addLiveIn(CSI[I].getReg()); 1260*0b57cec5SDimitry Andric return true; 1261*0b57cec5SDimitry Andric } 1262*0b57cec5SDimitry Andric 1263*0b57cec5SDimitry Andric for (unsigned i = 0, n = CSI.size(); i < n; ++i) { 1264*0b57cec5SDimitry Andric unsigned Reg = CSI[i].getReg(); 1265*0b57cec5SDimitry Andric // Add live in registers. We treat eh_return callee saved register r0 - r3 1266*0b57cec5SDimitry Andric // specially. 
They are not really callee saved registers as they are not 1267*0b57cec5SDimitry Andric // supposed to be killed. 1268*0b57cec5SDimitry Andric bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg); 1269*0b57cec5SDimitry Andric int FI = CSI[i].getFrameIdx(); 1270*0b57cec5SDimitry Andric const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg); 1271*0b57cec5SDimitry Andric HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI); 1272*0b57cec5SDimitry Andric if (IsKill) 1273*0b57cec5SDimitry Andric MBB.addLiveIn(Reg); 1274*0b57cec5SDimitry Andric } 1275*0b57cec5SDimitry Andric return true; 1276*0b57cec5SDimitry Andric } 1277*0b57cec5SDimitry Andric 1278*0b57cec5SDimitry Andric bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB, 1279*0b57cec5SDimitry Andric const CSIVect &CSI, const HexagonRegisterInfo &HRI) const { 1280*0b57cec5SDimitry Andric if (CSI.empty()) 1281*0b57cec5SDimitry Andric return false; 1282*0b57cec5SDimitry Andric 1283*0b57cec5SDimitry Andric MachineBasicBlock::iterator MI = MBB.getFirstTerminator(); 1284*0b57cec5SDimitry Andric MachineFunction &MF = *MBB.getParent(); 1285*0b57cec5SDimitry Andric auto &HST = MF.getSubtarget<HexagonSubtarget>(); 1286*0b57cec5SDimitry Andric auto &HII = *HST.getInstrInfo(); 1287*0b57cec5SDimitry Andric 1288*0b57cec5SDimitry Andric if (useRestoreFunction(MF, CSI)) { 1289*0b57cec5SDimitry Andric bool HasTC = hasTailCall(MBB) || !hasReturn(MBB); 1290*0b57cec5SDimitry Andric unsigned MaxR = getMaxCalleeSavedReg(CSI, HRI); 1291*0b57cec5SDimitry Andric SpillKind Kind = HasTC ? 
SK_FromMemTailcall : SK_FromMem; 1292*0b57cec5SDimitry Andric const char *RestoreFn = getSpillFunctionFor(MaxR, Kind); 1293*0b57cec5SDimitry Andric auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget()); 1294*0b57cec5SDimitry Andric bool IsPIC = HTM.isPositionIndependent(); 1295*0b57cec5SDimitry Andric bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong; 1296*0b57cec5SDimitry Andric 1297*0b57cec5SDimitry Andric // Call spill function. 1298*0b57cec5SDimitry Andric DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() 1299*0b57cec5SDimitry Andric : MBB.findDebugLoc(MBB.end()); 1300*0b57cec5SDimitry Andric MachineInstr *DeallocCall = nullptr; 1301*0b57cec5SDimitry Andric 1302*0b57cec5SDimitry Andric if (HasTC) { 1303*0b57cec5SDimitry Andric unsigned RetOpc; 1304*0b57cec5SDimitry Andric if (LongCalls) 1305*0b57cec5SDimitry Andric RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC 1306*0b57cec5SDimitry Andric : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT; 1307*0b57cec5SDimitry Andric else 1308*0b57cec5SDimitry Andric RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC 1309*0b57cec5SDimitry Andric : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4; 1310*0b57cec5SDimitry Andric DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc)) 1311*0b57cec5SDimitry Andric .addExternalSymbol(RestoreFn); 1312*0b57cec5SDimitry Andric } else { 1313*0b57cec5SDimitry Andric // The block has a return. 1314*0b57cec5SDimitry Andric MachineBasicBlock::iterator It = MBB.getFirstTerminator(); 1315*0b57cec5SDimitry Andric assert(It->isReturn() && std::next(It) == MBB.end()); 1316*0b57cec5SDimitry Andric unsigned RetOpc; 1317*0b57cec5SDimitry Andric if (LongCalls) 1318*0b57cec5SDimitry Andric RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC 1319*0b57cec5SDimitry Andric : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT; 1320*0b57cec5SDimitry Andric else 1321*0b57cec5SDimitry Andric RetOpc = IsPIC ? 
Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC 1322*0b57cec5SDimitry Andric : Hexagon::RESTORE_DEALLOC_RET_JMP_V4; 1323*0b57cec5SDimitry Andric DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc)) 1324*0b57cec5SDimitry Andric .addExternalSymbol(RestoreFn); 1325*0b57cec5SDimitry Andric // Transfer the function live-out registers. 1326*0b57cec5SDimitry Andric DeallocCall->copyImplicitOps(MF, *It); 1327*0b57cec5SDimitry Andric } 1328*0b57cec5SDimitry Andric addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false); 1329*0b57cec5SDimitry Andric return true; 1330*0b57cec5SDimitry Andric } 1331*0b57cec5SDimitry Andric 1332*0b57cec5SDimitry Andric for (unsigned i = 0; i < CSI.size(); ++i) { 1333*0b57cec5SDimitry Andric unsigned Reg = CSI[i].getReg(); 1334*0b57cec5SDimitry Andric const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg); 1335*0b57cec5SDimitry Andric int FI = CSI[i].getFrameIdx(); 1336*0b57cec5SDimitry Andric HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI); 1337*0b57cec5SDimitry Andric } 1338*0b57cec5SDimitry Andric 1339*0b57cec5SDimitry Andric return true; 1340*0b57cec5SDimitry Andric } 1341*0b57cec5SDimitry Andric 1342*0b57cec5SDimitry Andric MachineBasicBlock::iterator HexagonFrameLowering::eliminateCallFramePseudoInstr( 1343*0b57cec5SDimitry Andric MachineFunction &MF, MachineBasicBlock &MBB, 1344*0b57cec5SDimitry Andric MachineBasicBlock::iterator I) const { 1345*0b57cec5SDimitry Andric MachineInstr &MI = *I; 1346*0b57cec5SDimitry Andric unsigned Opc = MI.getOpcode(); 1347*0b57cec5SDimitry Andric (void)Opc; // Silence compiler warning. 
1348*0b57cec5SDimitry Andric assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) && 1349*0b57cec5SDimitry Andric "Cannot handle this call frame pseudo instruction"); 1350*0b57cec5SDimitry Andric return MBB.erase(I); 1351*0b57cec5SDimitry Andric } 1352*0b57cec5SDimitry Andric 1353*0b57cec5SDimitry Andric void HexagonFrameLowering::processFunctionBeforeFrameFinalized( 1354*0b57cec5SDimitry Andric MachineFunction &MF, RegScavenger *RS) const { 1355*0b57cec5SDimitry Andric // If this function has uses aligned stack and also has variable sized stack 1356*0b57cec5SDimitry Andric // objects, then we need to map all spill slots to fixed positions, so that 1357*0b57cec5SDimitry Andric // they can be accessed through FP. Otherwise they would have to be accessed 1358*0b57cec5SDimitry Andric // via AP, which may not be available at the particular place in the program. 1359*0b57cec5SDimitry Andric MachineFrameInfo &MFI = MF.getFrameInfo(); 1360*0b57cec5SDimitry Andric bool HasAlloca = MFI.hasVarSizedObjects(); 1361*0b57cec5SDimitry Andric bool NeedsAlign = (MFI.getMaxAlignment() > getStackAlignment()); 1362*0b57cec5SDimitry Andric 1363*0b57cec5SDimitry Andric if (!HasAlloca || !NeedsAlign) 1364*0b57cec5SDimitry Andric return; 1365*0b57cec5SDimitry Andric 1366*0b57cec5SDimitry Andric unsigned LFS = MFI.getLocalFrameSize(); 1367*0b57cec5SDimitry Andric for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { 1368*0b57cec5SDimitry Andric if (!MFI.isSpillSlotObjectIndex(i) || MFI.isDeadObjectIndex(i)) 1369*0b57cec5SDimitry Andric continue; 1370*0b57cec5SDimitry Andric unsigned S = MFI.getObjectSize(i); 1371*0b57cec5SDimitry Andric // Reduce the alignment to at most 8. This will require unaligned vector 1372*0b57cec5SDimitry Andric // stores if they happen here. 
1373*0b57cec5SDimitry Andric unsigned A = std::max(MFI.getObjectAlignment(i), 8U); 1374*0b57cec5SDimitry Andric MFI.setObjectAlignment(i, 8); 1375*0b57cec5SDimitry Andric LFS = alignTo(LFS+S, A); 1376*0b57cec5SDimitry Andric MFI.mapLocalFrameObject(i, -LFS); 1377*0b57cec5SDimitry Andric } 1378*0b57cec5SDimitry Andric 1379*0b57cec5SDimitry Andric MFI.setLocalFrameSize(LFS); 1380*0b57cec5SDimitry Andric unsigned A = MFI.getLocalFrameMaxAlign(); 1381*0b57cec5SDimitry Andric assert(A <= 8 && "Unexpected local frame alignment"); 1382*0b57cec5SDimitry Andric if (A == 0) 1383*0b57cec5SDimitry Andric MFI.setLocalFrameMaxAlign(8); 1384*0b57cec5SDimitry Andric MFI.setUseLocalStackAllocationBlock(true); 1385*0b57cec5SDimitry Andric 1386*0b57cec5SDimitry Andric // Set the physical aligned-stack base address register. 1387*0b57cec5SDimitry Andric unsigned AP = 0; 1388*0b57cec5SDimitry Andric if (const MachineInstr *AI = getAlignaInstr(MF)) 1389*0b57cec5SDimitry Andric AP = AI->getOperand(0).getReg(); 1390*0b57cec5SDimitry Andric auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>(); 1391*0b57cec5SDimitry Andric HMFI.setStackAlignBasePhysReg(AP); 1392*0b57cec5SDimitry Andric } 1393*0b57cec5SDimitry Andric 1394*0b57cec5SDimitry Andric /// Returns true if there are no caller-saved registers available in class RC. 
1395*0b57cec5SDimitry Andric static bool needToReserveScavengingSpillSlots(MachineFunction &MF, 1396*0b57cec5SDimitry Andric const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) { 1397*0b57cec5SDimitry Andric MachineRegisterInfo &MRI = MF.getRegInfo(); 1398*0b57cec5SDimitry Andric 1399*0b57cec5SDimitry Andric auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool { 1400*0b57cec5SDimitry Andric for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI) 1401*0b57cec5SDimitry Andric if (MRI.isPhysRegUsed(*AI)) 1402*0b57cec5SDimitry Andric return true; 1403*0b57cec5SDimitry Andric return false; 1404*0b57cec5SDimitry Andric }; 1405*0b57cec5SDimitry Andric 1406*0b57cec5SDimitry Andric // Check for an unused caller-saved register. Callee-saved registers 1407*0b57cec5SDimitry Andric // have become pristine by now. 1408*0b57cec5SDimitry Andric for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P) 1409*0b57cec5SDimitry Andric if (!IsUsed(*P)) 1410*0b57cec5SDimitry Andric return false; 1411*0b57cec5SDimitry Andric 1412*0b57cec5SDimitry Andric // All caller-saved registers are used. 
1413*0b57cec5SDimitry Andric return true; 1414*0b57cec5SDimitry Andric } 1415*0b57cec5SDimitry Andric 1416*0b57cec5SDimitry Andric #ifndef NDEBUG 1417*0b57cec5SDimitry Andric static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) { 1418*0b57cec5SDimitry Andric dbgs() << '{'; 1419*0b57cec5SDimitry Andric for (int x = Regs.find_first(); x >= 0; x = Regs.find_next(x)) { 1420*0b57cec5SDimitry Andric unsigned R = x; 1421*0b57cec5SDimitry Andric dbgs() << ' ' << printReg(R, &TRI); 1422*0b57cec5SDimitry Andric } 1423*0b57cec5SDimitry Andric dbgs() << " }"; 1424*0b57cec5SDimitry Andric } 1425*0b57cec5SDimitry Andric #endif 1426*0b57cec5SDimitry Andric 1427*0b57cec5SDimitry Andric bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF, 1428*0b57cec5SDimitry Andric const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const { 1429*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n'); 1430*0b57cec5SDimitry Andric MachineFrameInfo &MFI = MF.getFrameInfo(); 1431*0b57cec5SDimitry Andric BitVector SRegs(Hexagon::NUM_TARGET_REGS); 1432*0b57cec5SDimitry Andric 1433*0b57cec5SDimitry Andric // Generate a set of unique, callee-saved registers (SRegs), where each 1434*0b57cec5SDimitry Andric // register in the set is maximal in terms of sub-/super-register relation, 1435*0b57cec5SDimitry Andric // i.e. for each R in SRegs, no proper super-register of R is also in SRegs. 1436*0b57cec5SDimitry Andric 1437*0b57cec5SDimitry Andric // (1) For each callee-saved register, add that register and all of its 1438*0b57cec5SDimitry Andric // sub-registers to SRegs. 
1439*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Initial CS registers: {"); 1440*0b57cec5SDimitry Andric for (unsigned i = 0, n = CSI.size(); i < n; ++i) { 1441*0b57cec5SDimitry Andric unsigned R = CSI[i].getReg(); 1442*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI)); 1443*0b57cec5SDimitry Andric for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR) 1444*0b57cec5SDimitry Andric SRegs[*SR] = true; 1445*0b57cec5SDimitry Andric } 1446*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << " }\n"); 1447*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI); 1448*0b57cec5SDimitry Andric dbgs() << "\n"); 1449*0b57cec5SDimitry Andric 1450*0b57cec5SDimitry Andric // (2) For each reserved register, remove that register and all of its 1451*0b57cec5SDimitry Andric // sub- and super-registers from SRegs. 1452*0b57cec5SDimitry Andric BitVector Reserved = TRI->getReservedRegs(MF); 1453*0b57cec5SDimitry Andric for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) { 1454*0b57cec5SDimitry Andric unsigned R = x; 1455*0b57cec5SDimitry Andric for (MCSuperRegIterator SR(R, TRI, true); SR.isValid(); ++SR) 1456*0b57cec5SDimitry Andric SRegs[*SR] = false; 1457*0b57cec5SDimitry Andric } 1458*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI); 1459*0b57cec5SDimitry Andric dbgs() << "\n"); 1460*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI); 1461*0b57cec5SDimitry Andric dbgs() << "\n"); 1462*0b57cec5SDimitry Andric 1463*0b57cec5SDimitry Andric // (3) Collect all registers that have at least one sub-register in SRegs, 1464*0b57cec5SDimitry Andric // and also have no sub-registers that are reserved. These will be the can- 1465*0b57cec5SDimitry Andric // didates for saving as a whole instead of their individual sub-registers. 1466*0b57cec5SDimitry Andric // (Saving R17:16 instead of R16 is fine, but only if R17 was not reserved.) 
1467*0b57cec5SDimitry Andric BitVector TmpSup(Hexagon::NUM_TARGET_REGS); 1468*0b57cec5SDimitry Andric for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) { 1469*0b57cec5SDimitry Andric unsigned R = x; 1470*0b57cec5SDimitry Andric for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR) 1471*0b57cec5SDimitry Andric TmpSup[*SR] = true; 1472*0b57cec5SDimitry Andric } 1473*0b57cec5SDimitry Andric for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) { 1474*0b57cec5SDimitry Andric unsigned R = x; 1475*0b57cec5SDimitry Andric for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR) { 1476*0b57cec5SDimitry Andric if (!Reserved[*SR]) 1477*0b57cec5SDimitry Andric continue; 1478*0b57cec5SDimitry Andric TmpSup[R] = false; 1479*0b57cec5SDimitry Andric break; 1480*0b57cec5SDimitry Andric } 1481*0b57cec5SDimitry Andric } 1482*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI); 1483*0b57cec5SDimitry Andric dbgs() << "\n"); 1484*0b57cec5SDimitry Andric 1485*0b57cec5SDimitry Andric // (4) Include all super-registers found in (3) into SRegs. 1486*0b57cec5SDimitry Andric SRegs |= TmpSup; 1487*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI); 1488*0b57cec5SDimitry Andric dbgs() << "\n"); 1489*0b57cec5SDimitry Andric 1490*0b57cec5SDimitry Andric // (5) For each register R in SRegs, if any super-register of R is in SRegs, 1491*0b57cec5SDimitry Andric // remove R from SRegs. 
1492*0b57cec5SDimitry Andric for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) { 1493*0b57cec5SDimitry Andric unsigned R = x; 1494*0b57cec5SDimitry Andric for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR) { 1495*0b57cec5SDimitry Andric if (!SRegs[*SR]) 1496*0b57cec5SDimitry Andric continue; 1497*0b57cec5SDimitry Andric SRegs[R] = false; 1498*0b57cec5SDimitry Andric break; 1499*0b57cec5SDimitry Andric } 1500*0b57cec5SDimitry Andric } 1501*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI); 1502*0b57cec5SDimitry Andric dbgs() << "\n"); 1503*0b57cec5SDimitry Andric 1504*0b57cec5SDimitry Andric // Now, for each register that has a fixed stack slot, create the stack 1505*0b57cec5SDimitry Andric // object for it. 1506*0b57cec5SDimitry Andric CSI.clear(); 1507*0b57cec5SDimitry Andric 1508*0b57cec5SDimitry Andric using SpillSlot = TargetFrameLowering::SpillSlot; 1509*0b57cec5SDimitry Andric 1510*0b57cec5SDimitry Andric unsigned NumFixed; 1511*0b57cec5SDimitry Andric int MinOffset = 0; // CS offsets are negative. 1512*0b57cec5SDimitry Andric const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed); 1513*0b57cec5SDimitry Andric for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) { 1514*0b57cec5SDimitry Andric if (!SRegs[S->Reg]) 1515*0b57cec5SDimitry Andric continue; 1516*0b57cec5SDimitry Andric const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg); 1517*0b57cec5SDimitry Andric int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset); 1518*0b57cec5SDimitry Andric MinOffset = std::min(MinOffset, S->Offset); 1519*0b57cec5SDimitry Andric CSI.push_back(CalleeSavedInfo(S->Reg, FI)); 1520*0b57cec5SDimitry Andric SRegs[S->Reg] = false; 1521*0b57cec5SDimitry Andric } 1522*0b57cec5SDimitry Andric 1523*0b57cec5SDimitry Andric // There can be some registers that don't have fixed slots. 
For example, 1524*0b57cec5SDimitry Andric // we need to store R0-R3 in functions with exception handling. For each 1525*0b57cec5SDimitry Andric // such register, create a non-fixed stack object. 1526*0b57cec5SDimitry Andric for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) { 1527*0b57cec5SDimitry Andric unsigned R = x; 1528*0b57cec5SDimitry Andric const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R); 1529*0b57cec5SDimitry Andric unsigned Size = TRI->getSpillSize(*RC); 1530*0b57cec5SDimitry Andric int Off = MinOffset - Size; 1531*0b57cec5SDimitry Andric unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment()); 1532*0b57cec5SDimitry Andric assert(isPowerOf2_32(Align)); 1533*0b57cec5SDimitry Andric Off &= -Align; 1534*0b57cec5SDimitry Andric int FI = MFI.CreateFixedSpillStackObject(Size, Off); 1535*0b57cec5SDimitry Andric MinOffset = std::min(MinOffset, Off); 1536*0b57cec5SDimitry Andric CSI.push_back(CalleeSavedInfo(R, FI)); 1537*0b57cec5SDimitry Andric SRegs[R] = false; 1538*0b57cec5SDimitry Andric } 1539*0b57cec5SDimitry Andric 1540*0b57cec5SDimitry Andric LLVM_DEBUG({ 1541*0b57cec5SDimitry Andric dbgs() << "CS information: {"; 1542*0b57cec5SDimitry Andric for (unsigned i = 0, n = CSI.size(); i < n; ++i) { 1543*0b57cec5SDimitry Andric int FI = CSI[i].getFrameIdx(); 1544*0b57cec5SDimitry Andric int Off = MFI.getObjectOffset(FI); 1545*0b57cec5SDimitry Andric dbgs() << ' ' << printReg(CSI[i].getReg(), TRI) << ":fi#" << FI << ":sp"; 1546*0b57cec5SDimitry Andric if (Off >= 0) 1547*0b57cec5SDimitry Andric dbgs() << '+'; 1548*0b57cec5SDimitry Andric dbgs() << Off; 1549*0b57cec5SDimitry Andric } 1550*0b57cec5SDimitry Andric dbgs() << " }\n"; 1551*0b57cec5SDimitry Andric }); 1552*0b57cec5SDimitry Andric 1553*0b57cec5SDimitry Andric #ifndef NDEBUG 1554*0b57cec5SDimitry Andric // Verify that all registers were handled. 
1555*0b57cec5SDimitry Andric bool MissedReg = false; 1556*0b57cec5SDimitry Andric for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) { 1557*0b57cec5SDimitry Andric unsigned R = x; 1558*0b57cec5SDimitry Andric dbgs() << printReg(R, TRI) << ' '; 1559*0b57cec5SDimitry Andric MissedReg = true; 1560*0b57cec5SDimitry Andric } 1561*0b57cec5SDimitry Andric if (MissedReg) 1562*0b57cec5SDimitry Andric llvm_unreachable("...there are unhandled callee-saved registers!"); 1563*0b57cec5SDimitry Andric #endif 1564*0b57cec5SDimitry Andric 1565*0b57cec5SDimitry Andric return true; 1566*0b57cec5SDimitry Andric } 1567*0b57cec5SDimitry Andric 1568*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B, 1569*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1570*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1571*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1572*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1573*0b57cec5SDimitry Andric unsigned DstR = MI->getOperand(0).getReg(); 1574*0b57cec5SDimitry Andric unsigned SrcR = MI->getOperand(1).getReg(); 1575*0b57cec5SDimitry Andric if (!Hexagon::ModRegsRegClass.contains(DstR) || 1576*0b57cec5SDimitry Andric !Hexagon::ModRegsRegClass.contains(SrcR)) 1577*0b57cec5SDimitry Andric return false; 1578*0b57cec5SDimitry Andric 1579*0b57cec5SDimitry Andric unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 1580*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1)); 1581*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR) 1582*0b57cec5SDimitry Andric .addReg(TmpR, RegState::Kill); 1583*0b57cec5SDimitry Andric 1584*0b57cec5SDimitry Andric NewRegs.push_back(TmpR); 1585*0b57cec5SDimitry Andric B.erase(It); 1586*0b57cec5SDimitry Andric return true; 1587*0b57cec5SDimitry Andric } 1588*0b57cec5SDimitry Andric 
1589*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B, 1590*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1591*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1592*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1593*0b57cec5SDimitry Andric if (!MI->getOperand(0).isFI()) 1594*0b57cec5SDimitry Andric return false; 1595*0b57cec5SDimitry Andric 1596*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1597*0b57cec5SDimitry Andric unsigned Opc = MI->getOpcode(); 1598*0b57cec5SDimitry Andric unsigned SrcR = MI->getOperand(2).getReg(); 1599*0b57cec5SDimitry Andric bool IsKill = MI->getOperand(2).isKill(); 1600*0b57cec5SDimitry Andric int FI = MI->getOperand(0).getIndex(); 1601*0b57cec5SDimitry Andric 1602*0b57cec5SDimitry Andric // TmpR = C2_tfrpr SrcR if SrcR is a predicate register 1603*0b57cec5SDimitry Andric // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register 1604*0b57cec5SDimitry Andric unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 1605*0b57cec5SDimitry Andric unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? 
Hexagon::C2_tfrpr 1606*0b57cec5SDimitry Andric : Hexagon::A2_tfrcrr; 1607*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(TfrOpc), TmpR) 1608*0b57cec5SDimitry Andric .addReg(SrcR, getKillRegState(IsKill)); 1609*0b57cec5SDimitry Andric 1610*0b57cec5SDimitry Andric // S2_storeri_io FI, 0, TmpR 1611*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io)) 1612*0b57cec5SDimitry Andric .addFrameIndex(FI) 1613*0b57cec5SDimitry Andric .addImm(0) 1614*0b57cec5SDimitry Andric .addReg(TmpR, RegState::Kill) 1615*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1616*0b57cec5SDimitry Andric 1617*0b57cec5SDimitry Andric NewRegs.push_back(TmpR); 1618*0b57cec5SDimitry Andric B.erase(It); 1619*0b57cec5SDimitry Andric return true; 1620*0b57cec5SDimitry Andric } 1621*0b57cec5SDimitry Andric 1622*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B, 1623*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1624*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1625*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1626*0b57cec5SDimitry Andric if (!MI->getOperand(1).isFI()) 1627*0b57cec5SDimitry Andric return false; 1628*0b57cec5SDimitry Andric 1629*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1630*0b57cec5SDimitry Andric unsigned Opc = MI->getOpcode(); 1631*0b57cec5SDimitry Andric unsigned DstR = MI->getOperand(0).getReg(); 1632*0b57cec5SDimitry Andric int FI = MI->getOperand(1).getIndex(); 1633*0b57cec5SDimitry Andric 1634*0b57cec5SDimitry Andric // TmpR = L2_loadri_io FI, 0 1635*0b57cec5SDimitry Andric unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 1636*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR) 1637*0b57cec5SDimitry Andric .addFrameIndex(FI) 1638*0b57cec5SDimitry Andric .addImm(0) 1639*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1640*0b57cec5SDimitry Andric 1641*0b57cec5SDimitry Andric 
// DstR = C2_tfrrp TmpR if DstR is a predicate register 1642*0b57cec5SDimitry Andric // DstR = A2_tfrrcr TmpR if DstR is a modifier register 1643*0b57cec5SDimitry Andric unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp 1644*0b57cec5SDimitry Andric : Hexagon::A2_tfrrcr; 1645*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(TfrOpc), DstR) 1646*0b57cec5SDimitry Andric .addReg(TmpR, RegState::Kill); 1647*0b57cec5SDimitry Andric 1648*0b57cec5SDimitry Andric NewRegs.push_back(TmpR); 1649*0b57cec5SDimitry Andric B.erase(It); 1650*0b57cec5SDimitry Andric return true; 1651*0b57cec5SDimitry Andric } 1652*0b57cec5SDimitry Andric 1653*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B, 1654*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1655*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1656*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1657*0b57cec5SDimitry Andric if (!MI->getOperand(0).isFI()) 1658*0b57cec5SDimitry Andric return false; 1659*0b57cec5SDimitry Andric 1660*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1661*0b57cec5SDimitry Andric unsigned SrcR = MI->getOperand(2).getReg(); 1662*0b57cec5SDimitry Andric bool IsKill = MI->getOperand(2).isKill(); 1663*0b57cec5SDimitry Andric int FI = MI->getOperand(0).getIndex(); 1664*0b57cec5SDimitry Andric auto *RC = &Hexagon::HvxVRRegClass; 1665*0b57cec5SDimitry Andric 1666*0b57cec5SDimitry Andric // Insert transfer to general vector register. 
// Expand the pseudo reload of an HVX predicate register (PS_vloadrq_ai).
// Inverse of expandStoreVecPred: reload the spilled full vector into a
// scratch vector register, then compress it back into the predicate
// destination with V6_vandvrt using the same 0x01010101 constant.
bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineInstr *MI = &*It;
  // Operand 1 must be the frame index of the spill slot.
  if (!MI->getOperand(1).isFI())
    return false;

  DebugLoc DL = MI->getDebugLoc();
  unsigned DstR = MI->getOperand(0).getReg();
  int FI = MI->getOperand(1).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  // TmpR0 = A2_tfrsi 0x01010101
  // TmpR1 = load FI, 0
  // DstR = V6_vandvrt TmpR1, TmpR0
  unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  unsigned TmpR1 = MRI.createVirtualRegister(RC);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);
  MachineFunction &MF = *B.getParent();
  auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  // loadRegFromStackSlot inserts a vector reload immediately before It, so
  // std::prev(It) points at it; expand it into a real load right away.
  HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);

  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
    .addReg(TmpR1, RegState::Kill)
    .addReg(TmpR0, RegState::Kill);

  // Both scratch vregs may need scavenging slots later.
  NewRegs.push_back(TmpR0);
  NewRegs.push_back(TmpR1);
  B.erase(It);
  return true;
}
// Expand the pseudo store of a double HVX vector (PS_vstorerw_ai /
// PS_vstorerwu_ai) into up to two single-vector stores, one per
// subregister half. A half is stored only if it is actually live at the
// pseudo, so we never emit a store of an entirely undefined register.
bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  MachineInstr *MI = &*It;
  if (!MI->getOperand(0).isFI())
    return false;

  // It is possible that the double vector being stored is only partially
  // defined. From the point of view of the liveness tracking, it is ok to
  // store it as a whole, but if we break it up we may end up storing a
  // register that is entirely undefined.
  // Compute liveness at It by stepping forward from the block entry; the
  // Clobbers list is only needed to satisfy stepForward's interface.
  LivePhysRegs LPR(HRI);
  LPR.addLiveIns(B);
  SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
  for (auto R = B.begin(); R != It; ++R) {
    Clobbers.clear();
    LPR.stepForward(*R, Clobbers);
  }

  DebugLoc DL = MI->getDebugLoc();
  unsigned SrcR = MI->getOperand(2).getReg();
  unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
  unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  // Choose aligned vs. unaligned store opcodes per half: the low half
  // depends on the slot's own alignment, the high half (at offset Size)
  // on the alignment of that offset within the slot.
  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned StoreOpc;

  // Store low part.
  if (LPR.contains(SrcLo)) {
    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
        .addFrameIndex(FI)
        .addImm(0)
        .addReg(SrcLo, getKillRegState(IsKill))
        .cloneMemRefs(*MI);
  }

  // Store high part.
  if (LPR.contains(SrcHi)) {
    StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
                                                     : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
        .addFrameIndex(FI)
        .addImm(Size)
        .addReg(SrcHi, getKillRegState(IsKill))
        .cloneMemRefs(*MI);
  }

  B.erase(It);
  return true;
}
Hexagon::V6_vS32b_ai 1762*0b57cec5SDimitry Andric : Hexagon::V6_vS32Ub_ai; 1763*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(StoreOpc)) 1764*0b57cec5SDimitry Andric .addFrameIndex(FI) 1765*0b57cec5SDimitry Andric .addImm(0) 1766*0b57cec5SDimitry Andric .addReg(SrcLo, getKillRegState(IsKill)) 1767*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1768*0b57cec5SDimitry Andric } 1769*0b57cec5SDimitry Andric 1770*0b57cec5SDimitry Andric // Store high part. 1771*0b57cec5SDimitry Andric if (LPR.contains(SrcHi)) { 1772*0b57cec5SDimitry Andric StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai 1773*0b57cec5SDimitry Andric : Hexagon::V6_vS32Ub_ai; 1774*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(StoreOpc)) 1775*0b57cec5SDimitry Andric .addFrameIndex(FI) 1776*0b57cec5SDimitry Andric .addImm(Size) 1777*0b57cec5SDimitry Andric .addReg(SrcHi, getKillRegState(IsKill)) 1778*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1779*0b57cec5SDimitry Andric } 1780*0b57cec5SDimitry Andric 1781*0b57cec5SDimitry Andric B.erase(It); 1782*0b57cec5SDimitry Andric return true; 1783*0b57cec5SDimitry Andric } 1784*0b57cec5SDimitry Andric 1785*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B, 1786*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1787*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1788*0b57cec5SDimitry Andric MachineFunction &MF = *B.getParent(); 1789*0b57cec5SDimitry Andric auto &MFI = MF.getFrameInfo(); 1790*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 1791*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1792*0b57cec5SDimitry Andric if (!MI->getOperand(1).isFI()) 1793*0b57cec5SDimitry Andric return false; 1794*0b57cec5SDimitry Andric 1795*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1796*0b57cec5SDimitry Andric unsigned DstR = MI->getOperand(0).getReg(); 1797*0b57cec5SDimitry 
Andric unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi); 1798*0b57cec5SDimitry Andric unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo); 1799*0b57cec5SDimitry Andric int FI = MI->getOperand(1).getIndex(); 1800*0b57cec5SDimitry Andric 1801*0b57cec5SDimitry Andric unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass); 1802*0b57cec5SDimitry Andric unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); 1803*0b57cec5SDimitry Andric unsigned HasAlign = MFI.getObjectAlignment(FI); 1804*0b57cec5SDimitry Andric unsigned LoadOpc; 1805*0b57cec5SDimitry Andric 1806*0b57cec5SDimitry Andric // Load low part. 1807*0b57cec5SDimitry Andric LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai 1808*0b57cec5SDimitry Andric : Hexagon::V6_vL32Ub_ai; 1809*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(LoadOpc), DstLo) 1810*0b57cec5SDimitry Andric .addFrameIndex(FI) 1811*0b57cec5SDimitry Andric .addImm(0) 1812*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1813*0b57cec5SDimitry Andric 1814*0b57cec5SDimitry Andric // Load high part. 1815*0b57cec5SDimitry Andric LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? 
Hexagon::V6_vL32b_ai 1816*0b57cec5SDimitry Andric : Hexagon::V6_vL32Ub_ai; 1817*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(LoadOpc), DstHi) 1818*0b57cec5SDimitry Andric .addFrameIndex(FI) 1819*0b57cec5SDimitry Andric .addImm(Size) 1820*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1821*0b57cec5SDimitry Andric 1822*0b57cec5SDimitry Andric B.erase(It); 1823*0b57cec5SDimitry Andric return true; 1824*0b57cec5SDimitry Andric } 1825*0b57cec5SDimitry Andric 1826*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B, 1827*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1828*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1829*0b57cec5SDimitry Andric MachineFunction &MF = *B.getParent(); 1830*0b57cec5SDimitry Andric auto &MFI = MF.getFrameInfo(); 1831*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1832*0b57cec5SDimitry Andric if (!MI->getOperand(0).isFI()) 1833*0b57cec5SDimitry Andric return false; 1834*0b57cec5SDimitry Andric 1835*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 1836*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1837*0b57cec5SDimitry Andric unsigned SrcR = MI->getOperand(2).getReg(); 1838*0b57cec5SDimitry Andric bool IsKill = MI->getOperand(2).isKill(); 1839*0b57cec5SDimitry Andric int FI = MI->getOperand(0).getIndex(); 1840*0b57cec5SDimitry Andric 1841*0b57cec5SDimitry Andric unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); 1842*0b57cec5SDimitry Andric unsigned HasAlign = MFI.getObjectAlignment(FI); 1843*0b57cec5SDimitry Andric unsigned StoreOpc = NeedAlign <= HasAlign ? 
Hexagon::V6_vS32b_ai 1844*0b57cec5SDimitry Andric : Hexagon::V6_vS32Ub_ai; 1845*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(StoreOpc)) 1846*0b57cec5SDimitry Andric .addFrameIndex(FI) 1847*0b57cec5SDimitry Andric .addImm(0) 1848*0b57cec5SDimitry Andric .addReg(SrcR, getKillRegState(IsKill)) 1849*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1850*0b57cec5SDimitry Andric 1851*0b57cec5SDimitry Andric B.erase(It); 1852*0b57cec5SDimitry Andric return true; 1853*0b57cec5SDimitry Andric } 1854*0b57cec5SDimitry Andric 1855*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B, 1856*0b57cec5SDimitry Andric MachineBasicBlock::iterator It, MachineRegisterInfo &MRI, 1857*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const { 1858*0b57cec5SDimitry Andric MachineFunction &MF = *B.getParent(); 1859*0b57cec5SDimitry Andric auto &MFI = MF.getFrameInfo(); 1860*0b57cec5SDimitry Andric MachineInstr *MI = &*It; 1861*0b57cec5SDimitry Andric if (!MI->getOperand(1).isFI()) 1862*0b57cec5SDimitry Andric return false; 1863*0b57cec5SDimitry Andric 1864*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 1865*0b57cec5SDimitry Andric DebugLoc DL = MI->getDebugLoc(); 1866*0b57cec5SDimitry Andric unsigned DstR = MI->getOperand(0).getReg(); 1867*0b57cec5SDimitry Andric int FI = MI->getOperand(1).getIndex(); 1868*0b57cec5SDimitry Andric 1869*0b57cec5SDimitry Andric unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); 1870*0b57cec5SDimitry Andric unsigned HasAlign = MFI.getObjectAlignment(FI); 1871*0b57cec5SDimitry Andric unsigned LoadOpc = NeedAlign <= HasAlign ? 
Hexagon::V6_vL32b_ai 1872*0b57cec5SDimitry Andric : Hexagon::V6_vL32Ub_ai; 1873*0b57cec5SDimitry Andric BuildMI(B, It, DL, HII.get(LoadOpc), DstR) 1874*0b57cec5SDimitry Andric .addFrameIndex(FI) 1875*0b57cec5SDimitry Andric .addImm(0) 1876*0b57cec5SDimitry Andric .cloneMemRefs(*MI); 1877*0b57cec5SDimitry Andric 1878*0b57cec5SDimitry Andric B.erase(It); 1879*0b57cec5SDimitry Andric return true; 1880*0b57cec5SDimitry Andric } 1881*0b57cec5SDimitry Andric 1882*0b57cec5SDimitry Andric bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF, 1883*0b57cec5SDimitry Andric SmallVectorImpl<unsigned> &NewRegs) const { 1884*0b57cec5SDimitry Andric auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo(); 1885*0b57cec5SDimitry Andric MachineRegisterInfo &MRI = MF.getRegInfo(); 1886*0b57cec5SDimitry Andric bool Changed = false; 1887*0b57cec5SDimitry Andric 1888*0b57cec5SDimitry Andric for (auto &B : MF) { 1889*0b57cec5SDimitry Andric // Traverse the basic block. 1890*0b57cec5SDimitry Andric MachineBasicBlock::iterator NextI; 1891*0b57cec5SDimitry Andric for (auto I = B.begin(), E = B.end(); I != E; I = NextI) { 1892*0b57cec5SDimitry Andric MachineInstr *MI = &*I; 1893*0b57cec5SDimitry Andric NextI = std::next(I); 1894*0b57cec5SDimitry Andric unsigned Opc = MI->getOpcode(); 1895*0b57cec5SDimitry Andric 1896*0b57cec5SDimitry Andric switch (Opc) { 1897*0b57cec5SDimitry Andric case TargetOpcode::COPY: 1898*0b57cec5SDimitry Andric Changed |= expandCopy(B, I, MRI, HII, NewRegs); 1899*0b57cec5SDimitry Andric break; 1900*0b57cec5SDimitry Andric case Hexagon::STriw_pred: 1901*0b57cec5SDimitry Andric case Hexagon::STriw_ctr: 1902*0b57cec5SDimitry Andric Changed |= expandStoreInt(B, I, MRI, HII, NewRegs); 1903*0b57cec5SDimitry Andric break; 1904*0b57cec5SDimitry Andric case Hexagon::LDriw_pred: 1905*0b57cec5SDimitry Andric case Hexagon::LDriw_ctr: 1906*0b57cec5SDimitry Andric Changed |= expandLoadInt(B, I, MRI, HII, NewRegs); 1907*0b57cec5SDimitry Andric break; 
// Determine which callee-saved registers must be spilled, expand spill
// pseudos, and reserve register-scavenger spill slots where scavenging
// might itself need to spill.
void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  SavedRegs.resize(HRI.getNumRegs());

  // If we have a function containing __builtin_eh_return we want to spill and
  // restore all callee saved registers. Pretend that they are used.
  if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
    for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
      SavedRegs.set(*R);

  // Replace predicate register pseudo spill code.
  // NewRegs collects every scratch virtual register created by the
  // expansions; their classes determine which scavenging slots we reserve.
  SmallVector<unsigned,8> NewRegs;
  expandSpillMacros(MF, NewRegs);
  if (OptimizeSpillSlots && !isOptNone(MF))
    optimizeSpillSlots(MF, NewRegs);

  // We need to reserve a spill slot if scavenging could potentially require
  // spilling a scavenged register.
  if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &MRI = MF.getRegInfo();
    // Deduplicate register classes: one set of slots per class.
    SetVector<const TargetRegisterClass*> SpillRCs;
    // Reserve an int register in any case, because it could be used to hold
    // the stack offset in case it does not fit into a spill instruction.
    SpillRCs.insert(&Hexagon::IntRegsRegClass);

    for (unsigned VR : NewRegs)
      SpillRCs.insert(MRI.getRegClass(VR));

    for (auto *RC : SpillRCs) {
      if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
        continue;
      // Integer registers get multiple scavenging slots; other classes one.
      unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
      unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
      for (unsigned i = 0; i < Num; i++) {
        int NewFI = MFI.CreateSpillStackObject(S, A);
        RS->addScavengingFrameIndex(NewFI);
      }
    }
  }

  // Let the generic implementation finish the callee-saved computation.
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
}
NumberScavengerSlots : 1; 1965*0b57cec5SDimitry Andric unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC); 1966*0b57cec5SDimitry Andric for (unsigned i = 0; i < Num; i++) { 1967*0b57cec5SDimitry Andric int NewFI = MFI.CreateSpillStackObject(S, A); 1968*0b57cec5SDimitry Andric RS->addScavengingFrameIndex(NewFI); 1969*0b57cec5SDimitry Andric } 1970*0b57cec5SDimitry Andric } 1971*0b57cec5SDimitry Andric } 1972*0b57cec5SDimitry Andric 1973*0b57cec5SDimitry Andric TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 1974*0b57cec5SDimitry Andric } 1975*0b57cec5SDimitry Andric 1976*0b57cec5SDimitry Andric unsigned HexagonFrameLowering::findPhysReg(MachineFunction &MF, 1977*0b57cec5SDimitry Andric HexagonBlockRanges::IndexRange &FIR, 1978*0b57cec5SDimitry Andric HexagonBlockRanges::InstrIndexMap &IndexMap, 1979*0b57cec5SDimitry Andric HexagonBlockRanges::RegToRangeMap &DeadMap, 1980*0b57cec5SDimitry Andric const TargetRegisterClass *RC) const { 1981*0b57cec5SDimitry Andric auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); 1982*0b57cec5SDimitry Andric auto &MRI = MF.getRegInfo(); 1983*0b57cec5SDimitry Andric 1984*0b57cec5SDimitry Andric auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool { 1985*0b57cec5SDimitry Andric auto F = DeadMap.find({Reg,0}); 1986*0b57cec5SDimitry Andric if (F == DeadMap.end()) 1987*0b57cec5SDimitry Andric return false; 1988*0b57cec5SDimitry Andric for (auto &DR : F->second) 1989*0b57cec5SDimitry Andric if (DR.contains(FIR)) 1990*0b57cec5SDimitry Andric return true; 1991*0b57cec5SDimitry Andric return false; 1992*0b57cec5SDimitry Andric }; 1993*0b57cec5SDimitry Andric 1994*0b57cec5SDimitry Andric for (unsigned Reg : RC->getRawAllocationOrder(MF)) { 1995*0b57cec5SDimitry Andric bool Dead = true; 1996*0b57cec5SDimitry Andric for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) { 1997*0b57cec5SDimitry Andric if (isDead(R.Reg)) 1998*0b57cec5SDimitry Andric continue; 1999*0b57cec5SDimitry Andric 
Dead = false; 2000*0b57cec5SDimitry Andric break; 2001*0b57cec5SDimitry Andric } 2002*0b57cec5SDimitry Andric if (Dead) 2003*0b57cec5SDimitry Andric return Reg; 2004*0b57cec5SDimitry Andric } 2005*0b57cec5SDimitry Andric return 0; 2006*0b57cec5SDimitry Andric } 2007*0b57cec5SDimitry Andric 2008*0b57cec5SDimitry Andric void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, 2009*0b57cec5SDimitry Andric SmallVectorImpl<unsigned> &VRegs) const { 2010*0b57cec5SDimitry Andric auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2011*0b57cec5SDimitry Andric auto &HII = *HST.getInstrInfo(); 2012*0b57cec5SDimitry Andric auto &HRI = *HST.getRegisterInfo(); 2013*0b57cec5SDimitry Andric auto &MRI = MF.getRegInfo(); 2014*0b57cec5SDimitry Andric HexagonBlockRanges HBR(MF); 2015*0b57cec5SDimitry Andric 2016*0b57cec5SDimitry Andric using BlockIndexMap = 2017*0b57cec5SDimitry Andric std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>; 2018*0b57cec5SDimitry Andric using BlockRangeMap = 2019*0b57cec5SDimitry Andric std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>; 2020*0b57cec5SDimitry Andric using IndexType = HexagonBlockRanges::IndexType; 2021*0b57cec5SDimitry Andric 2022*0b57cec5SDimitry Andric struct SlotInfo { 2023*0b57cec5SDimitry Andric BlockRangeMap Map; 2024*0b57cec5SDimitry Andric unsigned Size = 0; 2025*0b57cec5SDimitry Andric const TargetRegisterClass *RC = nullptr; 2026*0b57cec5SDimitry Andric 2027*0b57cec5SDimitry Andric SlotInfo() = default; 2028*0b57cec5SDimitry Andric }; 2029*0b57cec5SDimitry Andric 2030*0b57cec5SDimitry Andric BlockIndexMap BlockIndexes; 2031*0b57cec5SDimitry Andric SmallSet<int,4> BadFIs; 2032*0b57cec5SDimitry Andric std::map<int,SlotInfo> FIRangeMap; 2033*0b57cec5SDimitry Andric 2034*0b57cec5SDimitry Andric // Accumulate register classes: get a common class for a pre-existing 2035*0b57cec5SDimitry Andric // class HaveRC and a new class NewRC. 
Return nullptr if a common class 2036*0b57cec5SDimitry Andric // cannot be found, otherwise return the resulting class. If HaveRC is 2037*0b57cec5SDimitry Andric // nullptr, assume that it is still unset. 2038*0b57cec5SDimitry Andric auto getCommonRC = 2039*0b57cec5SDimitry Andric [](const TargetRegisterClass *HaveRC, 2040*0b57cec5SDimitry Andric const TargetRegisterClass *NewRC) -> const TargetRegisterClass * { 2041*0b57cec5SDimitry Andric if (HaveRC == nullptr || HaveRC == NewRC) 2042*0b57cec5SDimitry Andric return NewRC; 2043*0b57cec5SDimitry Andric // Different classes, both non-null. Pick the more general one. 2044*0b57cec5SDimitry Andric if (HaveRC->hasSubClassEq(NewRC)) 2045*0b57cec5SDimitry Andric return HaveRC; 2046*0b57cec5SDimitry Andric if (NewRC->hasSubClassEq(HaveRC)) 2047*0b57cec5SDimitry Andric return NewRC; 2048*0b57cec5SDimitry Andric return nullptr; 2049*0b57cec5SDimitry Andric }; 2050*0b57cec5SDimitry Andric 2051*0b57cec5SDimitry Andric // Scan all blocks in the function. Check all occurrences of frame indexes, 2052*0b57cec5SDimitry Andric // and collect relevant information. 2053*0b57cec5SDimitry Andric for (auto &B : MF) { 2054*0b57cec5SDimitry Andric std::map<int,IndexType> LastStore, LastLoad; 2055*0b57cec5SDimitry Andric // Emplace appears not to be supported in gcc 4.7.2-4. 
2056*0b57cec5SDimitry Andric //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B)); 2057*0b57cec5SDimitry Andric auto P = BlockIndexes.insert( 2058*0b57cec5SDimitry Andric std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B))); 2059*0b57cec5SDimitry Andric auto &IndexMap = P.first->second; 2060*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n" 2061*0b57cec5SDimitry Andric << IndexMap << '\n'); 2062*0b57cec5SDimitry Andric 2063*0b57cec5SDimitry Andric for (auto &In : B) { 2064*0b57cec5SDimitry Andric int LFI, SFI; 2065*0b57cec5SDimitry Andric bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In); 2066*0b57cec5SDimitry Andric bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In); 2067*0b57cec5SDimitry Andric if (Load && Store) { 2068*0b57cec5SDimitry Andric // If it's both a load and a store, then we won't handle it. 2069*0b57cec5SDimitry Andric BadFIs.insert(LFI); 2070*0b57cec5SDimitry Andric BadFIs.insert(SFI); 2071*0b57cec5SDimitry Andric continue; 2072*0b57cec5SDimitry Andric } 2073*0b57cec5SDimitry Andric // Check for register classes of the register used as the source for 2074*0b57cec5SDimitry Andric // the store, and the register used as the destination for the load. 2075*0b57cec5SDimitry Andric // Also, only accept base+imm_offset addressing modes. Other addressing 2076*0b57cec5SDimitry Andric // modes can have side-effects (post-increments, etc.). For stack 2077*0b57cec5SDimitry Andric // slots they are very unlikely, so there is not much loss due to 2078*0b57cec5SDimitry Andric // this restriction. 2079*0b57cec5SDimitry Andric if (Load || Store) { 2080*0b57cec5SDimitry Andric int TFI = Load ? 
LFI : SFI; 2081*0b57cec5SDimitry Andric unsigned AM = HII.getAddrMode(In); 2082*0b57cec5SDimitry Andric SlotInfo &SI = FIRangeMap[TFI]; 2083*0b57cec5SDimitry Andric bool Bad = (AM != HexagonII::BaseImmOffset); 2084*0b57cec5SDimitry Andric if (!Bad) { 2085*0b57cec5SDimitry Andric // If the addressing mode is ok, check the register class. 2086*0b57cec5SDimitry Andric unsigned OpNum = Load ? 0 : 2; 2087*0b57cec5SDimitry Andric auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF); 2088*0b57cec5SDimitry Andric RC = getCommonRC(SI.RC, RC); 2089*0b57cec5SDimitry Andric if (RC == nullptr) 2090*0b57cec5SDimitry Andric Bad = true; 2091*0b57cec5SDimitry Andric else 2092*0b57cec5SDimitry Andric SI.RC = RC; 2093*0b57cec5SDimitry Andric } 2094*0b57cec5SDimitry Andric if (!Bad) { 2095*0b57cec5SDimitry Andric // Check sizes. 2096*0b57cec5SDimitry Andric unsigned S = HII.getMemAccessSize(In); 2097*0b57cec5SDimitry Andric if (SI.Size != 0 && SI.Size != S) 2098*0b57cec5SDimitry Andric Bad = true; 2099*0b57cec5SDimitry Andric else 2100*0b57cec5SDimitry Andric SI.Size = S; 2101*0b57cec5SDimitry Andric } 2102*0b57cec5SDimitry Andric if (!Bad) { 2103*0b57cec5SDimitry Andric for (auto *Mo : In.memoperands()) { 2104*0b57cec5SDimitry Andric if (!Mo->isVolatile() && !Mo->isAtomic()) 2105*0b57cec5SDimitry Andric continue; 2106*0b57cec5SDimitry Andric Bad = true; 2107*0b57cec5SDimitry Andric break; 2108*0b57cec5SDimitry Andric } 2109*0b57cec5SDimitry Andric } 2110*0b57cec5SDimitry Andric if (Bad) 2111*0b57cec5SDimitry Andric BadFIs.insert(TFI); 2112*0b57cec5SDimitry Andric } 2113*0b57cec5SDimitry Andric 2114*0b57cec5SDimitry Andric // Locate uses of frame indices. 
2115*0b57cec5SDimitry Andric for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) { 2116*0b57cec5SDimitry Andric const MachineOperand &Op = In.getOperand(i); 2117*0b57cec5SDimitry Andric if (!Op.isFI()) 2118*0b57cec5SDimitry Andric continue; 2119*0b57cec5SDimitry Andric int FI = Op.getIndex(); 2120*0b57cec5SDimitry Andric // Make sure that the following operand is an immediate and that 2121*0b57cec5SDimitry Andric // it is 0. This is the offset in the stack object. 2122*0b57cec5SDimitry Andric if (i+1 >= n || !In.getOperand(i+1).isImm() || 2123*0b57cec5SDimitry Andric In.getOperand(i+1).getImm() != 0) 2124*0b57cec5SDimitry Andric BadFIs.insert(FI); 2125*0b57cec5SDimitry Andric if (BadFIs.count(FI)) 2126*0b57cec5SDimitry Andric continue; 2127*0b57cec5SDimitry Andric 2128*0b57cec5SDimitry Andric IndexType Index = IndexMap.getIndex(&In); 2129*0b57cec5SDimitry Andric if (Load) { 2130*0b57cec5SDimitry Andric if (LastStore[FI] == IndexType::None) 2131*0b57cec5SDimitry Andric LastStore[FI] = IndexType::Entry; 2132*0b57cec5SDimitry Andric LastLoad[FI] = Index; 2133*0b57cec5SDimitry Andric } else if (Store) { 2134*0b57cec5SDimitry Andric HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B]; 2135*0b57cec5SDimitry Andric if (LastStore[FI] != IndexType::None) 2136*0b57cec5SDimitry Andric RL.add(LastStore[FI], LastLoad[FI], false, false); 2137*0b57cec5SDimitry Andric else if (LastLoad[FI] != IndexType::None) 2138*0b57cec5SDimitry Andric RL.add(IndexType::Entry, LastLoad[FI], false, false); 2139*0b57cec5SDimitry Andric LastLoad[FI] = IndexType::None; 2140*0b57cec5SDimitry Andric LastStore[FI] = Index; 2141*0b57cec5SDimitry Andric } else { 2142*0b57cec5SDimitry Andric BadFIs.insert(FI); 2143*0b57cec5SDimitry Andric } 2144*0b57cec5SDimitry Andric } 2145*0b57cec5SDimitry Andric } 2146*0b57cec5SDimitry Andric 2147*0b57cec5SDimitry Andric for (auto &I : LastLoad) { 2148*0b57cec5SDimitry Andric IndexType LL = I.second; 2149*0b57cec5SDimitry Andric if (LL == IndexType::None) 
2150*0b57cec5SDimitry Andric continue; 2151*0b57cec5SDimitry Andric auto &RL = FIRangeMap[I.first].Map[&B]; 2152*0b57cec5SDimitry Andric IndexType &LS = LastStore[I.first]; 2153*0b57cec5SDimitry Andric if (LS != IndexType::None) 2154*0b57cec5SDimitry Andric RL.add(LS, LL, false, false); 2155*0b57cec5SDimitry Andric else 2156*0b57cec5SDimitry Andric RL.add(IndexType::Entry, LL, false, false); 2157*0b57cec5SDimitry Andric LS = IndexType::None; 2158*0b57cec5SDimitry Andric } 2159*0b57cec5SDimitry Andric for (auto &I : LastStore) { 2160*0b57cec5SDimitry Andric IndexType LS = I.second; 2161*0b57cec5SDimitry Andric if (LS == IndexType::None) 2162*0b57cec5SDimitry Andric continue; 2163*0b57cec5SDimitry Andric auto &RL = FIRangeMap[I.first].Map[&B]; 2164*0b57cec5SDimitry Andric RL.add(LS, IndexType::None, false, false); 2165*0b57cec5SDimitry Andric } 2166*0b57cec5SDimitry Andric } 2167*0b57cec5SDimitry Andric 2168*0b57cec5SDimitry Andric LLVM_DEBUG({ 2169*0b57cec5SDimitry Andric for (auto &P : FIRangeMap) { 2170*0b57cec5SDimitry Andric dbgs() << "fi#" << P.first; 2171*0b57cec5SDimitry Andric if (BadFIs.count(P.first)) 2172*0b57cec5SDimitry Andric dbgs() << " (bad)"; 2173*0b57cec5SDimitry Andric dbgs() << " RC: "; 2174*0b57cec5SDimitry Andric if (P.second.RC != nullptr) 2175*0b57cec5SDimitry Andric dbgs() << HRI.getRegClassName(P.second.RC) << '\n'; 2176*0b57cec5SDimitry Andric else 2177*0b57cec5SDimitry Andric dbgs() << "<null>\n"; 2178*0b57cec5SDimitry Andric for (auto &R : P.second.Map) 2179*0b57cec5SDimitry Andric dbgs() << " " << printMBBReference(*R.first) << " { " << R.second 2180*0b57cec5SDimitry Andric << "}\n"; 2181*0b57cec5SDimitry Andric } 2182*0b57cec5SDimitry Andric }); 2183*0b57cec5SDimitry Andric 2184*0b57cec5SDimitry Andric // When a slot is loaded from in a block without being stored to in the 2185*0b57cec5SDimitry Andric // same block, it is live-on-entry to this block. 
To avoid CFG analysis, 2186*0b57cec5SDimitry Andric // consider this slot to be live-on-exit from all blocks. 2187*0b57cec5SDimitry Andric SmallSet<int,4> LoxFIs; 2188*0b57cec5SDimitry Andric 2189*0b57cec5SDimitry Andric std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap; 2190*0b57cec5SDimitry Andric 2191*0b57cec5SDimitry Andric for (auto &P : FIRangeMap) { 2192*0b57cec5SDimitry Andric // P = pair(FI, map: BB->RangeList) 2193*0b57cec5SDimitry Andric if (BadFIs.count(P.first)) 2194*0b57cec5SDimitry Andric continue; 2195*0b57cec5SDimitry Andric for (auto &B : MF) { 2196*0b57cec5SDimitry Andric auto F = P.second.Map.find(&B); 2197*0b57cec5SDimitry Andric // F = pair(BB, RangeList) 2198*0b57cec5SDimitry Andric if (F == P.second.Map.end() || F->second.empty()) 2199*0b57cec5SDimitry Andric continue; 2200*0b57cec5SDimitry Andric HexagonBlockRanges::IndexRange &IR = F->second.front(); 2201*0b57cec5SDimitry Andric if (IR.start() == IndexType::Entry) 2202*0b57cec5SDimitry Andric LoxFIs.insert(P.first); 2203*0b57cec5SDimitry Andric BlockFIMap[&B].push_back(P.first); 2204*0b57cec5SDimitry Andric } 2205*0b57cec5SDimitry Andric } 2206*0b57cec5SDimitry Andric 2207*0b57cec5SDimitry Andric LLVM_DEBUG({ 2208*0b57cec5SDimitry Andric dbgs() << "Block-to-FI map (* -- live-on-exit):\n"; 2209*0b57cec5SDimitry Andric for (auto &P : BlockFIMap) { 2210*0b57cec5SDimitry Andric auto &FIs = P.second; 2211*0b57cec5SDimitry Andric if (FIs.empty()) 2212*0b57cec5SDimitry Andric continue; 2213*0b57cec5SDimitry Andric dbgs() << " " << printMBBReference(*P.first) << ": {"; 2214*0b57cec5SDimitry Andric for (auto I : FIs) { 2215*0b57cec5SDimitry Andric dbgs() << " fi#" << I; 2216*0b57cec5SDimitry Andric if (LoxFIs.count(I)) 2217*0b57cec5SDimitry Andric dbgs() << '*'; 2218*0b57cec5SDimitry Andric } 2219*0b57cec5SDimitry Andric dbgs() << " }\n"; 2220*0b57cec5SDimitry Andric } 2221*0b57cec5SDimitry Andric }); 2222*0b57cec5SDimitry Andric 2223*0b57cec5SDimitry Andric #ifndef NDEBUG 
2224*0b57cec5SDimitry Andric bool HasOptLimit = SpillOptMax.getPosition(); 2225*0b57cec5SDimitry Andric #endif 2226*0b57cec5SDimitry Andric 2227*0b57cec5SDimitry Andric // eliminate loads, when all loads eliminated, eliminate all stores. 2228*0b57cec5SDimitry Andric for (auto &B : MF) { 2229*0b57cec5SDimitry Andric auto F = BlockIndexes.find(&B); 2230*0b57cec5SDimitry Andric assert(F != BlockIndexes.end()); 2231*0b57cec5SDimitry Andric HexagonBlockRanges::InstrIndexMap &IM = F->second; 2232*0b57cec5SDimitry Andric HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM); 2233*0b57cec5SDimitry Andric HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM); 2234*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n" 2235*0b57cec5SDimitry Andric << HexagonBlockRanges::PrintRangeMap(DM, HRI)); 2236*0b57cec5SDimitry Andric 2237*0b57cec5SDimitry Andric for (auto FI : BlockFIMap[&B]) { 2238*0b57cec5SDimitry Andric if (BadFIs.count(FI)) 2239*0b57cec5SDimitry Andric continue; 2240*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n'); 2241*0b57cec5SDimitry Andric HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B]; 2242*0b57cec5SDimitry Andric for (auto &Range : RL) { 2243*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n'); 2244*0b57cec5SDimitry Andric if (!IndexType::isInstr(Range.start()) || 2245*0b57cec5SDimitry Andric !IndexType::isInstr(Range.end())) 2246*0b57cec5SDimitry Andric continue; 2247*0b57cec5SDimitry Andric MachineInstr &SI = *IM.getInstr(Range.start()); 2248*0b57cec5SDimitry Andric MachineInstr &EI = *IM.getInstr(Range.end()); 2249*0b57cec5SDimitry Andric assert(SI.mayStore() && "Unexpected start instruction"); 2250*0b57cec5SDimitry Andric assert(EI.mayLoad() && "Unexpected end instruction"); 2251*0b57cec5SDimitry Andric MachineOperand &SrcOp = SI.getOperand(2); 2252*0b57cec5SDimitry Andric 2253*0b57cec5SDimitry Andric HexagonBlockRanges::RegisterRef SrcRR 
= { SrcOp.getReg(), 2254*0b57cec5SDimitry Andric SrcOp.getSubReg() }; 2255*0b57cec5SDimitry Andric auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF); 2256*0b57cec5SDimitry Andric // The this-> is needed to unconfuse MSVC. 2257*0b57cec5SDimitry Andric unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC); 2258*0b57cec5SDimitry Andric LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI) 2259*0b57cec5SDimitry Andric << '\n'); 2260*0b57cec5SDimitry Andric if (FoundR == 0) 2261*0b57cec5SDimitry Andric continue; 2262*0b57cec5SDimitry Andric #ifndef NDEBUG 2263*0b57cec5SDimitry Andric if (HasOptLimit) { 2264*0b57cec5SDimitry Andric if (SpillOptCount >= SpillOptMax) 2265*0b57cec5SDimitry Andric return; 2266*0b57cec5SDimitry Andric SpillOptCount++; 2267*0b57cec5SDimitry Andric } 2268*0b57cec5SDimitry Andric #endif 2269*0b57cec5SDimitry Andric 2270*0b57cec5SDimitry Andric // Generate the copy-in: "FoundR = COPY SrcR" at the store location. 2271*0b57cec5SDimitry Andric MachineBasicBlock::iterator StartIt = SI.getIterator(), NextIt; 2272*0b57cec5SDimitry Andric MachineInstr *CopyIn = nullptr; 2273*0b57cec5SDimitry Andric if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) { 2274*0b57cec5SDimitry Andric const DebugLoc &DL = SI.getDebugLoc(); 2275*0b57cec5SDimitry Andric CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR) 2276*0b57cec5SDimitry Andric .add(SrcOp); 2277*0b57cec5SDimitry Andric } 2278*0b57cec5SDimitry Andric 2279*0b57cec5SDimitry Andric ++StartIt; 2280*0b57cec5SDimitry Andric // Check if this is a last store and the FI is live-on-exit. 2281*0b57cec5SDimitry Andric if (LoxFIs.count(FI) && (&Range == &RL.back())) { 2282*0b57cec5SDimitry Andric // Update store's source register. 
2283*0b57cec5SDimitry Andric if (unsigned SR = SrcOp.getSubReg()) 2284*0b57cec5SDimitry Andric SrcOp.setReg(HRI.getSubReg(FoundR, SR)); 2285*0b57cec5SDimitry Andric else 2286*0b57cec5SDimitry Andric SrcOp.setReg(FoundR); 2287*0b57cec5SDimitry Andric SrcOp.setSubReg(0); 2288*0b57cec5SDimitry Andric // We are keeping this register live. 2289*0b57cec5SDimitry Andric SrcOp.setIsKill(false); 2290*0b57cec5SDimitry Andric } else { 2291*0b57cec5SDimitry Andric B.erase(&SI); 2292*0b57cec5SDimitry Andric IM.replaceInstr(&SI, CopyIn); 2293*0b57cec5SDimitry Andric } 2294*0b57cec5SDimitry Andric 2295*0b57cec5SDimitry Andric auto EndIt = std::next(EI.getIterator()); 2296*0b57cec5SDimitry Andric for (auto It = StartIt; It != EndIt; It = NextIt) { 2297*0b57cec5SDimitry Andric MachineInstr &MI = *It; 2298*0b57cec5SDimitry Andric NextIt = std::next(It); 2299*0b57cec5SDimitry Andric int TFI; 2300*0b57cec5SDimitry Andric if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI) 2301*0b57cec5SDimitry Andric continue; 2302*0b57cec5SDimitry Andric unsigned DstR = MI.getOperand(0).getReg(); 2303*0b57cec5SDimitry Andric assert(MI.getOperand(0).getSubReg() == 0); 2304*0b57cec5SDimitry Andric MachineInstr *CopyOut = nullptr; 2305*0b57cec5SDimitry Andric if (DstR != FoundR) { 2306*0b57cec5SDimitry Andric DebugLoc DL = MI.getDebugLoc(); 2307*0b57cec5SDimitry Andric unsigned MemSize = HII.getMemAccessSize(MI); 2308*0b57cec5SDimitry Andric assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset); 2309*0b57cec5SDimitry Andric unsigned CopyOpc = TargetOpcode::COPY; 2310*0b57cec5SDimitry Andric if (HII.isSignExtendingLoad(MI)) 2311*0b57cec5SDimitry Andric CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth; 2312*0b57cec5SDimitry Andric else if (HII.isZeroExtendingLoad(MI)) 2313*0b57cec5SDimitry Andric CopyOpc = (MemSize == 1) ? 
Hexagon::A2_zxtb : Hexagon::A2_zxth; 2314*0b57cec5SDimitry Andric CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR) 2315*0b57cec5SDimitry Andric .addReg(FoundR, getKillRegState(&MI == &EI)); 2316*0b57cec5SDimitry Andric } 2317*0b57cec5SDimitry Andric IM.replaceInstr(&MI, CopyOut); 2318*0b57cec5SDimitry Andric B.erase(It); 2319*0b57cec5SDimitry Andric } 2320*0b57cec5SDimitry Andric 2321*0b57cec5SDimitry Andric // Update the dead map. 2322*0b57cec5SDimitry Andric HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 }; 2323*0b57cec5SDimitry Andric for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI)) 2324*0b57cec5SDimitry Andric DM[RR].subtract(Range); 2325*0b57cec5SDimitry Andric } // for Range in range list 2326*0b57cec5SDimitry Andric } 2327*0b57cec5SDimitry Andric } 2328*0b57cec5SDimitry Andric } 2329*0b57cec5SDimitry Andric 2330*0b57cec5SDimitry Andric void HexagonFrameLowering::expandAlloca(MachineInstr *AI, 2331*0b57cec5SDimitry Andric const HexagonInstrInfo &HII, unsigned SP, unsigned CF) const { 2332*0b57cec5SDimitry Andric MachineBasicBlock &MB = *AI->getParent(); 2333*0b57cec5SDimitry Andric DebugLoc DL = AI->getDebugLoc(); 2334*0b57cec5SDimitry Andric unsigned A = AI->getOperand(2).getImm(); 2335*0b57cec5SDimitry Andric 2336*0b57cec5SDimitry Andric // Have 2337*0b57cec5SDimitry Andric // Rd = alloca Rs, #A 2338*0b57cec5SDimitry Andric // 2339*0b57cec5SDimitry Andric // If Rs and Rd are different registers, use this sequence: 2340*0b57cec5SDimitry Andric // Rd = sub(r29, Rs) 2341*0b57cec5SDimitry Andric // r29 = sub(r29, Rs) 2342*0b57cec5SDimitry Andric // Rd = and(Rd, #-A) ; if necessary 2343*0b57cec5SDimitry Andric // r29 = and(r29, #-A) ; if necessary 2344*0b57cec5SDimitry Andric // Rd = add(Rd, #CF) ; CF size aligned to at most A 2345*0b57cec5SDimitry Andric // otherwise, do 2346*0b57cec5SDimitry Andric // Rd = sub(r29, Rs) 2347*0b57cec5SDimitry Andric // Rd = and(Rd, #-A) ; if necessary 2348*0b57cec5SDimitry Andric // r29 = Rd 
2349*0b57cec5SDimitry Andric // Rd = add(Rd, #CF) ; CF size aligned to at most A 2350*0b57cec5SDimitry Andric 2351*0b57cec5SDimitry Andric MachineOperand &RdOp = AI->getOperand(0); 2352*0b57cec5SDimitry Andric MachineOperand &RsOp = AI->getOperand(1); 2353*0b57cec5SDimitry Andric unsigned Rd = RdOp.getReg(), Rs = RsOp.getReg(); 2354*0b57cec5SDimitry Andric 2355*0b57cec5SDimitry Andric // Rd = sub(r29, Rs) 2356*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd) 2357*0b57cec5SDimitry Andric .addReg(SP) 2358*0b57cec5SDimitry Andric .addReg(Rs); 2359*0b57cec5SDimitry Andric if (Rs != Rd) { 2360*0b57cec5SDimitry Andric // r29 = sub(r29, Rs) 2361*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP) 2362*0b57cec5SDimitry Andric .addReg(SP) 2363*0b57cec5SDimitry Andric .addReg(Rs); 2364*0b57cec5SDimitry Andric } 2365*0b57cec5SDimitry Andric if (A > 8) { 2366*0b57cec5SDimitry Andric // Rd = and(Rd, #-A) 2367*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd) 2368*0b57cec5SDimitry Andric .addReg(Rd) 2369*0b57cec5SDimitry Andric .addImm(-int64_t(A)); 2370*0b57cec5SDimitry Andric if (Rs != Rd) 2371*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP) 2372*0b57cec5SDimitry Andric .addReg(SP) 2373*0b57cec5SDimitry Andric .addImm(-int64_t(A)); 2374*0b57cec5SDimitry Andric } 2375*0b57cec5SDimitry Andric if (Rs == Rd) { 2376*0b57cec5SDimitry Andric // r29 = Rd 2377*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP) 2378*0b57cec5SDimitry Andric .addReg(Rd); 2379*0b57cec5SDimitry Andric } 2380*0b57cec5SDimitry Andric if (CF > 0) { 2381*0b57cec5SDimitry Andric // Rd = add(Rd, #CF) 2382*0b57cec5SDimitry Andric BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd) 2383*0b57cec5SDimitry Andric .addReg(Rd) 2384*0b57cec5SDimitry Andric .addImm(CF); 2385*0b57cec5SDimitry Andric } 2386*0b57cec5SDimitry Andric } 2387*0b57cec5SDimitry Andric 2388*0b57cec5SDimitry Andric bool 
HexagonFrameLowering::needsAligna(const MachineFunction &MF) const { 2389*0b57cec5SDimitry Andric const MachineFrameInfo &MFI = MF.getFrameInfo(); 2390*0b57cec5SDimitry Andric if (!MFI.hasVarSizedObjects()) 2391*0b57cec5SDimitry Andric return false; 2392*0b57cec5SDimitry Andric unsigned MaxA = MFI.getMaxAlignment(); 2393*0b57cec5SDimitry Andric if (MaxA <= getStackAlignment()) 2394*0b57cec5SDimitry Andric return false; 2395*0b57cec5SDimitry Andric return true; 2396*0b57cec5SDimitry Andric } 2397*0b57cec5SDimitry Andric 2398*0b57cec5SDimitry Andric const MachineInstr *HexagonFrameLowering::getAlignaInstr( 2399*0b57cec5SDimitry Andric const MachineFunction &MF) const { 2400*0b57cec5SDimitry Andric for (auto &B : MF) 2401*0b57cec5SDimitry Andric for (auto &I : B) 2402*0b57cec5SDimitry Andric if (I.getOpcode() == Hexagon::PS_aligna) 2403*0b57cec5SDimitry Andric return &I; 2404*0b57cec5SDimitry Andric return nullptr; 2405*0b57cec5SDimitry Andric } 2406*0b57cec5SDimitry Andric 2407*0b57cec5SDimitry Andric /// Adds all callee-saved registers as implicit uses or defs to the 2408*0b57cec5SDimitry Andric /// instruction. 2409*0b57cec5SDimitry Andric void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI, 2410*0b57cec5SDimitry Andric const CSIVect &CSI, bool IsDef, bool IsKill) const { 2411*0b57cec5SDimitry Andric // Add the callee-saved registers as implicit uses. 2412*0b57cec5SDimitry Andric for (auto &R : CSI) 2413*0b57cec5SDimitry Andric MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill)); 2414*0b57cec5SDimitry Andric } 2415*0b57cec5SDimitry Andric 2416*0b57cec5SDimitry Andric /// Determine whether the callee-saved register saves and restores should 2417*0b57cec5SDimitry Andric /// be generated via inline code. If this function returns "true", inline 2418*0b57cec5SDimitry Andric /// code will be generated. 
If this function returns "false", additional 2419*0b57cec5SDimitry Andric /// checks are performed, which may still lead to the inline code. 2420*0b57cec5SDimitry Andric bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF, 2421*0b57cec5SDimitry Andric const CSIVect &CSI) const { 2422*0b57cec5SDimitry Andric if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn()) 2423*0b57cec5SDimitry Andric return true; 2424*0b57cec5SDimitry Andric if (!hasFP(MF)) 2425*0b57cec5SDimitry Andric return true; 2426*0b57cec5SDimitry Andric if (!isOptSize(MF) && !isMinSize(MF)) 2427*0b57cec5SDimitry Andric if (MF.getTarget().getOptLevel() > CodeGenOpt::Default) 2428*0b57cec5SDimitry Andric return true; 2429*0b57cec5SDimitry Andric 2430*0b57cec5SDimitry Andric // Check if CSI only has double registers, and if the registers form 2431*0b57cec5SDimitry Andric // a contiguous block starting from D8. 2432*0b57cec5SDimitry Andric BitVector Regs(Hexagon::NUM_TARGET_REGS); 2433*0b57cec5SDimitry Andric for (unsigned i = 0, n = CSI.size(); i < n; ++i) { 2434*0b57cec5SDimitry Andric unsigned R = CSI[i].getReg(); 2435*0b57cec5SDimitry Andric if (!Hexagon::DoubleRegsRegClass.contains(R)) 2436*0b57cec5SDimitry Andric return true; 2437*0b57cec5SDimitry Andric Regs[R] = true; 2438*0b57cec5SDimitry Andric } 2439*0b57cec5SDimitry Andric int F = Regs.find_first(); 2440*0b57cec5SDimitry Andric if (F != Hexagon::D8) 2441*0b57cec5SDimitry Andric return true; 2442*0b57cec5SDimitry Andric while (F >= 0) { 2443*0b57cec5SDimitry Andric int N = Regs.find_next(F); 2444*0b57cec5SDimitry Andric if (N >= 0 && N != F+1) 2445*0b57cec5SDimitry Andric return true; 2446*0b57cec5SDimitry Andric F = N; 2447*0b57cec5SDimitry Andric } 2448*0b57cec5SDimitry Andric 2449*0b57cec5SDimitry Andric return false; 2450*0b57cec5SDimitry Andric } 2451*0b57cec5SDimitry Andric 2452*0b57cec5SDimitry Andric bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF, 2453*0b57cec5SDimitry Andric const CSIVect 
&CSI) const { 2454*0b57cec5SDimitry Andric if (shouldInlineCSR(MF, CSI)) 2455*0b57cec5SDimitry Andric return false; 2456*0b57cec5SDimitry Andric unsigned NumCSI = CSI.size(); 2457*0b57cec5SDimitry Andric if (NumCSI <= 1) 2458*0b57cec5SDimitry Andric return false; 2459*0b57cec5SDimitry Andric 2460*0b57cec5SDimitry Andric unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs 2461*0b57cec5SDimitry Andric : SpillFuncThreshold; 2462*0b57cec5SDimitry Andric return Threshold < NumCSI; 2463*0b57cec5SDimitry Andric } 2464*0b57cec5SDimitry Andric 2465*0b57cec5SDimitry Andric bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF, 2466*0b57cec5SDimitry Andric const CSIVect &CSI) const { 2467*0b57cec5SDimitry Andric if (shouldInlineCSR(MF, CSI)) 2468*0b57cec5SDimitry Andric return false; 2469*0b57cec5SDimitry Andric // The restore functions do a bit more than just restoring registers. 2470*0b57cec5SDimitry Andric // The non-returning versions will go back directly to the caller's 2471*0b57cec5SDimitry Andric // caller, others will clean up the stack frame in preparation for 2472*0b57cec5SDimitry Andric // a tail call. Using them can still save code size even if only one 2473*0b57cec5SDimitry Andric // register is getting restores. Make the decision based on -Oz: 2474*0b57cec5SDimitry Andric // using -Os will use inline restore for a single register. 2475*0b57cec5SDimitry Andric if (isMinSize(MF)) 2476*0b57cec5SDimitry Andric return true; 2477*0b57cec5SDimitry Andric unsigned NumCSI = CSI.size(); 2478*0b57cec5SDimitry Andric if (NumCSI <= 1) 2479*0b57cec5SDimitry Andric return false; 2480*0b57cec5SDimitry Andric 2481*0b57cec5SDimitry Andric unsigned Threshold = isOptSize(MF) ? 
SpillFuncThresholdOs-1 2482*0b57cec5SDimitry Andric : SpillFuncThreshold; 2483*0b57cec5SDimitry Andric return Threshold < NumCSI; 2484*0b57cec5SDimitry Andric } 2485*0b57cec5SDimitry Andric 2486*0b57cec5SDimitry Andric bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const { 2487*0b57cec5SDimitry Andric unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF); 2488*0b57cec5SDimitry Andric auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2489*0b57cec5SDimitry Andric // A fairly simplistic guess as to whether a potential load/store to a 2490*0b57cec5SDimitry Andric // stack location could require an extra register. 2491*0b57cec5SDimitry Andric if (HST.useHVXOps() && StackSize > 256) 2492*0b57cec5SDimitry Andric return true; 2493*0b57cec5SDimitry Andric 2494*0b57cec5SDimitry Andric // Check if the function has store-immediate instructions that access 2495*0b57cec5SDimitry Andric // the stack. Since the offset field is not extendable, if the stack 2496*0b57cec5SDimitry Andric // size exceeds the offset limit (6 bits, shifted), the stores will 2497*0b57cec5SDimitry Andric // require a new base register. 2498*0b57cec5SDimitry Andric bool HasImmStack = false; 2499*0b57cec5SDimitry Andric unsigned MinLS = ~0u; // Log_2 of the memory access size. 
2500*0b57cec5SDimitry Andric 2501*0b57cec5SDimitry Andric for (const MachineBasicBlock &B : MF) { 2502*0b57cec5SDimitry Andric for (const MachineInstr &MI : B) { 2503*0b57cec5SDimitry Andric unsigned LS = 0; 2504*0b57cec5SDimitry Andric switch (MI.getOpcode()) { 2505*0b57cec5SDimitry Andric case Hexagon::S4_storeirit_io: 2506*0b57cec5SDimitry Andric case Hexagon::S4_storeirif_io: 2507*0b57cec5SDimitry Andric case Hexagon::S4_storeiri_io: 2508*0b57cec5SDimitry Andric ++LS; 2509*0b57cec5SDimitry Andric LLVM_FALLTHROUGH; 2510*0b57cec5SDimitry Andric case Hexagon::S4_storeirht_io: 2511*0b57cec5SDimitry Andric case Hexagon::S4_storeirhf_io: 2512*0b57cec5SDimitry Andric case Hexagon::S4_storeirh_io: 2513*0b57cec5SDimitry Andric ++LS; 2514*0b57cec5SDimitry Andric LLVM_FALLTHROUGH; 2515*0b57cec5SDimitry Andric case Hexagon::S4_storeirbt_io: 2516*0b57cec5SDimitry Andric case Hexagon::S4_storeirbf_io: 2517*0b57cec5SDimitry Andric case Hexagon::S4_storeirb_io: 2518*0b57cec5SDimitry Andric if (MI.getOperand(0).isFI()) 2519*0b57cec5SDimitry Andric HasImmStack = true; 2520*0b57cec5SDimitry Andric MinLS = std::min(MinLS, LS); 2521*0b57cec5SDimitry Andric break; 2522*0b57cec5SDimitry Andric } 2523*0b57cec5SDimitry Andric } 2524*0b57cec5SDimitry Andric } 2525*0b57cec5SDimitry Andric 2526*0b57cec5SDimitry Andric if (HasImmStack) 2527*0b57cec5SDimitry Andric return !isUInt<6>(StackSize >> MinLS); 2528*0b57cec5SDimitry Andric 2529*0b57cec5SDimitry Andric return false; 2530*0b57cec5SDimitry Andric } 2531