//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 &&
           "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

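  // Reassemble a value that the calling convention split across several
  // locations: assign each piece, then merge the pieces back into ArgsReg.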
  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
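    // The value was promoted to a full 32-bit stack slot; load the whole slot
    // and truncate back to the original width.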
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
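    // Extract the other 32-bit half of the f64 into the remaining GPR of the
    // A0-A3 pair.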
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedArgumentType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static bool isSupportedReturnType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  if (T->isAggregateType())
    return true;
  return false;
}

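// Pick the LocInfo for a value assigned to a register of type RegisterVT,
// based on its original type VT and the extension requested by its flags.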
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being wider than RegisterVT does not mean information is lost (i.e.
  // that RegisterVT cannot hold VT); it means that VT is split into multiple
  // registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedReturnType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VRegs, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  if (F.isVarArg()) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);

    int VaArgOffset;
    unsigned RegSize = 4;
    if (ArgRegs.size() == Idx)
      VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
    else {
      VaArgOffset =
          (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
          (int)(RegSize * (ArgRegs.size() - Idx));
    }

    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
    MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);

    for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
      MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);

      MachineInstrBuilder Copy =
          MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
      FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
      MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
      MachineInstrBuilder FrameIndex =
          MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
                                  /* Alignment */ RegSize);
      MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
    }
  }

  return true;
}

bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedArgumentType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);
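  // The stack size operands of ADJCALLSTACKDOWN are filled in below, once the
  // calling convention has computed how much outgoing stack space is needed.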

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  bool IsCalleeVarArg = false;
  if (Info.Callee.isGlobal()) {
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }
  MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
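  // Assign locations for all outgoing arguments according to the Mips calling
  // convention before emitting the copies and stores.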
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align::None());

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  SmallVector<EVT, 4> SplitEVTs;
  SmallVector<Register, 4> SplitVRegs;
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
    Info.Flags = OrigArg.Flags;
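    // Every split piece keeps the original argument's flags and original
    // argument index.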
    SplitArgs.push_back(Info);
    SplitArgsOrigIndices.push_back(OriginalIndex);
  }
}