//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    // We don't currently support swiftself args.
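    // (Returning false reports this call as "not lowered"; with GlobalISel
    // that typically means falling back to the SelectionDAG path.)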
    if (OrigArg.Flags.isSwiftSelf())
      return false;
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (const Function *F = CS.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);

  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
                   SwiftErrorVReg);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed in from the frontend; the
    // backend will guess if this info is not there, but there are cases it
    // cannot get right.
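    // For example, a parameter declared as `%struct.S* byval(%struct.S)
    // align 16` carries an explicit alignment (16) that is used directly;
    // the target's estimate below is only a fallback when `align` is absent.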
    unsigned FrameAlign;
    // OpIdx is an attribute index, offset by AttributeList::FirstArgIndex;
    // getParamAlignment expects a zero-based argument number.
    unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
    if (FuncInfo.getParamAlignment(ParamIdx))
      FrameAlign = FuncInfo.getParamAlignment(ParamIdx);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          CCInfo)) {
      // Try to use the register type if we couldn't assign the VT.
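      // For example, an i8 argument may have no direct assignment rule in
      // the calling convention, but can often be assigned once widened to
      // the type the convention actually uses for registers (such as i32).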
      if (!Handler.isArgumentHandler() || !CurVT.isValid())
        return false;
      CurVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));
      if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                            CCInfo))
        return false;
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    assert(Args[i].Regs.size() == 1 &&
           "Can't handle multiple virtual regs yet");

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
          return false; // Can't handle this type of arg yet.
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
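    // (We conservatively return the value unchanged here; a target that
    // needs an explicit G_BITCAST for such cases must insert it itself.)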
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}