//===- ARCISelLowering.cpp - ARC DAG Lowering Impl --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARCTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARCISelLowering.h"
#include "ARC.h"
#include "ARCMachineFunctionInfo.h"
#include "ARCSubtarget.h"
#include "ARCTargetMachine.h"
#include "MCTargetDesc/ARCInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "arc-lower"

using namespace llvm;

static SDValue lowerCallResult(SDValue Chain, SDValue Glue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals);

static ARCCC::CondCode ISDCCtoARCCC(ISD::CondCode isdCC) {
  switch (isdCC) {
  case ISD::SETUEQ:
    return ARCCC::EQ;
  case ISD::SETUGT:
    return ARCCC::HI;
  case ISD::SETUGE:
    return ARCCC::HS;
  case ISD::SETULT:
    return ARCCC::LO;
  case ISD::SETULE:
    return ARCCC::LS;
  case ISD::SETUNE:
    return ARCCC::NE;
  case ISD::SETEQ:
    return ARCCC::EQ;
  case ISD::SETGT:
    return ARCCC::GT;
  case ISD::SETGE:
    return ARCCC::GE;
  case ISD::SETLT:
    return ARCCC::LT;
  case ISD::SETLE:
    return ARCCC::LE;
  case ISD::SETNE:
    return ARCCC::NE;
  default:
    llvm_unreachable("Unhandled ISDCC code.");
  }
}

ARCTargetLowering::ARCTargetLowering(const TargetMachine &TM,
                                     const ARCSubtarget &Subtarget)
    : TargetLowering(TM), Subtarget(Subtarget) {
  // Set up the register classes.
  addRegisterClass(MVT::i32, &ARC::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(ARC::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Expand everything by default, then mark the operations we actually
  // support as Legal or Custom below.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, MVT::i32, Expand);

  // Operations to get us off the ground.
  // Basic.
  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::SUB, MVT::i32, Legal);
  setOperationAction(ISD::AND, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::SMIN, MVT::i32, Legal);

  // Need barrel shifter.
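  // Shifts and rotates map onto the barrel shifter, so they are kept Legal
  // here; a core configuration without one would need them expanded instead.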
  setOperationAction(ISD::SHL, MVT::i32, Legal);
  setOperationAction(ISD::SRA, MVT::i32, Legal);
  setOperationAction(ISD::SRL, MVT::i32, Legal);
  setOperationAction(ISD::ROTR, MVT::i32, Legal);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::UNDEF, MVT::i32, Legal);

  // Need multiplier.
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MULHS, MVT::i32, Legal);
  setOperationAction(ISD::MULHU, MVT::i32, Legal);
  setOperationAction(ISD::LOAD, MVT::i32, Legal);
  setOperationAction(ISD::STORE, MVT::i32, Legal);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);

  // We have a pseudo instruction for frame addresses.
  setOperationAction(ISD::FRAMEADDR, MVT::i32, Legal);
  // Custom lower global addresses.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  // Variable-argument ops: custom lower VASTART, expand the rest.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Other expansions.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Sign extend inreg.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
}

const char *ARCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case ARCISD::BL:
    return "ARCISD::BL";
  case ARCISD::CMOV:
    return "ARCISD::CMOV";
  case ARCISD::CMP:
    return "ARCISD::CMP";
  case ARCISD::BRcc:
    return "ARCISD::BRcc";
  case ARCISD::RET:
    return "ARCISD::RET";
  case ARCISD::GAWRAPPER:
    return "ARCISD::GAWRAPPER";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue ARCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc dl(Op);
  ARCCC::CondCode ArcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to SELECT_CC i32");
  // Emit a compare that produces glue, then a conditional move that selects
  // TVal or FVal based on the ARC condition code.
  SDValue Cmp = DAG.getNode(ARCISD::CMP, dl, MVT::Glue, LHS, RHS);
  return DAG.getNode(ARCISD::CMOV, dl, TVal.getValueType(), TVal, FVal,
                     DAG.getConstant(ArcCC, dl, MVT::i32), Cmp);
}

SDValue ARCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDLoc dl(Op);
  assert(Op.getValueType() == MVT::i32 &&
         "Unhandled target sign_extend_inreg.");
  // 8- and 16-bit widths are already legal.
  unsigned Width = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
  if (Width == 16 || Width == 8)
    return Op;
  if (Width >= 32) {
    return {};
  }
  // Other widths are sign-extended with a shift pair, e.g. for Width == 1:
  // (x << 31) >> 31 (arithmetic) replicates bit 0 across the register.
  SDValue LS = DAG.getNode(ISD::SHL, dl, MVT::i32, Op0,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  SDValue SR = DAG.getNode(ISD::SRA, dl, MVT::i32, LS,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  return SR;
}

SDValue ARCTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  ARCCC::CondCode arcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to BR_CC i32");
  return DAG.getNode(ARCISD::BRcc, dl, MVT::Other, Chain, Dest, LHS, RHS,
                     DAG.getConstant(arcCC, dl, MVT::i32));
}

SDValue ARCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  auto *N = cast<JumpTableSDNode>(Op);
  SDValue GA = DAG.getTargetJumpTable(N->getIndex(), MVT::i32);
  return DAG.getNode(ARCISD::GAWRAPPER, SDLoc(N), MVT::i32, GA);
}

#include "ARCGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// ARC call implementation
SDValue ARCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool &IsTailCall = CLI.IsTailCall;

  IsTailCall = false; // Do not support tail calls yet.

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CC_ARC);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  SDValue StackPtr;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
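    // The calling convention may assign a location type (LocVT) wider than
    // the value's own type; LocInfo records which extension to apply.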
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register are kept in the RegsToPass
    // vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc() && "Must be register or memory argument.");
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, ARC::SP,
                                      getPointerTy(DAG.getDataLayout()));
      // Calculate the stack position.
      SDValue SOffset = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), StackPtr, SOffset);

      SDValue Store =
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      MemOpChains.push_back(Store);
      IsTailCall = false;
    }
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and glue operands which copy the outgoing args into registers.
  // The glue is necessary since all emitted instructions must be
  // stuck together.
  SDValue Glue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, Glue);
    Glue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  bool IsDirect = true;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
  else
    IsDirect = false;
  // Branch + Link = #chain, #target_address, #opt_in_flags...
  //               = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  // Use BL for direct calls; indirect calls go through ARCISD::JL.
  Chain = DAG.getNode(IsDirect ? ARCISD::BL : ARCISD::JL, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
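  // The byte count here must match the CALLSEQ_START above so that the
  // call-frame setup/destroy pseudos can be paired and eliminated later.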
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  if (IsTailCall)
    return Chain;
  return lowerCallResult(Chain, Glue, RVLocs, dl, DAG, InVals);
}

/// Lower the result values of a call into the appropriate copies out of
/// physical registers / memory locations.
static SDValue lowerCallResult(SDValue Chain, SDValue Glue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      SDValue RetValue;
      RetValue =
          DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(), Glue);
      Chain = RetValue.getValue(1);
      Glue = RetValue.getValue(2);
      InVals.push_back(RetValue);
    } else {
      assert(VA.isMemLoc() && "Must be memory location.");
      ResultMemLocs.push_back(
          std::make_pair(VA.getLocMemOffset(), InVals.size()));

      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int Offset = ResultMemLocs[i].first;
    unsigned Index = ResultMemLocs[i].second;
    SDValue StackPtr = DAG.getRegister(ARC::SP, MVT::i32);
    SDValue SpLoc = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
                                DAG.getConstant(Offset, dl, MVT::i32));
    SDValue Load =
        DAG.getLoad(MVT::i32, dl, Chain, SpLoc, MachinePointerInfo());
    InVals[Index] = Load;
    MemOpChains.push_back(Load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all loads are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {

struct ArgDataPair {
  SDValue SDV;
  ISD::ArgFlagsTy Flags;
};

} // end anonymous namespace

/// ARC formal arguments implementation
SDValue ARCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCallArguments(Chain, CallConv, IsVarArg, Ins, dl, DAG, InVals);
  }
}

/// Transform physical registers into virtual registers, and generate load
/// operations for argument places on the stack.
SDValue ARCTargetLowering::LowerCallArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  auto *AFI = MF.getInfo<ARCFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_ARC);

  unsigned StackSlotSize = 4;

  if (!IsVarArg)
    AFI->setReturnStackOffset(CCInfo.getNextStackOffset());

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default: {
        LLVM_DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
                          << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
        llvm_unreachable("Unhandled LowerFormalArguments type.");
      }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Only memory locations are expected here.
      assert(VA.isMemLoc());
      // Load the argument from its stack slot.
      unsigned ObjSize = VA.getLocVT().getStoreSize();
      assert((ObjSize <= StackSlotSize) && "Unhandled argument");

      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = {ArgIn, Ins[i].Flags};
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (IsVarArg) {
    // Argument registers.
    static const MCPhysReg ArgRegs[] = {ARC::R0, ARC::R1, ARC::R2, ARC::R3,
                                        ARC::R4, ARC::R5, ARC::R6, ARC::R7};
    auto *AFI = MF.getInfo<ARCFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int Offset = 0;
      // Save the remaining registers, storing higher register numbers at a
      // higher address.
      // There are (array_lengthof(ArgRegs) - FirstVAReg) registers which
      // need to be saved.
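      // The save area is a fixed object placed just above the named stack
      // arguments (at getNextStackOffset()); the loop below spills one
      // 4-byte slot per remaining argument register.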
      int VarFI =
          MFI.CreateFixedObject((array_lengthof(ArgRegs) - FirstVAReg) * 4,
                                CCInfo.getNextStackOffset(), true);
      AFI->setVarArgsFrameIndex(VarFI);
      SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
      for (unsigned i = FirstVAReg; i < array_lengthof(ArgRegs); i++) {
        // Move argument from phys reg -> virt reg.
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        SDValue VAObj = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
                                    DAG.getConstant(Offset, dl, MVT::i32));
        // Move argument from virt reg -> stack.
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, VAObj, MachinePointerInfo());
        MemOps.push_back(Store);
        Offset += 4;
      }
    } else {
      llvm_unreachable("Too many var args parameters.");
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (const auto &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, false, MachinePointerInfo(),
          MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI.SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool ARCTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_ARC))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && IsVarArg)
    return false;
  return true;
}

SDValue
ARCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  auto *AFI = DAG.getMachineFunction().getInfo<ARCFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represents the assignment of the return value to a
  // location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!IsVarArg)
    CCInfo.AllocateStack(AFI->getReturnStackOffset(), Align(4));

  CCInfo.AnalyzeReturn(Outs, RetCC_ARC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (IsVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getStoreSize();
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together by gluing them,
    // so they cannot be scheduled apart.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // Emit the return node with the chain, any glue, and the registers that
  // carry return values.
  return DAG.getNode(ARCISD::RET, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue ARCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  return {};
}

//===----------------------------------------------------------------------===//
// Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  return AM.Scale == 0;
}

// Don't emit tail calls for the time being.
bool ARCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return false;
}

SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARCRegisterInfo &ARI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  assert(cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0 &&
         "Only support lowering frame addr of current frame.");
  Register FrameReg = ARI.getFrameRegister(MF);
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
}

SDValue ARCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc dl(GN);
  int64_t Offset = GN->getOffset();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, Offset);
  return DAG.getNode(ARCISD::GAWRAPPER, dl, MVT::i32, GA);
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *FuncInfo = MF.getInfo<ARCFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue ARCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  default:
    llvm_unreachable("unimplemented operation");
  }
}