//===- ARCISelLowering.cpp - ARC DAG Lowering Impl --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARCTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARCISelLowering.h"
#include "ARC.h"
#include "ARCMachineFunctionInfo.h"
#include "ARCSubtarget.h"
#include "ARCTargetMachine.h"
#include "MCTargetDesc/ARCInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "arc-lower"

using namespace llvm;

static SDValue lowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals);

static ARCCC::CondCode ISDCCtoARCCC(ISD::CondCode isdCC) {
  switch (isdCC) {
  case ISD::SETUEQ:
    return ARCCC::EQ;
  case ISD::SETUGT:
    return ARCCC::HI;
  case ISD::SETUGE:
    return ARCCC::HS;
  case ISD::SETULT:
    return ARCCC::LO;
  case ISD::SETULE:
    return ARCCC::LS;
  case ISD::SETUNE:
    return ARCCC::NE;
  case ISD::SETEQ:
    return ARCCC::EQ;
  case ISD::SETGT:
    return ARCCC::GT;
  case ISD::SETGE:
    return ARCCC::GE;
  case ISD::SETLT:
    return ARCCC::LT;
  case ISD::SETLE:
    return ARCCC::LE;
  case ISD::SETNE:
    return ARCCC::NE;
  default:
    llvm_unreachable("Unhandled ISDCC code.");
  }
}

ARCTargetLowering::ARCTargetLowering(const TargetMachine &TM,
                                     const ARCSubtarget &Subtarget)
    : TargetLowering(TM), Subtarget(Subtarget) {
  // Set up the register classes.
  addRegisterClass(MVT::i32, &ARC::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(ARC::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Expand everything by default; the legal and custom lowerings registered
  // below override this.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, MVT::i32, Expand);

  // Operations to get us off of the ground.
  // Basic.
  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::SUB, MVT::i32, Legal);
  setOperationAction(ISD::AND, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::SMIN, MVT::i32, Legal);

  // Need barrel shifter.
  setOperationAction(ISD::SHL, MVT::i32, Legal);
  setOperationAction(ISD::SRA, MVT::i32, Legal);
  setOperationAction(ISD::SRL, MVT::i32, Legal);
  setOperationAction(ISD::ROTR, MVT::i32, Legal);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::UNDEF, MVT::i32, Legal);

  // Need multiplier.
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MULHS, MVT::i32, Legal);
  setOperationAction(ISD::MULHU, MVT::i32, Legal);
  setOperationAction(ISD::LOAD, MVT::i32, Legal);
  setOperationAction(ISD::STORE, MVT::i32, Legal);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);

  // Have pseudo instruction for frame addresses.
  setOperationAction(ISD::FRAMEADDR, MVT::i32, Legal);
  // Custom lower global addresses.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  // Expand var-args ops.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Other expansions.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Sign extend inreg.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
}

const char *ARCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case ARCISD::BL:
    return "ARCISD::BL";
  case ARCISD::CMOV:
    return "ARCISD::CMOV";
  case ARCISD::CMP:
    return "ARCISD::CMP";
  case ARCISD::BRcc:
    return "ARCISD::BRcc";
  case ARCISD::RET:
    return "ARCISD::RET";
  case ARCISD::GAWRAPPER:
    return "ARCISD::GAWRAPPER";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue ARCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc dl(Op);
  ARCCC::CondCode ArcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to SELECT_CC i32");
  SDValue Cmp = DAG.getNode(ARCISD::CMP, dl, MVT::Glue, LHS, RHS);
  return DAG.getNode(ARCISD::CMOV, dl, TVal.getValueType(), TVal, FVal,
                     DAG.getConstant(ArcCC, dl, MVT::i32), Cmp);
}

SDValue ARCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDLoc dl(Op);
  assert(Op.getValueType() == MVT::i32 &&
         "Unhandled target sign_extend_inreg.");
  // These are legal.
  unsigned Width = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
  if (Width == 16 || Width == 8)
    return Op;
  if (Width >= 32) {
    return {};
  }
  SDValue LS = DAG.getNode(ISD::SHL, dl, MVT::i32, Op0,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  SDValue SR = DAG.getNode(ISD::SRA, dl, MVT::i32, LS,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  return SR;
}

SDValue ARCTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  ARCCC::CondCode arcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to BR_CC i32");
  return DAG.getNode(ARCISD::BRcc, dl, MVT::Other, Chain, Dest, LHS, RHS,
                     DAG.getConstant(arcCC, dl, MVT::i32));
}

SDValue ARCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  auto *N = cast<JumpTableSDNode>(Op);
  SDValue GA = DAG.getTargetJumpTable(N->getIndex(), MVT::i32);
  return DAG.getNode(ARCISD::GAWRAPPER, SDLoc(N), MVT::i32, GA);
}

#include "ARCGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// ARC call implementation
SDValue ARCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool &IsTailCall = CLI.IsTailCall;

  IsTailCall = false; // Do not support tail calls yet.

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CC_ARC);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  SDValue StackPtr;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc() && "Must be register or memory argument.");
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, ARC::SP,
                                      getPointerTy(DAG.getDataLayout()));
      // Calculate the stack position.
      SDValue SOffset = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), StackPtr, SOffset);

      SDValue Store =
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      MemOpChains.push_back(Store);
      IsTailCall = false;
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The glue is necessary since all emitted instructions must be
  // stuck together.
  SDValue Glue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, Glue);
    Glue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  bool IsDirect = true;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
  else
    IsDirect = false;
  // Branch + Link = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  Chain = DAG.getNode(IsDirect ? ARCISD::BL : ARCISD::JL, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  if (IsTailCall)
    return Chain;
  return lowerCallResult(Chain, Glue, RVLocs, dl, DAG, InVals);
}

/// Lower the result values of a call into the appropriate copies out of
/// physical registers / memory locations.
static SDValue lowerCallResult(SDValue Chain, SDValue Glue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      SDValue RetValue;
      RetValue =
          DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(), Glue);
      Chain = RetValue.getValue(1);
      Glue = RetValue.getValue(2);
      InVals.push_back(RetValue);
    } else {
      assert(VA.isMemLoc() && "Must be memory location.");
      ResultMemLocs.push_back(
          std::make_pair(VA.getLocMemOffset(), InVals.size()));

      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int Offset = ResultMemLocs[i].first;
    unsigned Index = ResultMemLocs[i].second;
    SDValue StackPtr = DAG.getRegister(ARC::SP, MVT::i32);
    SDValue SpLoc = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
                                DAG.getConstant(Offset, dl, MVT::i32));
    SDValue Load =
        DAG.getLoad(MVT::i32, dl, Chain, SpLoc, MachinePointerInfo());
    InVals[Index] = Load;
    MemOpChains.push_back(Load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {

struct ArgDataPair {
  SDValue SDV;
  ISD::ArgFlagsTy Flags;
};

} // end anonymous namespace

/// ARC formal arguments implementation
SDValue ARCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCallArguments(Chain, CallConv, IsVarArg, Ins, dl, DAG, InVals);
  }
}

/// Transform physical registers into virtual registers, and generate load
/// operations for argument places on the stack.
SDValue ARCTargetLowering::LowerCallArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  auto *AFI = MF.getInfo<ARCFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_ARC);

  unsigned StackSlotSize = 4;

  if (!IsVarArg)
    AFI->setReturnStackOffset(CCInfo.getNextStackOffset());

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default: {
        LLVM_DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
                          << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
        llvm_unreachable("Unhandled LowerFormalArguments type.");
      }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Sanity check.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getStoreSize();
      assert((ObjSize <= StackSlotSize) && "Unhandled argument");

      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = {ArgIn, Ins[i].Flags};
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (IsVarArg) {
    // Argument registers.
    static const MCPhysReg ArgRegs[] = {ARC::R0, ARC::R1, ARC::R2, ARC::R3,
                                        ARC::R4, ARC::R5, ARC::R6, ARC::R7};
    auto *AFI = MF.getInfo<ARCFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int Offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      // There are (array_lengthof(ArgRegs) - FirstVAReg) registers which
      // need to be saved.
      int VarFI =
          MFI.CreateFixedObject((array_lengthof(ArgRegs) - FirstVAReg) * 4,
                                CCInfo.getNextStackOffset(), true);
      AFI->setVarArgsFrameIndex(VarFI);
      SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
      for (unsigned i = FirstVAReg; i < array_lengthof(ArgRegs); i++) {
        // Move argument from phys reg -> virt reg.
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        SDValue VAObj = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
                                    DAG.getConstant(Offset, dl, MVT::i32));
        // Move argument from virt reg -> stack.
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, VAObj, MachinePointerInfo());
        MemOps.push_back(Store);
        Offset += 4;
      }
    } else {
      llvm_unreachable("Too many var args parameters.");
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (const auto &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI.Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32), Align,
          false, false, false, MachinePointerInfo(), MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI.SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool ARCTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_ARC))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && IsVarArg)
    return false;
  return true;
}

SDValue
ARCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  auto *AFI = DAG.getMachineFunction().getInfo<ARCFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represents the assignment of the return value
  // to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!IsVarArg)
    CCInfo.AllocateStack(AFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_ARC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (IsVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getStoreSize();
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together, so that the
    // scheduler cannot break up the glued sequence.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // What to do with the RetOps?
  return DAG.getNode(ARCISD::RET, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue ARCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  return {};
}

//===----------------------------------------------------------------------===//
// Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  return AM.Scale == 0;
}

// Don't emit tail calls for the time being.
bool ARCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return false;
}

SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARCRegisterInfo &ARI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  assert(cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0 &&
         "Only support lowering frame addr of current frame.");
  Register FrameReg = ARI.getFrameRegister(MF);
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
}

SDValue ARCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc dl(GN);
  int64_t Offset = GN->getOffset();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, Offset);
  return DAG.getNode(ARCISD::GAWRAPPER, dl, MVT::i32, GA);
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *FuncInfo = MF.getInfo<ARCFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue ARCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}