//===-- CSKYISelLowering.cpp - CSKY DAG Lowering Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that CSKY uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "CSKYISelLowering.h"
#include "CSKYCallingConv.h"
#include "CSKYConstantPoolValue.h"
#include "CSKYMachineFunctionInfo.h"
#include "CSKYRegisterInfo.h"
#include "CSKYSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "csky-isel-lowering"

STATISTIC(NumTailCalls, "Number of tail calls");

#include "CSKYGenCallingConv.inc"

static const MCPhysReg GPRArgRegs[] = {CSKY::R0, CSKY::R1, CSKY::R2, CSKY::R3};

CSKYTargetLowering::CSKYTargetLowering(const TargetMachine &TM,
                                       const CSKYSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Register Class
  addRegisterClass(MVT::i32, &CSKY::GPRRegClass);

  if (STI.useHardFloat()) {
    if (STI.hasFPUv2SingleFloat())
      addRegisterClass(MVT::f32, &CSKY::sFPR32RegClass);
    else if (STI.hasFPUv3SingleFloat())
      addRegisterClass(MVT::f32, &CSKY::FPR32RegClass);

    if (STI.hasFPUv2DoubleFloat())
      addRegisterClass(MVT::f64, &CSKY::sFPR64RegClass);
    else if (STI.hasFPUv3DoubleFloat())
      addRegisterClass(MVT::f64, &CSKY::FPR64RegClass);
  }

  setOperationAction(ISD::UADDO_CARRY, MVT::i32, Legal);
  setOperationAction(ISD::USUBO_CARRY, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Promote);

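  // Address-forming nodes (globals, external symbols, TLS, block addresses,
  // jump tables) and VASTART get CSKY-specific handling in LowerOperation.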
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (!Subtarget.hasE2()) {
    setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  }
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (!Subtarget.hasE2()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  }

  if (!Subtarget.has2E3()) {
    setOperationAction(ISD::ABS, MVT::i32, Expand);
    setOperationAction(ISD::BITREVERSE, MVT::i32, Expand);
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);

  // Float

  ISD::CondCode FPCCToExtend[] = {
      ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE,
  };

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW,
      ISD::FREM, ISD::FCOPYSIGN, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (STI.useHardFloat()) {

    MVT AllVTy[] = {MVT::f32, MVT::f64};

    for (auto VT : AllVTy) {
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::BR_CC, VT, Expand);

      for (auto CC : FPCCToExtend)
        setCondCodeAction(CC, VT, Expand);
      for (auto Op : FPOpToExpand)
        setOperationAction(Op, VT, Expand);
    }

    if (STI.hasFPUv2SingleFloat() || STI.hasFPUv3SingleFloat()) {
      setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
      setTruncStoreAction(MVT::f32, MVT::f16, Expand);
    }
    if (STI.hasFPUv2DoubleFloat() || STI.hasFPUv3DoubleFloat()) {
      setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
      setTruncStoreAction(MVT::f64, MVT::f32, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
      setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setBooleanContents(UndefinedBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // TODO: Add atomic support fully.
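  // With a maximum supported atomic width of 0 bits, AtomicExpandPass lowers
  // every atomic operation to a __atomic_* library call.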
  setMaxAtomicSizeInBitsSupported(0);

  setStackPointerRegisterToSaveRestore(CSKY::R14);
  setMinFunctionAlignment(Align(2));
  setSchedulingPreference(Sched::Source);
}

SDValue CSKYTargetLowering::LowerOperation(SDValue Op,
                                           SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented op");
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  }
}

EVT CSKYTargetLowering::getSetCCResultType(const DataLayout &DL,
                                           LLVMContext &Context,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;

  return VT.changeVectorElementTypeToInteger();
}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {
  EVT LocVT = VA.getLocVT();

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    break;
  case CCValAssign::BCvt:
    Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
    break;
  }
  return Val;
}

static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    break;
  case CCValAssign::BCvt:
    Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
    break;
  }
  return Val;
}

static SDValue unpackFromRegLoc(const CSKYSubtarget &Subtarget,
                                SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;
  const TargetRegisterClass *RC;

  switch (LocVT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected register type");
  case MVT::i32:
    RC = &CSKY::GPRRegClass;
    break;
  case MVT::f32:
    RC = Subtarget.hasFPUv2SingleFloat() ? &CSKY::sFPR32RegClass
                                         : &CSKY::FPR32RegClass;
    break;
  case MVT::f64:
    RC = Subtarget.hasFPUv2DoubleFloat() ? &CSKY::sFPR64RegClass
                                         : &CSKY::FPR64RegClass;
    break;
  }

  Register VReg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  return convertLocVTToValVT(DAG, Val, VA, DL);
}

static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

static SDValue unpack64(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA,
                        const SDLoc &DL) {
  assert(VA.getLocVT() == MVT::i32 &&
         (VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::i64) &&
         "Unexpected VA");
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  if (VA.isMemLoc()) {
    // f64/i64 is passed on the stack.
    int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    return DAG.getLoad(VA.getValVT(), DL, Chain, FIN,
                       MachinePointerInfo::getFixedStack(MF, FI));
  }

  assert(VA.isRegLoc() && "Expected register VA assignment");

  Register LoVReg = RegInfo.createVirtualRegister(&CSKY::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
  SDValue Hi;
  if (VA.getLocReg() == CSKY::R3) {
    // Second half of f64/i64 is passed on the stack.
    int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
  } else {
    // Second half of f64/i64 is passed in another GPR.
    Register HiVReg = RegInfo.createVirtualRegister(&CSKY::GPRRegClass);
    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
    Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
  }
  return DAG.getNode(CSKYISD::BITCAST_FROM_LOHI, DL, VA.getValVT(), Lo, Hi);
}

// Transform physical registers into virtual registers.
SDValue CSKYTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();

  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, IsVarArg));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue;

    bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;

    if (IsF64OnCSKY)
      ArgValue = unpack64(DAG, Chain, VA, DL);
    else if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(Subtarget, DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    const unsigned XLenInBytes = 4;
    const MVT XLenVT = MVT::i32;

    ArrayRef<MCPhysReg> ArgRegs = ArrayRef(GPRArgRegs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &CSKY::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    CSKYMachineFunctionInfo *CSKYFI = MF.getInfo<CSKYMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a3.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getStackSize();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument,
    // which is needed by VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    CSKYFI->setVarArgsFrameIndex(FI);

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const Register Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff =
          DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    CSKYFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

bool CSKYTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> CSKYLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, CSKYLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
CSKYTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                bool IsVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> CSKYLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), CSKYLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = CSKYLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = CSKYLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;

    if (IsF64OnCSKY) {

      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue Split64 = DAG.getNode(CSKYISD::BITCAST_TO_LOHI, DL,
                                    DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = Split64.getValue(0);
      SDValue Hi = Split64.getValue(1);

      Register RegLo = VA.getLocReg();
      assert(RegLo < CSKY::R31 && "Invalid register pair");
      Register RegHi = RegLo + 1;

      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
    return DAG.getNode(CSKYISD::NIR, DL, MVT::Other, RetOps);

  return DAG.getNode(CSKYISD::RET, DL, MVT::Other, RetOps);
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue CSKYTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                      SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = MVT::i32;

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  ArgCCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, IsVarArg));

  // Check if it's really possible to do a tail call.
  if (IsTailCall)
    IsTailCall = false; // TODO: TailCallOptimization;

  if (IsTailCall)
    ++NumTailCalls;
  else if (CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getStackSize();

  // Create local copies for byval args
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    int FI =
        MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false, IsTailCall,
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;

    if (IsF64OnCSKY && VA.isRegLoc()) {
      SDValue Split64 =
          DAG.getNode(CSKYISD::BITCAST_TO_LOHI, DL,
                      DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
      SDValue Lo = Split64.getValue(0);
      SDValue Hi = Split64.getValue(1);

      Register RegLo = VA.getLocReg();
      RegsToPass.push_back(std::make_pair(RegLo, Lo));

      if (RegLo == CSKY::R3) {
        // Second half of f64/i64 is passed on the stack.
        // Work out the address of the stack slot.
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, DL, CSKY::R14, PtrVT);
        // Emit the store.
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
      } else {
        // Second half of f64/i64 is passed in another GPR.
        assert(RegLo < CSKY::R31 && "Invalid register pair");
        Register RegHigh = RegLo + 1;
        RegsToPass.push_back(std::make_pair(RegHigh, Hi));
      }
      continue;
    }

    ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, CSKY::R14, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  SmallVector<SDValue, 8> Ops;
  EVT Ty = getPointerTy(DAG.getDataLayout());
  bool IsRegCall = false;

  Ops.push_back(Chain);

  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = S->getGlobal();
    bool IsLocal =
        getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);

    if (isPositionIndependent() || !Subtarget.has2E3()) {
      IsRegCall = true;
      Ops.push_back(getAddr<GlobalAddressSDNode, true>(S, DAG, IsLocal));
    } else {
      Ops.push_back(getTargetNode(cast<GlobalAddressSDNode>(Callee), DL, Ty,
                                  DAG, CSKYII::MO_None));
      Ops.push_back(getTargetConstantPoolValue(
          cast<GlobalAddressSDNode>(Callee), Ty, DAG, CSKYII::MO_None));
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(
        *MF.getFunction().getParent(), nullptr);

    if (isPositionIndependent() || !Subtarget.has2E3()) {
      IsRegCall = true;
      Ops.push_back(getAddr<ExternalSymbolSDNode, true>(S, DAG, IsLocal));
    } else {
      Ops.push_back(getTargetNode(cast<ExternalSymbolSDNode>(Callee), DL, Ty,
                                  DAG, CSKYII::MO_None));
      Ops.push_back(getTargetConstantPoolValue(
          cast<ExternalSymbolSDNode>(Callee), Ty, DAG, CSKYII::MO_None));
    }
  } else {
    IsRegCall = true;
    Ops.push_back(Callee);
  }

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(IsRegCall ? CSKYISD::TAILReg : CSKYISD::TAIL, DL,
                       NodeTys, Ops);
  }

  Chain = DAG.getNode(IsRegCall ? CSKYISD::CALLReg : CSKYISD::CALL, DL, NodeTys,
                      Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> CSKYLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, CSKYLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, IsVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : CSKYLocs) {
    // Copy the value out
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;

    if (IsF64OnCSKY) {
      assert(VA.getLocReg() == GPRArgRegs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, GPRArgRegs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(CSKYISD::BITCAST_FROM_LOHI, DL, VA.getValVT(),
                             RetValue, RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);

    InVals.push_back(RetValue);
  }

  return Chain;
}

CCAssignFn *CSKYTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) const {
  if (IsVarArg || !Subtarget.useHardFloatABI())
    return RetCC_CSKY_ABIV2_SOFT;
  else
    return RetCC_CSKY_ABIV2_FP;
}

CCAssignFn *CSKYTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) const {
  if (IsVarArg || !Subtarget.useHardFloatABI())
    return CC_CSKY_ABIV2_SOFT;
  else
    return CC_CSKY_ABIV2_FP;
}

static CSKYCP::CSKYCPModifier getModifier(unsigned Flags) {

  if (Flags == CSKYII::MO_ADDR32)
    return CSKYCP::ADDR;
  else if (Flags == CSKYII::MO_GOT32)
    return CSKYCP::GOT;
  else if (Flags == CSKYII::MO_GOTOFF)
    return CSKYCP::GOTOFF;
  else if (Flags == CSKYII::MO_PLT32)
    return CSKYCP::PLT;
  else if (Flags == CSKYII::MO_None)
    return CSKYCP::NO_MOD;
  else
    assert(0 && "unknown CSKYII Modifier");
  return CSKYCP::NO_MOD;
}

SDValue CSKYTargetLowering::getTargetConstantPoolValue(GlobalAddressSDNode *N,
                                                       EVT Ty,
                                                       SelectionDAG &DAG,
                                                       unsigned Flags) const {
  CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
      N->getGlobal(), CSKYCP::CPValue, 0, getModifier(Flags), false);

  return DAG.getTargetConstantPool(CPV, Ty);
}

CSKYTargetLowering::ConstraintType
CSKYTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'a':
    case 'b':
    case 'v':
    case 'w':
    case 'y':
      return C_RegisterClass;
    case 'c':
    case 'l':
    case 'h':
    case 'z':
      return C_Register;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
CSKYTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                 StringRef Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &CSKY::GPRRegClass);
    case 'a':
      return std::make_pair(0U, &CSKY::mGPRRegClass);
    case 'b':
      return std::make_pair(0U, &CSKY::sGPRRegClass);
    case 'z':
      return std::make_pair(CSKY::R14, &CSKY::GPRRegClass);
    case 'c':
      return std::make_pair(CSKY::C, &CSKY::CARRYRegClass);
    case 'w':
      if ((Subtarget.hasFPUv2SingleFloat() ||
           Subtarget.hasFPUv3SingleFloat()) &&
          VT == MVT::f32)
        return std::make_pair(0U, &CSKY::sFPR32RegClass);
      if ((Subtarget.hasFPUv2DoubleFloat() ||
           Subtarget.hasFPUv3DoubleFloat()) &&
          VT == MVT::f64)
        return std::make_pair(0U, &CSKY::sFPR64RegClass);
      break;
    case 'v':
      if (Subtarget.hasFPUv2SingleFloat() && VT == MVT::f32)
        return std::make_pair(0U, &CSKY::sFPR32RegClass);
      if (Subtarget.hasFPUv3SingleFloat() && VT == MVT::f32)
        return std::make_pair(0U, &CSKY::FPR32RegClass);
      if (Subtarget.hasFPUv2DoubleFloat() && VT == MVT::f64)
        return std::make_pair(0U, &CSKY::sFPR64RegClass);
      if (Subtarget.hasFPUv3DoubleFloat() && VT == MVT::f64)
        return std::make_pair(0U, &CSKY::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  if (Constraint == "{c}")
    return std::make_pair(CSKY::C, &CSKY::CARRYRegClass);

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
  unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
                               .Case("{a0}", CSKY::R0)
                               .Case("{a1}", CSKY::R1)
                               .Case("{a2}", CSKY::R2)
                               .Case("{a3}", CSKY::R3)
                               .Case("{l0}", CSKY::R4)
                               .Case("{l1}", CSKY::R5)
                               .Case("{l2}", CSKY::R6)
                               .Case("{l3}", CSKY::R7)
                               .Case("{l4}", CSKY::R8)
                               .Case("{l5}", CSKY::R9)
                               .Case("{l6}", CSKY::R10)
                               .Case("{l7}", CSKY::R11)
                               .Case("{t0}", CSKY::R12)
                               .Case("{t1}", CSKY::R13)
                               .Case("{sp}", CSKY::R14)
                               .Case("{lr}", CSKY::R15)
                               .Case("{l8}", CSKY::R16)
                               .Case("{l9}", CSKY::R17)
                               .Case("{t2}", CSKY::R18)
                               .Case("{t3}", CSKY::R19)
                               .Case("{t4}", CSKY::R20)
                               .Case("{t5}", CSKY::R21)
                               .Case("{t6}", CSKY::R22)
                               .Cases("{t7}", "{fp}", CSKY::R23)
                               .Cases("{t8}", "{top}", CSKY::R24)
                               .Cases("{t9}", "{bsp}", CSKY::R25)
                               .Case("{r26}", CSKY::R26)
                               .Case("{r27}", CSKY::R27)
                               .Cases("{gb}", "{rgb}", "{rdb}", CSKY::R28)
                               .Cases("{tb}", "{rtb}", CSKY::R29)
                               .Case("{svbr}", CSKY::R30)
                               .Case("{tls}", CSKY::R31)
                               .Default(CSKY::NoRegister);

  if (XRegFromAlias != CSKY::NoRegister)
    return std::make_pair(XRegFromAlias, &CSKY::GPRRegClass);

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, plus we want to match those names to the widest floating
  // point register type available, manually select floating point registers
  // here.
  //
  // The second case is the ABI name of the register, so that frontends can
  // also use the ABI names in register constraint lists.
  if (Subtarget.useHardFloat()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{fr0}", "{vr0}", CSKY::F0_32)
                        .Cases("{fr1}", "{vr1}", CSKY::F1_32)
                        .Cases("{fr2}", "{vr2}", CSKY::F2_32)
                        .Cases("{fr3}", "{vr3}", CSKY::F3_32)
                        .Cases("{fr4}", "{vr4}", CSKY::F4_32)
                        .Cases("{fr5}", "{vr5}", CSKY::F5_32)
                        .Cases("{fr6}", "{vr6}", CSKY::F6_32)
                        .Cases("{fr7}", "{vr7}", CSKY::F7_32)
                        .Cases("{fr8}", "{vr8}", CSKY::F8_32)
                        .Cases("{fr9}", "{vr9}", CSKY::F9_32)
                        .Cases("{fr10}", "{vr10}", CSKY::F10_32)
                        .Cases("{fr11}", "{vr11}", CSKY::F11_32)
                        .Cases("{fr12}", "{vr12}", CSKY::F12_32)
                        .Cases("{fr13}", "{vr13}", CSKY::F13_32)
                        .Cases("{fr14}", "{vr14}", CSKY::F14_32)
                        .Cases("{fr15}", "{vr15}", CSKY::F15_32)
                        .Cases("{fr16}", "{vr16}", CSKY::F16_32)
                        .Cases("{fr17}", "{vr17}", CSKY::F17_32)
                        .Cases("{fr18}", "{vr18}", CSKY::F18_32)
                        .Cases("{fr19}", "{vr19}", CSKY::F19_32)
                        .Cases("{fr20}", "{vr20}", CSKY::F20_32)
                        .Cases("{fr21}", "{vr21}", CSKY::F21_32)
                        .Cases("{fr22}", "{vr22}", CSKY::F22_32)
                        .Cases("{fr23}", "{vr23}", CSKY::F23_32)
                        .Cases("{fr24}", "{vr24}", CSKY::F24_32)
                        .Cases("{fr25}", "{vr25}", CSKY::F25_32)
                        .Cases("{fr26}", "{vr26}", CSKY::F26_32)
                        .Cases("{fr27}", "{vr27}", CSKY::F27_32)
                        .Cases("{fr28}", "{vr28}", CSKY::F28_32)
                        .Cases("{fr29}", "{vr29}", CSKY::F29_32)
                        .Cases("{fr30}", "{vr30}", CSKY::F30_32)
                        .Cases("{fr31}", "{vr31}", CSKY::F31_32)
                        .Default(CSKY::NoRegister);
    if (FReg != CSKY::NoRegister) {
      assert(CSKY::F0_32 <= FReg && FReg <= CSKY::F31_32 && "Unknown fp-reg");
      unsigned RegNo = FReg - CSKY::F0_32;
      unsigned DReg = CSKY::F0_64 + RegNo;

      if (Subtarget.hasFPUv2DoubleFloat())
        return std::make_pair(DReg, &CSKY::sFPR64RegClass);
      else if (Subtarget.hasFPUv3DoubleFloat())
        return std::make_pair(DReg, &CSKY::FPR64RegClass);
      else if (Subtarget.hasFPUv2SingleFloat())
        return std::make_pair(FReg, &CSKY::sFPR32RegClass);
      else if (Subtarget.hasFPUv3SingleFloat())
        return std::make_pair(FReg, &CSKY::FPR32RegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

static MachineBasicBlock *
emitSelectPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode) {

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  // ...
  //  TrueVal = ...
  //  bt32 c, sinkMBB
  //  fallthrough --> copyMBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copyMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copyMBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copyMBB);
  BB->addSuccessor(sinkMBB);

  // bt32 condition, sinkMBB
  BuildMI(BB, DL, TII.get(Opcode))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  // copyMBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copyMBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copyMBB ]
  //  ...
  BB = sinkMBB;

  BuildMI(*BB, BB->begin(), DL, TII.get(CSKY::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copyMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}

MachineBasicBlock *
CSKYTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case CSKY::FSELS:
  case CSKY::FSELD:
    if (Subtarget.hasE2())
      return emitSelectPseudo(MI, BB, CSKY::BT32);
    else
      return emitSelectPseudo(MI, BB, CSKY::BT16);
  case CSKY::ISEL32:
    return emitSelectPseudo(MI, BB, CSKY::BT32);
  case CSKY::ISEL16:
    return emitSelectPseudo(MI, BB, CSKY::BT16);
  }
}

SDValue CSKYTargetLowering::getTargetConstantPoolValue(ExternalSymbolSDNode *N,
                                                       EVT Ty,
                                                       SelectionDAG &DAG,
                                                       unsigned Flags) const {
  CSKYConstantPoolValue *CPV =
      CSKYConstantPoolSymbol::Create(Type::getInt32Ty(*DAG.getContext()),
                                     N->getSymbol(), 0, getModifier(Flags));

  return DAG.getTargetConstantPool(CPV, Ty);
}

SDValue CSKYTargetLowering::getTargetConstantPoolValue(JumpTableSDNode *N,
                                                       EVT Ty,
                                                       SelectionDAG &DAG,
                                                       unsigned Flags) const {
  CSKYConstantPoolValue *CPV =
      CSKYConstantPoolJT::Create(Type::getInt32Ty(*DAG.getContext()),
                                 N->getIndex(), 0, getModifier(Flags));
  return DAG.getTargetConstantPool(CPV, Ty);
}

SDValue CSKYTargetLowering::getTargetConstantPoolValue(BlockAddressSDNode *N,
                                                       EVT Ty,
                                                       SelectionDAG &DAG,
                                                       unsigned Flags) const {
  assert(N->getOffset() == 0);
  CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
      N->getBlockAddress(), CSKYCP::CPBlockAddress, 0, getModifier(Flags),
      false);
  return DAG.getTargetConstantPool(CPV, Ty);
}

SDValue CSKYTargetLowering::getTargetConstantPoolValue(ConstantPoolSDNode *N,
                                                       EVT Ty,
                                                       SelectionDAG &DAG,
                                                       unsigned Flags) const {
  assert(N->getOffset() == 0);
  CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
      N->getConstVal(), Type::getInt32Ty(*DAG.getContext()),
      CSKYCP::CPConstPool, 0, getModifier(Flags), false);
  return DAG.getTargetConstantPool(CPV, Ty);
}

SDValue CSKYTargetLowering::getTargetNode(GlobalAddressSDNode *N, SDLoc DL,
                                          EVT Ty, SelectionDAG &DAG,
                                          unsigned Flags) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

SDValue CSKYTargetLowering::getTargetNode(ExternalSymbolSDNode *N, SDLoc DL,
                                          EVT Ty, SelectionDAG &DAG,
                                          unsigned Flags) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flags);
}

SDValue CSKYTargetLowering::getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flags) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

SDValue CSKYTargetLowering::getTargetNode(BlockAddressSDNode *N, SDLoc DL,
                                          EVT Ty, SelectionDAG &DAG,
                                          unsigned Flags) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

SDValue CSKYTargetLowering::getTargetNode(ConstantPoolSDNode *N, SDLoc DL,
                                          EVT Ty, SelectionDAG &DAG,
                                          unsigned Flags) const {

  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

const char *CSKYTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default:
    llvm_unreachable("unknown CSKYISD node");
  case CSKYISD::NIE:
    return "CSKYISD::NIE";
  case CSKYISD::NIR:
    return "CSKYISD::NIR";
  case CSKYISD::RET:
    return "CSKYISD::RET";
  case CSKYISD::CALL:
    return "CSKYISD::CALL";
  case CSKYISD::CALLReg:
    return "CSKYISD::CALLReg";
  case CSKYISD::TAIL:
    return "CSKYISD::TAIL";
  case CSKYISD::TAILReg:
    return "CSKYISD::TAILReg";
  case CSKYISD::LOAD_ADDR:
    return "CSKYISD::LOAD_ADDR";
  case CSKYISD::BITCAST_TO_LOHI:
    return "CSKYISD::BITCAST_TO_LOHI";
  case CSKYISD::BITCAST_FROM_LOHI:
    return "CSKYISD::BITCAST_FROM_LOHI";
  }
}

SDValue CSKYTargetLowering::LowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr<GlobalAddressSDNode, false>(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, MVT::i32));
  return Addr;
}

SDValue CSKYTargetLowering::LowerExternalSymbol(SDValue Op,
                                                SelectionDAG &DAG) const {
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);

  return getAddr(N, DAG, false);
}

SDValue CSKYTargetLowering::LowerJumpTable(SDValue Op,
                                           SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);

  return getAddr<JumpTableSDNode, false>(N, DAG);
}

SDValue CSKYTargetLowering::LowerBlockAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue CSKYTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(!Subtarget.hasE2());
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue CSKYTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  CSKYMachineFunctionInfo *FuncInfo = MF.getInfo<CSKYMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue CSKYTargetLowering::LowerFRAMEADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  const CSKYRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  Register FrameReg = RI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue CSKYTargetLowering::LowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const CSKYRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }
  // Return the value of the return address register, marking it an implicit
  // live-in.
  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

Register CSKYTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return CSKY::R0;
}

Register CSKYTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return CSKY::R1;
}

SDValue CSKYTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = MVT::i32;

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue CSKYTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                             SelectionDAG &DAG,
                                             bool UseGOT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();

  unsigned CSKYPCLabelIndex = CFI->createPICLabelUId();

  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  CSKYCP::CSKYCPModifier Flag = UseGOT ? CSKYCP::TLSIE : CSKYCP::TLSLE;
  bool AddCurrentAddr = UseGOT ? true : false;
  unsigned char PCAjust = UseGOT ? 4 : 0;

  CSKYConstantPoolValue *CPV =
      CSKYConstantPoolConstant::Create(N->getGlobal(), CSKYCP::CPValue, PCAjust,
                                       Flag, AddCurrentAddr, CSKYPCLabelIndex);
  SDValue CAddr = DAG.getTargetConstantPool(CPV, Ty);

  SDValue Load;
  if (UseGOT) {
    SDValue PICLabel = DAG.getTargetConstant(CSKYPCLabelIndex, DL, MVT::i32);
    auto *LRWGRS = DAG.getMachineNode(CSKY::PseudoTLSLA32, DL, {Ty, Ty},
                                      {CAddr, PICLabel});
    auto LRWADDGRS =
        DAG.getNode(ISD::ADD, DL, Ty, SDValue(LRWGRS, 0), SDValue(LRWGRS, 1));
    Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), LRWADDGRS,
                       MachinePointerInfo(N->getGlobal()));
  } else {
    Load = SDValue(DAG.getMachineNode(CSKY::LRW32, DL, Ty, CAddr), 0);
  }

  // Add the thread pointer.
  SDValue TPReg = DAG.getRegister(CSKY::R31, MVT::i32);
  return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
}

SDValue CSKYTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();

  unsigned CSKYPCLabelIndex = CFI->createPICLabelUId();

  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());

  CSKYConstantPoolValue *CPV =
      CSKYConstantPoolConstant::Create(N->getGlobal(), CSKYCP::CPValue, 4,
                                       CSKYCP::TLSGD, true, CSKYPCLabelIndex);
  SDValue Addr = DAG.getTargetConstantPool(CPV, Ty);
  SDValue PICLabel = DAG.getTargetConstant(CSKYPCLabelIndex, DL, MVT::i32);

  auto *LRWGRS =
      DAG.getMachineNode(CSKY::PseudoTLSLA32, DL, {Ty, Ty}, {Addr, PICLabel});

  auto Load =
      DAG.getNode(ISD::ADD, DL, Ty, SDValue(LRWGRS, 0), SDValue(LRWGRS, 1));

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));
  SDValue V = LowerCallTo(CLI).first;

  return V;
}

bool CSKYTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                SDValue C) const {
  if (!VT.isScalarInteger())
    return false;

  // Bail out if the data size exceeds XLen.
  if (VT.getSizeInBits() > Subtarget.XLen)
    return false;

  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    const APInt &Imm = ConstNode->getAPIntValue();
    // Break MULT to LSLI + ADDU/SUBU.
    if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
        (1 - Imm).isPowerOf2())
      return true;
    // Only break MULT for subtargets without MULT32, since an extra
    // instruction will be generated compared with the above three cases. We
    // leave it unchanged on subtargets with MULT32, since it is not clear
    // that is better.
    if (!Subtarget.hasE2() && (-1 - Imm).isPowerOf2())
      return true;
  }

  return false;
}