1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the interfaces that Hexagon uses to lower LLVM code 10 // into a selection DAG. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "HexagonISelLowering.h" 15 #include "Hexagon.h" 16 #include "HexagonMachineFunctionInfo.h" 17 #include "HexagonRegisterInfo.h" 18 #include "HexagonSubtarget.h" 19 #include "HexagonTargetMachine.h" 20 #include "HexagonTargetObjectFile.h" 21 #include "llvm/ADT/APInt.h" 22 #include "llvm/ADT/ArrayRef.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/StringSwitch.h" 25 #include "llvm/CodeGen/CallingConvLower.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/CodeGen/MachineFunction.h" 28 #include "llvm/CodeGen/MachineMemOperand.h" 29 #include "llvm/CodeGen/MachineRegisterInfo.h" 30 #include "llvm/CodeGen/RuntimeLibcalls.h" 31 #include "llvm/CodeGen/SelectionDAG.h" 32 #include "llvm/CodeGen/TargetCallingConv.h" 33 #include "llvm/CodeGen/ValueTypes.h" 34 #include "llvm/IR/BasicBlock.h" 35 #include "llvm/IR/CallingConv.h" 36 #include "llvm/IR/DataLayout.h" 37 #include "llvm/IR/DerivedTypes.h" 38 #include "llvm/IR/DiagnosticInfo.h" 39 #include "llvm/IR/DiagnosticPrinter.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/GlobalValue.h" 42 #include "llvm/IR/InlineAsm.h" 43 #include "llvm/IR/Instructions.h" 44 #include "llvm/IR/IntrinsicInst.h" 45 #include "llvm/IR/Intrinsics.h" 46 #include "llvm/IR/IntrinsicsHexagon.h" 47 #include "llvm/IR/IRBuilder.h" 48 #include "llvm/IR/Module.h" 49 #include "llvm/IR/Type.h" 50 #include "llvm/IR/Value.h" 51 #include "llvm/MC/MCRegisterInfo.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/CodeGen.h" 54 #include "llvm/Support/CommandLine.h" 55 #include "llvm/Support/Debug.h" 56 #include "llvm/Support/ErrorHandling.h" 57 #include "llvm/Support/MathExtras.h" 58 #include "llvm/Support/raw_ostream.h" 59 #include "llvm/Target/TargetMachine.h" 60 #include <algorithm> 61 #include <cassert> 62 #include <cstddef> 63 #include <cstdint> 64 #include <limits> 65 #include <utility> 66 67 using namespace llvm; 68 69 #define DEBUG_TYPE "hexagon-lowering" 70 71 static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables", 72 cl::init(true), cl::Hidden, 73 cl::desc("Control jump table emission on Hexagon target")); 74 75 static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched", 76 cl::Hidden, cl::ZeroOrMore, cl::init(false), 77 cl::desc("Enable Hexagon SDNode scheduling")); 78 79 static cl::opt<bool> EnableFastMath("ffast-math", 80 cl::Hidden, cl::ZeroOrMore, cl::init(false), 81 cl::desc("Enable Fast Math processing")); 82 83 static cl::opt<int> MinimumJumpTables("minimum-jump-tables", 84 cl::Hidden, cl::ZeroOrMore, cl::init(5), 85 cl::desc("Set minimum jump tables")); 86 87 static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy", 88 cl::Hidden, cl::ZeroOrMore, cl::init(6), 89 cl::desc("Max #stores to inline memcpy")); 90 91 static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", 92 cl::Hidden, cl::ZeroOrMore, cl::init(4), 93 cl::desc("Max #stores to inline memcpy")); 94 95 static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove", 96 cl::Hidden, cl::ZeroOrMore, cl::init(6), 97 cl::desc("Max #stores to inline memmove")); 98 99 static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", 100 cl::Hidden, cl::ZeroOrMore, cl::init(4), 101 cl::desc("Max #stores to inline memmove")); 102 103 static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset", 104 cl::Hidden, cl::ZeroOrMore, cl::init(8), 105 cl::desc("Max #stores to inline memset")); 106 107 static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", 108 cl::Hidden, cl::ZeroOrMore, cl::init(4), 109 cl::desc("Max #stores to inline memset")); 110 111 static cl::opt<bool> AlignLoads("hexagon-align-loads", 112 cl::Hidden, cl::init(false), 113 cl::desc("Rewrite unaligned loads as a pair of aligned loads")); 114 115 static cl::opt<bool> 116 DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, 117 cl::init(false), 118 cl::desc("Disable minimum alignment of 1 for " 119 "arguments passed by value on stack")); 120 121 namespace { 122 123 class HexagonCCState : public CCState { 124 unsigned NumNamedVarArgParams = 0; 125 126 public: 127 HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF, 128 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C, 129 unsigned NumNamedArgs) 130 : CCState(CC, IsVarArg, MF, locs, C), 131 NumNamedVarArgParams(NumNamedArgs) {} 132 unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; } 133 }; 134 135 } // end anonymous namespace 136 137 138 // Implement calling convention for Hexagon. 139 140 static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 141 CCValAssign::LocInfo &LocInfo, 142 ISD::ArgFlagsTy &ArgFlags, CCState &State) { 143 static const MCPhysReg ArgRegs[] = { 144 Hexagon::R0, Hexagon::R1, Hexagon::R2, 145 Hexagon::R3, Hexagon::R4, Hexagon::R5 146 }; 147 const unsigned NumArgRegs = array_lengthof(ArgRegs); 148 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 149 150 // RegNum is an index into ArgRegs: skip a register if RegNum is odd. 151 if (RegNum != NumArgRegs && RegNum % 2 == 1) 152 State.AllocateReg(ArgRegs[RegNum]); 153 154 // Always return false here, as this function only makes sure that the first 155 // unallocated register has an even register number and does not actually 156 // allocate a register for the current argument. 157 return false; 158 } 159 160 #include "HexagonGenCallingConv.inc" 161 162 163 SDValue 164 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) 165 const { 166 return SDValue(); 167 } 168 169 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 170 /// by "Src" to address "Dst" of size "Size". Alignment information is 171 /// specified by the specific parameter attribute. The copy will be passed as 172 /// a byval function parameter. Sometimes what we are copying is the end of a 173 /// larger object, the part that does not fit in registers. 174 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 175 SDValue Chain, ISD::ArgFlagsTy Flags, 176 SelectionDAG &DAG, const SDLoc &dl) { 177 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 178 return DAG.getMemcpy( 179 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(), 180 /*isVolatile=*/false, /*AlwaysInline=*/false, 181 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo()); 182 } 183 184 bool 185 HexagonTargetLowering::CanLowerReturn( 186 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 187 const SmallVectorImpl<ISD::OutputArg> &Outs, 188 LLVMContext &Context) const { 189 SmallVector<CCValAssign, 16> RVLocs; 190 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 191 192 if (MF.getSubtarget<HexagonSubtarget>().useHVXOps()) 193 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX); 194 return CCInfo.CheckReturn(Outs, RetCC_Hexagon); 195 } 196 197 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is 198 // passed by value, the function prototype is modified to return void and 199 // the value is stored in memory pointed by a pointer passed by caller. 200 SDValue 201 HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 202 bool IsVarArg, 203 const SmallVectorImpl<ISD::OutputArg> &Outs, 204 const SmallVectorImpl<SDValue> &OutVals, 205 const SDLoc &dl, SelectionDAG &DAG) const { 206 // CCValAssign - represent the assignment of the return value to locations. 207 SmallVector<CCValAssign, 16> RVLocs; 208 209 // CCState - Info about the registers and stack slot. 210 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 211 *DAG.getContext()); 212 213 // Analyze return values of ISD::RET 214 if (Subtarget.useHVXOps()) 215 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX); 216 else 217 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon); 218 219 SDValue Flag; 220 SmallVector<SDValue, 4> RetOps(1, Chain); 221 222 // Copy the result values into the output registers. 223 for (unsigned i = 0; i != RVLocs.size(); ++i) { 224 CCValAssign &VA = RVLocs[i]; 225 SDValue Val = OutVals[i]; 226 227 switch (VA.getLocInfo()) { 228 default: 229 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt. 230 llvm_unreachable("Unknown loc info!"); 231 case CCValAssign::Full: 232 break; 233 case CCValAssign::BCvt: 234 Val = DAG.getBitcast(VA.getLocVT(), Val); 235 break; 236 case CCValAssign::SExt: 237 Val = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val); 238 break; 239 case CCValAssign::ZExt: 240 Val = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val); 241 break; 242 case CCValAssign::AExt: 243 Val = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val); 244 break; 245 } 246 247 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Val, Flag); 248 249 // Guarantee that all emitted copies are stuck together with flags. 250 Flag = Chain.getValue(1); 251 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 252 } 253 254 RetOps[0] = Chain; // Update chain. 255 256 // Add the flag if we have it. 257 if (Flag.getNode()) 258 RetOps.push_back(Flag); 259 260 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps); 261 } 262 263 bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 264 // If either no tail call or told not to tail call at all, don't. 265 return CI->isTailCall(); 266 } 267 268 Register HexagonTargetLowering::getRegisterByName( 269 const char* RegName, LLT VT, const MachineFunction &) const { 270 // Just support r19, the linux kernel uses it. 271 Register Reg = StringSwitch<Register>(RegName) 272 .Case("r0", Hexagon::R0) 273 .Case("r1", Hexagon::R1) 274 .Case("r2", Hexagon::R2) 275 .Case("r3", Hexagon::R3) 276 .Case("r4", Hexagon::R4) 277 .Case("r5", Hexagon::R5) 278 .Case("r6", Hexagon::R6) 279 .Case("r7", Hexagon::R7) 280 .Case("r8", Hexagon::R8) 281 .Case("r9", Hexagon::R9) 282 .Case("r10", Hexagon::R10) 283 .Case("r11", Hexagon::R11) 284 .Case("r12", Hexagon::R12) 285 .Case("r13", Hexagon::R13) 286 .Case("r14", Hexagon::R14) 287 .Case("r15", Hexagon::R15) 288 .Case("r16", Hexagon::R16) 289 .Case("r17", Hexagon::R17) 290 .Case("r18", Hexagon::R18) 291 .Case("r19", Hexagon::R19) 292 .Case("r20", Hexagon::R20) 293 .Case("r21", Hexagon::R21) 294 .Case("r22", Hexagon::R22) 295 .Case("r23", Hexagon::R23) 296 .Case("r24", Hexagon::R24) 297 .Case("r25", Hexagon::R25) 298 .Case("r26", Hexagon::R26) 299 .Case("r27", Hexagon::R27) 300 .Case("r28", Hexagon::R28) 301 .Case("r29", Hexagon::R29) 302 .Case("r30", Hexagon::R30) 303 .Case("r31", Hexagon::R31) 304 .Case("r1:0", Hexagon::D0) 305 .Case("r3:2", Hexagon::D1) 306 .Case("r5:4", Hexagon::D2) 307 .Case("r7:6", Hexagon::D3) 308 .Case("r9:8", Hexagon::D4) 309 .Case("r11:10", Hexagon::D5) 310 .Case("r13:12", Hexagon::D6) 311 .Case("r15:14", Hexagon::D7) 312 .Case("r17:16", Hexagon::D8) 313 .Case("r19:18", Hexagon::D9) 314 .Case("r21:20", Hexagon::D10) 315 .Case("r23:22", Hexagon::D11) 316 .Case("r25:24", Hexagon::D12) 317 .Case("r27:26", Hexagon::D13) 318 .Case("r29:28", Hexagon::D14) 319 .Case("r31:30", Hexagon::D15) 320 .Case("sp", Hexagon::R29) 321 .Case("fp", Hexagon::R30) 322 .Case("lr", Hexagon::R31) 323 .Case("p0", Hexagon::P0) 324 .Case("p1", Hexagon::P1) 325 .Case("p2", Hexagon::P2) 326 .Case("p3", Hexagon::P3) 327 .Case("sa0", Hexagon::SA0) 328 .Case("lc0", Hexagon::LC0) 329 .Case("sa1", Hexagon::SA1) 330 .Case("lc1", Hexagon::LC1) 331 .Case("m0", Hexagon::M0) 332 .Case("m1", Hexagon::M1) 333 .Case("usr", Hexagon::USR) 334 .Case("ugp", Hexagon::UGP) 335 .Case("cs0", Hexagon::CS0) 336 .Case("cs1", Hexagon::CS1) 337 .Default(Register()); 338 if (Reg) 339 return Reg; 340 341 report_fatal_error("Invalid register name global variable"); 342 } 343 344 /// LowerCallResult - Lower the result values of an ISD::CALL into the 345 /// appropriate copies out of appropriate physical registers. This assumes that 346 /// Chain/Glue are the input chain/glue to use, and that TheCall is the call 347 /// being lowered. Returns a SDNode with the same number of values as the 348 /// ISD::CALL. 349 SDValue HexagonTargetLowering::LowerCallResult( 350 SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg, 351 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 352 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 353 const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const { 354 // Assign locations to each value returned by this call. 355 SmallVector<CCValAssign, 16> RVLocs; 356 357 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 358 *DAG.getContext()); 359 360 if (Subtarget.useHVXOps()) 361 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX); 362 else 363 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon); 364 365 // Copy all of the result registers out of their specified physreg. 366 for (unsigned i = 0; i != RVLocs.size(); ++i) { 367 SDValue RetVal; 368 if (RVLocs[i].getValVT() == MVT::i1) { 369 // Return values of type MVT::i1 require special handling. The reason 370 // is that MVT::i1 is associated with the PredRegs register class, but 371 // values of that type are still returned in R0. Generate an explicit 372 // copy into a predicate register from R0, and treat the value of the 373 // predicate register as the call result. 374 auto &MRI = DAG.getMachineFunction().getRegInfo(); 375 SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), 376 MVT::i32, Glue); 377 // FR0 = (Value, Chain, Glue) 378 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 379 SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR, 380 FR0.getValue(0), FR0.getValue(2)); 381 // TPR = (Chain, Glue) 382 // Don't glue this CopyFromReg, because it copies from a virtual 383 // register. If it is glued to the call, InstrEmitter will add it 384 // as an implicit def to the call (EmitMachineNode). 385 RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1); 386 Glue = TPR.getValue(1); 387 Chain = TPR.getValue(0); 388 } else { 389 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), 390 RVLocs[i].getValVT(), Glue); 391 Glue = RetVal.getValue(2); 392 Chain = RetVal.getValue(1); 393 } 394 InVals.push_back(RetVal.getValue(0)); 395 } 396 397 return Chain; 398 } 399 400 /// LowerCall - Functions arguments are copied from virtual regs to 401 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. 402 SDValue 403 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 404 SmallVectorImpl<SDValue> &InVals) const { 405 SelectionDAG &DAG = CLI.DAG; 406 SDLoc &dl = CLI.DL; 407 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 408 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 409 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 410 SDValue Chain = CLI.Chain; 411 SDValue Callee = CLI.Callee; 412 CallingConv::ID CallConv = CLI.CallConv; 413 bool IsVarArg = CLI.IsVarArg; 414 bool DoesNotReturn = CLI.DoesNotReturn; 415 416 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 417 MachineFunction &MF = DAG.getMachineFunction(); 418 MachineFrameInfo &MFI = MF.getFrameInfo(); 419 auto PtrVT = getPointerTy(MF.getDataLayout()); 420 421 unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0; 422 if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee)) 423 Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32); 424 425 // Linux ABI treats var-arg calls the same way as regular ones. 426 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg; 427 428 // Analyze operands of the call, assigning locations to each operand. 429 SmallVector<CCValAssign, 16> ArgLocs; 430 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(), 431 NumParams); 432 433 if (Subtarget.useHVXOps()) 434 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX); 435 else if (DisableArgsMinAlignment) 436 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy); 437 else 438 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon); 439 440 if (CLI.IsTailCall) { 441 bool StructAttrFlag = MF.getFunction().hasStructRetAttr(); 442 CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 443 IsVarArg, IsStructRet, StructAttrFlag, Outs, 444 OutVals, Ins, DAG); 445 for (const CCValAssign &VA : ArgLocs) { 446 if (VA.isMemLoc()) { 447 CLI.IsTailCall = false; 448 break; 449 } 450 } 451 LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n" 452 : "Argument must be passed on stack. " 453 "Not eligible for Tail Call\n")); 454 } 455 // Get a count of how many bytes are to be pushed on the stack. 456 unsigned NumBytes = CCInfo.getNextStackOffset(); 457 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass; 458 SmallVector<SDValue, 8> MemOpChains; 459 460 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); 461 SDValue StackPtr = 462 DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT); 463 464 bool NeedsArgAlign = false; 465 Align LargestAlignSeen; 466 // Walk the register/memloc assignments, inserting copies/loads. 467 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 468 CCValAssign &VA = ArgLocs[i]; 469 SDValue Arg = OutVals[i]; 470 ISD::ArgFlagsTy Flags = Outs[i].Flags; 471 // Record if we need > 8 byte alignment on an argument. 472 bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT()); 473 NeedsArgAlign |= ArgAlign; 474 475 // Promote the value if needed. 476 switch (VA.getLocInfo()) { 477 default: 478 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt. 479 llvm_unreachable("Unknown loc info!"); 480 case CCValAssign::Full: 481 break; 482 case CCValAssign::BCvt: 483 Arg = DAG.getBitcast(VA.getLocVT(), Arg); 484 break; 485 case CCValAssign::SExt: 486 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 487 break; 488 case CCValAssign::ZExt: 489 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 490 break; 491 case CCValAssign::AExt: 492 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 493 break; 494 } 495 496 if (VA.isMemLoc()) { 497 unsigned LocMemOffset = VA.getLocMemOffset(); 498 SDValue MemAddr = DAG.getConstant(LocMemOffset, dl, 499 StackPtr.getValueType()); 500 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr); 501 if (ArgAlign) 502 LargestAlignSeen = std::max( 503 LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8)); 504 if (Flags.isByVal()) { 505 // The argument is a struct passed by value. According to LLVM, "Arg" 506 // is a pointer. 507 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain, 508 Flags, DAG, dl)); 509 } else { 510 MachinePointerInfo LocPI = MachinePointerInfo::getStack( 511 DAG.getMachineFunction(), LocMemOffset); 512 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI); 513 MemOpChains.push_back(S); 514 } 515 continue; 516 } 517 518 // Arguments that can be passed on register must be kept at RegsToPass 519 // vector. 520 if (VA.isRegLoc()) 521 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 522 } 523 524 if (NeedsArgAlign && Subtarget.hasV60Ops()) { 525 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n"); 526 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass); 527 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign); 528 MFI.ensureMaxAlignment(LargestAlignSeen); 529 } 530 // Transform all store nodes into one single node because all store 531 // nodes are independent of each other. 532 if (!MemOpChains.empty()) 533 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 534 535 SDValue Glue; 536 if (!CLI.IsTailCall) { 537 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 538 Glue = Chain.getValue(1); 539 } 540 541 // Build a sequence of copy-to-reg nodes chained together with token 542 // chain and flag operands which copy the outgoing args into registers. 543 // The Glue is necessary since all emitted instructions must be 544 // stuck together. 545 if (!CLI.IsTailCall) { 546 for (const auto &R : RegsToPass) { 547 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue); 548 Glue = Chain.getValue(1); 549 } 550 } else { 551 // For tail calls lower the arguments to the 'real' stack slot. 552 // 553 // Force all the incoming stack arguments to be loaded from the stack 554 // before any new outgoing arguments are stored to the stack, because the 555 // outgoing stack slots may alias the incoming argument stack slots, and 556 // the alias isn't otherwise explicit. This is slightly more conservative 557 // than necessary, because it means that each store effectively depends 558 // on every argument instead of just those arguments it would clobber. 559 // 560 // Do not flag preceding copytoreg stuff together with the following stuff. 561 Glue = SDValue(); 562 for (const auto &R : RegsToPass) { 563 Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue); 564 Glue = Chain.getValue(1); 565 } 566 Glue = SDValue(); 567 } 568 569 bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls(); 570 unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0; 571 572 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 573 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 574 // node so that legalize doesn't hack it. 575 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 576 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags); 577 } else if (ExternalSymbolSDNode *S = 578 dyn_cast<ExternalSymbolSDNode>(Callee)) { 579 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags); 580 } 581 582 // Returns a chain & a flag for retval copy to use. 583 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 584 SmallVector<SDValue, 8> Ops; 585 Ops.push_back(Chain); 586 Ops.push_back(Callee); 587 588 // Add argument registers to the end of the list so that they are 589 // known live into the call. 590 for (const auto &R : RegsToPass) 591 Ops.push_back(DAG.getRegister(R.first, R.second.getValueType())); 592 593 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv); 594 assert(Mask && "Missing call preserved mask for calling convention"); 595 Ops.push_back(DAG.getRegisterMask(Mask)); 596 597 if (Glue.getNode()) 598 Ops.push_back(Glue); 599 600 if (CLI.IsTailCall) { 601 MFI.setHasTailCall(); 602 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops); 603 } 604 605 // Set this here because we need to know this for "hasFP" in frame lowering. 606 // The target-independent code calls getFrameRegister before setting it, and 607 // getFrameRegister uses hasFP to determine whether the function has FP. 608 MFI.setHasCalls(true); 609 610 unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL; 611 Chain = DAG.getNode(OpCode, dl, NodeTys, Ops); 612 Glue = Chain.getValue(1); 613 614 // Create the CALLSEQ_END node. 615 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 616 DAG.getIntPtrConstant(0, dl, true), Glue, dl); 617 Glue = Chain.getValue(1); 618 619 // Handle result values, copying them out of physregs into vregs that we 620 // return. 621 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG, 622 InVals, OutVals, Callee); 623 } 624 625 /// Returns true by value, base pointer and offset pointer and addressing 626 /// mode by reference if this node can be combined with a load / store to 627 /// form a post-indexed load / store. 628 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 629 SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, 630 SelectionDAG &DAG) const { 631 LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N); 632 if (!LSN) 633 return false; 634 EVT VT = LSN->getMemoryVT(); 635 if (!VT.isSimple()) 636 return false; 637 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || 638 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 || 639 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 || 640 VT == MVT::v4i16 || VT == MVT::v8i8 || 641 Subtarget.isHVXVectorType(VT.getSimpleVT()); 642 if (!IsLegalType) 643 return false; 644 645 if (Op->getOpcode() != ISD::ADD) 646 return false; 647 Base = Op->getOperand(0); 648 Offset = Op->getOperand(1); 649 if (!isa<ConstantSDNode>(Offset.getNode())) 650 return false; 651 AM = ISD::POST_INC; 652 653 int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue(); 654 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V); 655 } 656 657 SDValue 658 HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { 659 MachineFunction &MF = DAG.getMachineFunction(); 660 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>(); 661 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); 662 unsigned LR = HRI.getRARegister(); 663 664 if ((Op.getOpcode() != ISD::INLINEASM && 665 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR()) 666 return Op; 667 668 unsigned NumOps = Op.getNumOperands(); 669 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue) 670 --NumOps; // Ignore the flag operand. 671 672 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { 673 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue(); 674 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); 675 ++i; // Skip the ID value. 676 677 switch (InlineAsm::getKind(Flags)) { 678 default: 679 llvm_unreachable("Bad flags!"); 680 case InlineAsm::Kind_RegUse: 681 case InlineAsm::Kind_Imm: 682 case InlineAsm::Kind_Mem: 683 i += NumVals; 684 break; 685 case InlineAsm::Kind_Clobber: 686 case InlineAsm::Kind_RegDef: 687 case InlineAsm::Kind_RegDefEarlyClobber: { 688 for (; NumVals; --NumVals, ++i) { 689 Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg(); 690 if (Reg != LR) 691 continue; 692 HMFI.setHasClobberLR(true); 693 return Op; 694 } 695 break; 696 } 697 } 698 } 699 700 return Op; 701 } 702 703 // Need to transform ISD::PREFETCH into something that doesn't inherit 704 // all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and 705 // SDNPMayStore. 706 SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op, 707 SelectionDAG &DAG) const { 708 SDValue Chain = Op.getOperand(0); 709 SDValue Addr = Op.getOperand(1); 710 // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in, 711 // if the "reg" is fed by an "add". 712 SDLoc DL(Op); 713 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 714 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero); 715 } 716 717 // Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode 718 // is marked as having side-effects, while the register read on Hexagon does 719 // not have any. TableGen refuses to accept the direct pattern from that node 720 // to the A4_tfrcpp. 721 SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 722 SelectionDAG &DAG) const { 723 SDValue Chain = Op.getOperand(0); 724 SDLoc dl(Op); 725 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other); 726 return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain); 727 } 728 729 SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 730 SelectionDAG &DAG) const { 731 SDValue Chain = Op.getOperand(0); 732 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 733 // Lower the hexagon_prefetch builtin to DCFETCH, as above. 734 if (IntNo == Intrinsic::hexagon_prefetch) { 735 SDValue Addr = Op.getOperand(2); 736 SDLoc DL(Op); 737 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 738 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero); 739 } 740 return SDValue(); 741 } 742 743 SDValue 744 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 745 SelectionDAG &DAG) const { 746 SDValue Chain = Op.getOperand(0); 747 SDValue Size = Op.getOperand(1); 748 SDValue Align = Op.getOperand(2); 749 SDLoc dl(Op); 750 751 ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align); 752 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC"); 753 754 unsigned A = AlignConst->getSExtValue(); 755 auto &HFI = *Subtarget.getFrameLowering(); 756 // "Zero" means natural stack alignment. 757 if (A == 0) 758 A = HFI.getStackAlign().value(); 759 760 LLVM_DEBUG({ 761 dbgs () << __func__ << " Align: " << A << " Size: "; 762 Size.getNode()->dump(&DAG); 763 dbgs() << "\n"; 764 }); 765 766 SDValue AC = DAG.getConstant(A, dl, MVT::i32); 767 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other); 768 SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC); 769 770 DAG.ReplaceAllUsesOfValueWith(Op, AA); 771 return AA; 772 } 773 774 SDValue HexagonTargetLowering::LowerFormalArguments( 775 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, 776 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 777 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 778 MachineFunction &MF = DAG.getMachineFunction(); 779 MachineFrameInfo &MFI = MF.getFrameInfo(); 780 MachineRegisterInfo &MRI = MF.getRegInfo(); 781 782 // Linux ABI treats var-arg calls the same way as regular ones. 783 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg; 784 785 // Assign locations to all of the incoming arguments. 786 SmallVector<CCValAssign, 16> ArgLocs; 787 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, 788 *DAG.getContext(), 789 MF.getFunction().getFunctionType()->getNumParams()); 790 791 if (Subtarget.useHVXOps()) 792 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX); 793 else if (DisableArgsMinAlignment) 794 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy); 795 else 796 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon); 797 798 // For LLVM, in the case when returning a struct by value (>8byte), 799 // the first argument is a pointer that points to the location on caller's 800 // stack where the return value will be stored. For Hexagon, the location on 801 // caller's stack is passed only when the struct size is smaller than (and 802 // equal to) 8 bytes. If not, no address will be passed into callee and 803 // callee return the result direclty through R0/R1. 804 auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) { 805 switch (RC.getID()) { 806 case Hexagon::IntRegsRegClassID: 807 return Reg - Hexagon::R0 + 1; 808 case Hexagon::DoubleRegsRegClassID: 809 return (Reg - Hexagon::D0 + 1) * 2; 810 case Hexagon::HvxVRRegClassID: 811 return Reg - Hexagon::V0 + 1; 812 case Hexagon::HvxWRRegClassID: 813 return (Reg - Hexagon::W0 + 1) * 2; 814 } 815 llvm_unreachable("Unexpected register class"); 816 }; 817 818 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering()); 819 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>(); 820 HFL.FirstVarArgSavedReg = 0; 821 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects())); 822 823 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 824 CCValAssign &VA = ArgLocs[i]; 825 ISD::ArgFlagsTy Flags = Ins[i].Flags; 826 bool ByVal = Flags.isByVal(); 827 828 // Arguments passed in registers: 829 // 1. 32- and 64-bit values and HVX vectors are passed directly, 830 // 2. Large structs are passed via an address, and the address is 831 // passed in a register. 832 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8) 833 llvm_unreachable("ByValSize must be bigger than 8 bytes"); 834 835 bool InReg = VA.isRegLoc() && 836 (!ByVal || (ByVal && Flags.getByValSize() > 8)); 837 838 if (InReg) { 839 MVT RegVT = VA.getLocVT(); 840 if (VA.getLocInfo() == CCValAssign::BCvt) 841 RegVT = VA.getValVT(); 842 843 const TargetRegisterClass *RC = getRegClassFor(RegVT); 844 Register VReg = MRI.createVirtualRegister(RC); 845 SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT); 846 847 // Treat values of type MVT::i1 specially: they are passed in 848 // registers of type i32, but they need to remain as values of 849 // type i1 for consistency of the argument lowering. 850 if (VA.getValVT() == MVT::i1) { 851 assert(RegVT.getSizeInBits() <= 32); 852 SDValue T = DAG.getNode(ISD::AND, dl, RegVT, 853 Copy, DAG.getConstant(1, dl, RegVT)); 854 Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT), 855 ISD::SETNE); 856 } else { 857 #ifndef NDEBUG 858 unsigned RegSize = RegVT.getSizeInBits(); 859 assert(RegSize == 32 || RegSize == 64 || 860 Subtarget.isHVXVectorType(RegVT)); 861 #endif 862 } 863 InVals.push_back(Copy); 864 MRI.addLiveIn(VA.getLocReg(), VReg); 865 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg()); 866 } else { 867 assert(VA.isMemLoc() && "Argument should be passed in memory"); 868 869 // If it's a byval parameter, then we need to compute the 870 // "real" size, not the size of the pointer. 871 unsigned ObjSize = Flags.isByVal() 872 ? Flags.getByValSize() 873 : VA.getLocVT().getStoreSizeInBits() / 8; 874 875 // Create the frame index object for this incoming parameter. 876 int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset(); 877 int FI = MFI.CreateFixedObject(ObjSize, Offset, true); 878 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 879 880 if (Flags.isByVal()) { 881 // If it's a pass-by-value aggregate, then do not dereference the stack 882 // location. Instead, we should generate a reference to the stack 883 // location. 884 InVals.push_back(FIN); 885 } else { 886 SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 887 MachinePointerInfo::getFixedStack(MF, FI, 0)); 888 InVals.push_back(L); 889 } 890 } 891 } 892 893 if (IsVarArg && Subtarget.isEnvironmentMusl()) { 894 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++) 895 MRI.addLiveIn(Hexagon::R0+i); 896 } 897 898 if (IsVarArg && Subtarget.isEnvironmentMusl()) { 899 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1); 900 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects())); 901 902 // Create Frame index for the start of register saved area. 903 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg; 904 bool RequiresPadding = (NumVarArgRegs & 1); 905 int RegSaveAreaSizePlusPadding = RequiresPadding 906 ? (NumVarArgRegs + 1) * 4 907 : NumVarArgRegs * 4; 908 909 if (RegSaveAreaSizePlusPadding > 0) { 910 // The offset to saved register area should be 8 byte aligned. 911 int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset(); 912 if (!(RegAreaStart % 8)) 913 RegAreaStart = (RegAreaStart + 7) & -8; 914 915 int RegSaveAreaFrameIndex = 916 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true); 917 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex); 918 919 // This will point to the next argument passed via stack. 920 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding; 921 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true); 922 HMFI.setVarArgsFrameIndex(FI); 923 } else { 924 // This will point to the next argument passed via stack, when 925 // there is no saved register area. 926 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset(); 927 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true); 928 HMFI.setRegSavedAreaStartFrameIndex(FI); 929 HMFI.setVarArgsFrameIndex(FI); 930 } 931 } 932 933 934 if (IsVarArg && !Subtarget.isEnvironmentMusl()) { 935 // This will point to the next argument passed via stack. 936 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset(); 937 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true); 938 HMFI.setVarArgsFrameIndex(FI); 939 } 940 941 return Chain; 942 } 943 944 SDValue 945 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 946 // VASTART stores the address of the VarArgsFrameIndex slot into the 947 // memory location argument. 948 MachineFunction &MF = DAG.getMachineFunction(); 949 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>(); 950 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32); 951 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 952 953 if (!Subtarget.isEnvironmentMusl()) { 954 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1), 955 MachinePointerInfo(SV)); 956 } 957 auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>(); 958 auto &HFL = *Subtarget.getFrameLowering(); 959 SDLoc DL(Op); 960 SmallVector<SDValue, 8> MemOps; 961 962 // Get frame index of va_list. 963 SDValue FIN = Op.getOperand(1); 964 965 // If first Vararg register is odd, add 4 bytes to start of 966 // saved register area to point to the first register location. 967 // This is because the saved register area has to be 8 byte aligned. 968 // Incase of an odd start register, there will be 4 bytes of padding in 969 // the beginning of saved register area. If all registers area used up, 970 // the following condition will handle it correctly. 971 SDValue SavedRegAreaStartFrameIndex = 972 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32); 973 974 auto PtrVT = getPointerTy(DAG.getDataLayout()); 975 976 if (HFL.FirstVarArgSavedReg & 1) 977 SavedRegAreaStartFrameIndex = 978 DAG.getNode(ISD::ADD, DL, PtrVT, 979 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), 980 MVT::i32), 981 DAG.getIntPtrConstant(4, DL)); 982 983 // Store the saved register area start pointer. 984 SDValue Store = 985 DAG.getStore(Op.getOperand(0), DL, 986 SavedRegAreaStartFrameIndex, 987 FIN, MachinePointerInfo(SV)); 988 MemOps.push_back(Store); 989 990 // Store saved register area end pointer. 991 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, 992 FIN, DAG.getIntPtrConstant(4, DL)); 993 Store = DAG.getStore(Op.getOperand(0), DL, 994 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(), 995 PtrVT), 996 FIN, MachinePointerInfo(SV, 4)); 997 MemOps.push_back(Store); 998 999 // Store overflow area pointer. 1000 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, 1001 FIN, DAG.getIntPtrConstant(4, DL)); 1002 Store = DAG.getStore(Op.getOperand(0), DL, 1003 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(), 1004 PtrVT), 1005 FIN, MachinePointerInfo(SV, 8)); 1006 MemOps.push_back(Store); 1007 1008 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); 1009 } 1010 1011 SDValue 1012 HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 1013 // Assert that the linux ABI is enabled for the current compilation. 1014 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled"); 1015 SDValue Chain = Op.getOperand(0); 1016 SDValue DestPtr = Op.getOperand(1); 1017 SDValue SrcPtr = Op.getOperand(2); 1018 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1019 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1020 SDLoc DL(Op); 1021 // Size of the va_list is 12 bytes as it has 3 pointers. Therefore, 1022 // we need to memcopy 12 bytes from va_list to another similar list. 1023 return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr, 1024 DAG.getIntPtrConstant(12, DL), Align(4), 1025 /*isVolatile*/ false, false, false, 1026 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); 1027 } 1028 1029 SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1030 const SDLoc &dl(Op); 1031 SDValue LHS = Op.getOperand(0); 1032 SDValue RHS = Op.getOperand(1); 1033 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1034 MVT ResTy = ty(Op); 1035 MVT OpTy = ty(LHS); 1036 1037 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) { 1038 MVT ElemTy = OpTy.getVectorElementType(); 1039 assert(ElemTy.isScalarInteger()); 1040 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()), 1041 OpTy.getVectorNumElements()); 1042 return DAG.getSetCC(dl, ResTy, 1043 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy), 1044 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC); 1045 } 1046 1047 // Treat all other vector types as legal. 1048 if (ResTy.isVector()) 1049 return Op; 1050 1051 // Comparisons of short integers should use sign-extend, not zero-extend, 1052 // since we can represent small negative values in the compare instructions. 1053 // The LLVM default is to use zero-extend arbitrarily in these cases. 1054 auto isSExtFree = [this](SDValue N) { 1055 switch (N.getOpcode()) { 1056 case ISD::TRUNCATE: { 1057 // A sign-extend of a truncate of a sign-extend is free. 1058 SDValue Op = N.getOperand(0); 1059 if (Op.getOpcode() != ISD::AssertSext) 1060 return false; 1061 EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1062 unsigned ThisBW = ty(N).getSizeInBits(); 1063 unsigned OrigBW = OrigTy.getSizeInBits(); 1064 // The type that was sign-extended to get the AssertSext must be 1065 // narrower than the type of N (so that N has still the same value 1066 // as the original). 1067 return ThisBW >= OrigBW; 1068 } 1069 case ISD::LOAD: 1070 // We have sign-extended loads. 1071 return true; 1072 } 1073 return false; 1074 }; 1075 1076 if (OpTy == MVT::i8 || OpTy == MVT::i16) { 1077 ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS); 1078 bool IsNegative = C && C->getAPIntValue().isNegative(); 1079 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS)) 1080 return DAG.getSetCC(dl, ResTy, 1081 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32), 1082 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC); 1083 } 1084 1085 return SDValue(); 1086 } 1087 1088 SDValue 1089 HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const { 1090 SDValue PredOp = Op.getOperand(0); 1091 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2); 1092 MVT OpTy = ty(Op1); 1093 const SDLoc &dl(Op); 1094 1095 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) { 1096 MVT ElemTy = OpTy.getVectorElementType(); 1097 assert(ElemTy.isScalarInteger()); 1098 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()), 1099 OpTy.getVectorNumElements()); 1100 // Generate (trunc (select (_, sext, sext))). 1101 return DAG.getSExtOrTrunc( 1102 DAG.getSelect(dl, WideTy, PredOp, 1103 DAG.getSExtOrTrunc(Op1, dl, WideTy), 1104 DAG.getSExtOrTrunc(Op2, dl, WideTy)), 1105 dl, OpTy); 1106 } 1107 1108 return SDValue(); 1109 } 1110 1111 SDValue 1112 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 1113 EVT ValTy = Op.getValueType(); 1114 ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op); 1115 Constant *CVal = nullptr; 1116 bool isVTi1Type = false; 1117 if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) { 1118 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) { 1119 IRBuilder<> IRB(CV->getContext()); 1120 SmallVector<Constant*, 128> NewConst; 1121 unsigned VecLen = CV->getNumOperands(); 1122 assert(isPowerOf2_32(VecLen) && 1123 "conversion only supported for pow2 VectorSize"); 1124 for (unsigned i = 0; i < VecLen; ++i) 1125 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue())); 1126 1127 CVal = ConstantVector::get(NewConst); 1128 isVTi1Type = true; 1129 } 1130 } 1131 Align Alignment = CPN->getAlign(); 1132 bool IsPositionIndependent = isPositionIndependent(); 1133 unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0; 1134 1135 unsigned Offset = 0; 1136 SDValue T; 1137 if (CPN->isMachineConstantPoolEntry()) 1138 T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment, 1139 Offset, TF); 1140 else if (isVTi1Type) 1141 T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF); 1142 else 1143 T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset, 1144 TF); 1145 1146 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF && 1147 "Inconsistent target flag encountered"); 1148 1149 if (IsPositionIndependent) 1150 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T); 1151 return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T); 1152 } 1153 1154 SDValue 1155 HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1156 EVT VT = Op.getValueType(); 1157 int Idx = cast<JumpTableSDNode>(Op)->getIndex(); 1158 if (isPositionIndependent()) { 1159 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL); 1160 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T); 1161 } 1162 1163 SDValue T = DAG.getTargetJumpTable(Idx, VT); 1164 return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T); 1165 } 1166 1167 SDValue 1168 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { 1169 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); 1170 MachineFunction &MF = DAG.getMachineFunction(); 1171 MachineFrameInfo &MFI = MF.getFrameInfo(); 1172 MFI.setReturnAddressIsTaken(true); 1173 1174 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 1175 return SDValue(); 1176 1177 EVT VT = Op.getValueType(); 1178 SDLoc dl(Op); 1179 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1180 if (Depth) { 1181 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 1182 SDValue Offset = DAG.getConstant(4, dl, MVT::i32); 1183 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 1184 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 1185 MachinePointerInfo()); 1186 } 1187 1188 // Return LR, which contains the return address. Mark it an implicit live-in. 1189 Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32)); 1190 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 1191 } 1192 1193 SDValue 1194 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 1195 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); 1196 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 1197 MFI.setFrameAddressIsTaken(true); 1198 1199 EVT VT = Op.getValueType(); 1200 SDLoc dl(Op); 1201 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1202 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 1203 HRI.getFrameRegister(), VT); 1204 while (Depth--) 1205 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 1206 MachinePointerInfo()); 1207 return FrameAddr; 1208 } 1209 1210 SDValue 1211 HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const { 1212 SDLoc dl(Op); 1213 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0)); 1214 } 1215 1216 SDValue 1217 HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const { 1218 SDLoc dl(Op); 1219 auto *GAN = cast<GlobalAddressSDNode>(Op); 1220 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1221 auto *GV = GAN->getGlobal(); 1222 int64_t Offset = GAN->getOffset(); 1223 1224 auto &HLOF = *HTM.getObjFileLowering(); 1225 Reloc::Model RM = HTM.getRelocationModel(); 1226 1227 if (RM == Reloc::Static) { 1228 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset); 1229 const GlobalObject *GO = GV->getAliaseeObject(); 1230 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM)) 1231 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA); 1232 return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA); 1233 } 1234 1235 bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 1236 if (UsePCRel) { 1237 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset, 1238 HexagonII::MO_PCREL); 1239 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA); 1240 } 1241 1242 // Use GOT index. 1243 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1244 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT); 1245 SDValue Off = DAG.getConstant(Offset, dl, MVT::i32); 1246 return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off); 1247 } 1248 1249 // Specifies that for loads and stores VT can be promoted to PromotedLdStVT. 1250 SDValue 1251 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 1252 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1253 SDLoc dl(Op); 1254 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 1255 1256 Reloc::Model RM = HTM.getRelocationModel(); 1257 if (RM == Reloc::Static) { 1258 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT); 1259 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A); 1260 } 1261 1262 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL); 1263 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A); 1264 } 1265 1266 SDValue 1267 HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) 1268 const { 1269 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 1270 SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT, 1271 HexagonII::MO_PCREL); 1272 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym); 1273 } 1274 1275 SDValue 1276 HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, 1277 GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg, 1278 unsigned char OperandFlags) const { 1279 MachineFunction &MF = DAG.getMachineFunction(); 1280 MachineFrameInfo &MFI = MF.getFrameInfo(); 1281 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1282 SDLoc dl(GA); 1283 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 1284 GA->getValueType(0), 1285 GA->getOffset(), 1286 OperandFlags); 1287 // Create Operands for the call.The Operands should have the following: 1288 // 1. Chain SDValue 1289 // 2. Callee which in this case is the Global address value. 1290 // 3. Registers live into the call.In this case its R0, as we 1291 // have just one argument to be passed. 1292 // 4. Glue. 1293 // Note: The order is important. 1294 1295 const auto &HRI = *Subtarget.getRegisterInfo(); 1296 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C); 1297 assert(Mask && "Missing call preserved mask for calling convention"); 1298 SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT), 1299 DAG.getRegisterMask(Mask), Glue }; 1300 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops); 1301 1302 // Inform MFI that function has calls. 1303 MFI.setAdjustsStack(true); 1304 1305 Glue = Chain.getValue(1); 1306 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue); 1307 } 1308 1309 // 1310 // Lower using the intial executable model for TLS addresses 1311 // 1312 SDValue 1313 HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, 1314 SelectionDAG &DAG) const { 1315 SDLoc dl(GA); 1316 int64_t Offset = GA->getOffset(); 1317 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1318 1319 // Get the thread pointer. 1320 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT); 1321 1322 bool IsPositionIndependent = isPositionIndependent(); 1323 unsigned char TF = 1324 IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE; 1325 1326 // First generate the TLS symbol address 1327 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, 1328 Offset, TF); 1329 1330 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA); 1331 1332 if (IsPositionIndependent) { 1333 // Generate the GOT pointer in case of position independent code 1334 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG); 1335 1336 // Add the TLS Symbol address to GOT pointer.This gives 1337 // GOT relative relocation for the symbol. 1338 Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym); 1339 } 1340 1341 // Load the offset value for TLS symbol.This offset is relative to 1342 // thread pointer. 1343 SDValue LoadOffset = 1344 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo()); 1345 1346 // Address of the thread local variable is the add of thread 1347 // pointer and the offset of the variable. 1348 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset); 1349 } 1350 1351 // 1352 // Lower using the local executable model for TLS addresses 1353 // 1354 SDValue 1355 HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, 1356 SelectionDAG &DAG) const { 1357 SDLoc dl(GA); 1358 int64_t Offset = GA->getOffset(); 1359 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1360 1361 // Get the thread pointer. 1362 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT); 1363 // Generate the TLS symbol address 1364 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset, 1365 HexagonII::MO_TPREL); 1366 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA); 1367 1368 // Address of the thread local variable is the add of thread 1369 // pointer and the offset of the variable. 1370 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym); 1371 } 1372 1373 // 1374 // Lower using the general dynamic model for TLS addresses 1375 // 1376 SDValue 1377 HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1378 SelectionDAG &DAG) const { 1379 SDLoc dl(GA); 1380 int64_t Offset = GA->getOffset(); 1381 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1382 1383 // First generate the TLS symbol address 1384 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset, 1385 HexagonII::MO_GDGOT); 1386 1387 // Then, generate the GOT pointer 1388 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG); 1389 1390 // Add the TLS symbol and the GOT pointer 1391 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA); 1392 SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym); 1393 1394 // Copy over the argument to R0 1395 SDValue InFlag; 1396 Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag); 1397 InFlag = Chain.getValue(1); 1398 1399 unsigned Flags = 1400 static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls() 1401 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended 1402 : HexagonII::MO_GDPLT; 1403 1404 return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT, 1405 Hexagon::R0, Flags); 1406 } 1407 1408 // 1409 // Lower TLS addresses. 1410 // 1411 // For now for dynamic models, we only support the general dynamic model. 1412 // 1413 SDValue 1414 HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1415 SelectionDAG &DAG) const { 1416 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1417 1418 switch (HTM.getTLSModel(GA->getGlobal())) { 1419 case TLSModel::GeneralDynamic: 1420 case TLSModel::LocalDynamic: 1421 return LowerToTLSGeneralDynamicModel(GA, DAG); 1422 case TLSModel::InitialExec: 1423 return LowerToTLSInitialExecModel(GA, DAG); 1424 case TLSModel::LocalExec: 1425 return LowerToTLSLocalExecModel(GA, DAG); 1426 } 1427 llvm_unreachable("Bogus TLS model"); 1428 } 1429 1430 //===----------------------------------------------------------------------===// 1431 // TargetLowering Implementation 1432 //===----------------------------------------------------------------------===// 1433 1434 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM, 1435 const HexagonSubtarget &ST) 1436 : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)), 1437 Subtarget(ST) { 1438 auto &HRI = *Subtarget.getRegisterInfo(); 1439 1440 setPrefLoopAlignment(Align(16)); 1441 setMinFunctionAlignment(Align(4)); 1442 setPrefFunctionAlignment(Align(16)); 1443 setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); 1444 setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); 1445 setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); 1446 1447 setMaxAtomicSizeInBitsSupported(64); 1448 setMinCmpXchgSizeInBits(32); 1449 1450 if (EnableHexSDNodeSched) 1451 setSchedulingPreference(Sched::VLIW); 1452 else 1453 setSchedulingPreference(Sched::Source); 1454 1455 // Limits for inline expansion of memcpy/memmove 1456 MaxStoresPerMemcpy = MaxStoresPerMemcpyCL; 1457 MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL; 1458 MaxStoresPerMemmove = MaxStoresPerMemmoveCL; 1459 MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL; 1460 MaxStoresPerMemset = MaxStoresPerMemsetCL; 1461 MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL; 1462 1463 // 1464 // Set up register classes. 1465 // 1466 1467 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass); 1468 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa 1469 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa 1470 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba 1471 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass); 1472 addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass); 1473 addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass); 1474 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass); 1475 addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass); 1476 addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass); 1477 addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass); 1478 1479 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass); 1480 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass); 1481 1482 // 1483 // Handling of scalar operations. 1484 // 1485 // All operations default to "legal", except: 1486 // - indexed loads and stores (pre-/post-incremented), 1487 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS, 1488 // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN, 1489 // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP, 1490 // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG, 1491 // which default to "expand" for at least one type. 1492 1493 // Misc operations. 1494 setOperationAction(ISD::ConstantFP, MVT::f32, Legal); 1495 setOperationAction(ISD::ConstantFP, MVT::f64, Legal); 1496 setOperationAction(ISD::TRAP, MVT::Other, Legal); 1497 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 1498 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 1499 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 1500 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 1501 setOperationAction(ISD::INLINEASM, MVT::Other, Custom); 1502 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom); 1503 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 1504 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); 1505 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 1506 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); 1507 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); 1508 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 1509 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 1510 1511 // Custom legalize GlobalAddress nodes into CONST32. 1512 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 1513 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom); 1514 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 1515 1516 // Hexagon needs to optimize cases with negative constants. 1517 setOperationAction(ISD::SETCC, MVT::i8, Custom); 1518 setOperationAction(ISD::SETCC, MVT::i16, Custom); 1519 setOperationAction(ISD::SETCC, MVT::v4i8, Custom); 1520 setOperationAction(ISD::SETCC, MVT::v2i16, Custom); 1521 1522 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1523 setOperationAction(ISD::VASTART, MVT::Other, Custom); 1524 setOperationAction(ISD::VAEND, MVT::Other, Expand); 1525 setOperationAction(ISD::VAARG, MVT::Other, Expand); 1526 if (Subtarget.isEnvironmentMusl()) 1527 setOperationAction(ISD::VACOPY, MVT::Other, Custom); 1528 else 1529 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 1530 1531 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 1532 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 1533 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 1534 1535 if (EmitJumpTables) 1536 setMinimumJumpTableEntries(MinimumJumpTables); 1537 else 1538 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max()); 1539 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 1540 1541 for (unsigned LegalIntOp : 1542 {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) { 1543 setOperationAction(LegalIntOp, MVT::i32, Legal); 1544 setOperationAction(LegalIntOp, MVT::i64, Legal); 1545 } 1546 1547 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit, 1548 // but they only operate on i64. 1549 for (MVT VT : MVT::integer_valuetypes()) { 1550 setOperationAction(ISD::UADDO, VT, Custom); 1551 setOperationAction(ISD::USUBO, VT, Custom); 1552 setOperationAction(ISD::SADDO, VT, Expand); 1553 setOperationAction(ISD::SSUBO, VT, Expand); 1554 setOperationAction(ISD::ADDCARRY, VT, Expand); 1555 setOperationAction(ISD::SUBCARRY, VT, Expand); 1556 } 1557 setOperationAction(ISD::ADDCARRY, MVT::i64, Custom); 1558 setOperationAction(ISD::SUBCARRY, MVT::i64, Custom); 1559 1560 setOperationAction(ISD::CTLZ, MVT::i8, Promote); 1561 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 1562 setOperationAction(ISD::CTTZ, MVT::i8, Promote); 1563 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 1564 1565 // Popcount can count # of 1s in i64 but returns i32. 1566 setOperationAction(ISD::CTPOP, MVT::i8, Promote); 1567 setOperationAction(ISD::CTPOP, MVT::i16, Promote); 1568 setOperationAction(ISD::CTPOP, MVT::i32, Promote); 1569 setOperationAction(ISD::CTPOP, MVT::i64, Legal); 1570 1571 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 1572 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 1573 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 1574 setOperationAction(ISD::BSWAP, MVT::i64, Legal); 1575 1576 setOperationAction(ISD::FSHL, MVT::i32, Legal); 1577 setOperationAction(ISD::FSHL, MVT::i64, Legal); 1578 setOperationAction(ISD::FSHR, MVT::i32, Legal); 1579 setOperationAction(ISD::FSHR, MVT::i64, Legal); 1580 1581 for (unsigned IntExpOp : 1582 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, 1583 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, 1584 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, 1585 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) { 1586 for (MVT VT : MVT::integer_valuetypes()) 1587 setOperationAction(IntExpOp, VT, Expand); 1588 } 1589 1590 for (unsigned FPExpOp : 1591 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, 1592 ISD::FPOW, ISD::FCOPYSIGN}) { 1593 for (MVT VT : MVT::fp_valuetypes()) 1594 setOperationAction(FPExpOp, VT, Expand); 1595 } 1596 1597 // No extending loads from i32. 1598 for (MVT VT : MVT::integer_valuetypes()) { 1599 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand); 1600 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand); 1601 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand); 1602 } 1603 // Turn FP truncstore into trunc + store. 1604 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 1605 // Turn FP extload into load/fpextend. 1606 for (MVT VT : MVT::fp_valuetypes()) 1607 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 1608 1609 // Expand BR_CC and SELECT_CC for all integer and fp types. 1610 for (MVT VT : MVT::integer_valuetypes()) { 1611 setOperationAction(ISD::BR_CC, VT, Expand); 1612 setOperationAction(ISD::SELECT_CC, VT, Expand); 1613 } 1614 for (MVT VT : MVT::fp_valuetypes()) { 1615 setOperationAction(ISD::BR_CC, VT, Expand); 1616 setOperationAction(ISD::SELECT_CC, VT, Expand); 1617 } 1618 setOperationAction(ISD::BR_CC, MVT::Other, Expand); 1619 1620 // 1621 // Handling of vector operations. 1622 // 1623 1624 // Set the action for vector operations to "expand", then override it with 1625 // either "custom" or "legal" for specific cases. 1626 static const unsigned VectExpOps[] = { 1627 // Integer arithmetic: 1628 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV, 1629 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO, 1630 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI, 1631 // Logical/bit: 1632 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR, 1633 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, 1634 // Floating point arithmetic/math functions: 1635 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV, 1636 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, 1637 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, 1638 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC, 1639 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR, 1640 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS, 1641 // Misc: 1642 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool, 1643 // Vector: 1644 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR, 1645 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, 1646 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, 1647 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE, 1648 ISD::SPLAT_VECTOR, 1649 }; 1650 1651 for (MVT VT : MVT::fixedlen_vector_valuetypes()) { 1652 for (unsigned VectExpOp : VectExpOps) 1653 setOperationAction(VectExpOp, VT, Expand); 1654 1655 // Expand all extending loads and truncating stores: 1656 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) { 1657 if (TargetVT == VT) 1658 continue; 1659 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand); 1660 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand); 1661 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand); 1662 setTruncStoreAction(VT, TargetVT, Expand); 1663 } 1664 1665 // Normalize all inputs to SELECT to be vectors of i32. 1666 if (VT.getVectorElementType() != MVT::i32) { 1667 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32); 1668 setOperationAction(ISD::SELECT, VT, Promote); 1669 AddPromotedToType(ISD::SELECT, VT, VT32); 1670 } 1671 setOperationAction(ISD::SRA, VT, Custom); 1672 setOperationAction(ISD::SHL, VT, Custom); 1673 setOperationAction(ISD::SRL, VT, Custom); 1674 } 1675 1676 // Extending loads from (native) vectors of i8 into (native) vectors of i16 1677 // are legal. 1678 setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal); 1679 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal); 1680 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal); 1681 setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal); 1682 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); 1683 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); 1684 1685 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); 1686 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); 1687 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); 1688 1689 // Types natively supported: 1690 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8, 1691 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) { 1692 setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom); 1693 setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom); 1694 setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom); 1695 setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom); 1696 setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom); 1697 setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom); 1698 1699 setOperationAction(ISD::ADD, NativeVT, Legal); 1700 setOperationAction(ISD::SUB, NativeVT, Legal); 1701 setOperationAction(ISD::MUL, NativeVT, Legal); 1702 setOperationAction(ISD::AND, NativeVT, Legal); 1703 setOperationAction(ISD::OR, NativeVT, Legal); 1704 setOperationAction(ISD::XOR, NativeVT, Legal); 1705 1706 if (NativeVT.getVectorElementType() != MVT::i1) 1707 setOperationAction(ISD::SPLAT_VECTOR, NativeVT, Legal); 1708 } 1709 1710 for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) { 1711 setOperationAction(ISD::SMIN, VT, Legal); 1712 setOperationAction(ISD::SMAX, VT, Legal); 1713 setOperationAction(ISD::UMIN, VT, Legal); 1714 setOperationAction(ISD::UMAX, VT, Legal); 1715 } 1716 1717 // Custom lower unaligned loads. 1718 // Also, for both loads and stores, verify the alignment of the address 1719 // in case it is a compile-time constant. This is a usability feature to 1720 // provide a meaningful error message to users. 1721 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8, 1722 MVT::v2i16, MVT::v4i16, MVT::v2i32}) { 1723 setOperationAction(ISD::LOAD, VT, Custom); 1724 setOperationAction(ISD::STORE, VT, Custom); 1725 } 1726 1727 // Custom-lower load/stores of boolean vectors. 1728 for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) { 1729 setOperationAction(ISD::LOAD, VT, Custom); 1730 setOperationAction(ISD::STORE, VT, Custom); 1731 } 1732 1733 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16, 1734 MVT::v2i32}) { 1735 setCondCodeAction(ISD::SETNE, VT, Expand); 1736 setCondCodeAction(ISD::SETLE, VT, Expand); 1737 setCondCodeAction(ISD::SETGE, VT, Expand); 1738 setCondCodeAction(ISD::SETLT, VT, Expand); 1739 setCondCodeAction(ISD::SETULE, VT, Expand); 1740 setCondCodeAction(ISD::SETUGE, VT, Expand); 1741 setCondCodeAction(ISD::SETULT, VT, Expand); 1742 } 1743 1744 // Custom-lower bitcasts from i8 to v8i1. 1745 setOperationAction(ISD::BITCAST, MVT::i8, Custom); 1746 setOperationAction(ISD::SETCC, MVT::v2i16, Custom); 1747 setOperationAction(ISD::VSELECT, MVT::v4i8, Custom); 1748 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom); 1749 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); 1750 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); 1751 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); 1752 1753 // V5+. 1754 setOperationAction(ISD::FMA, MVT::f64, Expand); 1755 setOperationAction(ISD::FADD, MVT::f64, Expand); 1756 setOperationAction(ISD::FSUB, MVT::f64, Expand); 1757 setOperationAction(ISD::FMUL, MVT::f64, Expand); 1758 1759 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 1760 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 1761 1762 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); 1763 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote); 1764 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); 1765 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); 1766 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); 1767 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 1768 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); 1769 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); 1770 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); 1771 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); 1772 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); 1773 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); 1774 1775 // Special handling for half-precision floating point conversions. 1776 // Lower half float conversions into library calls. 1777 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 1778 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 1779 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 1780 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 1781 1782 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); 1783 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); 1784 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 1785 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 1786 1787 // Handling of indexed loads/stores: default is "expand". 1788 // 1789 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64, 1790 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) { 1791 setIndexedLoadAction(ISD::POST_INC, VT, Legal); 1792 setIndexedStoreAction(ISD::POST_INC, VT, Legal); 1793 } 1794 1795 // Subtarget-specific operation actions. 1796 // 1797 if (Subtarget.hasV60Ops()) { 1798 setOperationAction(ISD::ROTL, MVT::i32, Legal); 1799 setOperationAction(ISD::ROTL, MVT::i64, Legal); 1800 setOperationAction(ISD::ROTR, MVT::i32, Legal); 1801 setOperationAction(ISD::ROTR, MVT::i64, Legal); 1802 } 1803 if (Subtarget.hasV66Ops()) { 1804 setOperationAction(ISD::FADD, MVT::f64, Legal); 1805 setOperationAction(ISD::FSUB, MVT::f64, Legal); 1806 } 1807 if (Subtarget.hasV67Ops()) { 1808 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 1809 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 1810 setOperationAction(ISD::FMUL, MVT::f64, Legal); 1811 } 1812 1813 setTargetDAGCombine(ISD::VSELECT); 1814 1815 if (Subtarget.useHVXOps()) 1816 initializeHVXLowering(); 1817 1818 computeRegisterProperties(&HRI); 1819 1820 // 1821 // Library calls for unsupported operations 1822 // 1823 bool FastMath = EnableFastMath; 1824 1825 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3"); 1826 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3"); 1827 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3"); 1828 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3"); 1829 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); 1830 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3"); 1831 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3"); 1832 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3"); 1833 1834 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf"); 1835 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf"); 1836 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti"); 1837 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti"); 1838 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti"); 1839 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti"); 1840 1841 // This is the only fast library function for sqrtd. 1842 if (FastMath) 1843 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2"); 1844 1845 // Prefix is: nothing for "slow-math", 1846 // "fast2_" for V5+ fast-math double-precision 1847 // (actually, keep fast-math and fast-math2 separate for now) 1848 if (FastMath) { 1849 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3"); 1850 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3"); 1851 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3"); 1852 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3"); 1853 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3"); 1854 } else { 1855 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); 1856 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); 1857 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); 1858 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3"); 1859 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3"); 1860 } 1861 1862 if (FastMath) 1863 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf"); 1864 else 1865 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf"); 1866 1867 // Routines to handle fp16 storage type. 1868 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2"); 1869 setLibcallName(RTLIB::FPROUND_F64_F16, "__truncdfhf2"); 1870 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2"); 1871 1872 // These cause problems when the shift amount is non-constant. 1873 setLibcallName(RTLIB::SHL_I128, nullptr); 1874 setLibcallName(RTLIB::SRL_I128, nullptr); 1875 setLibcallName(RTLIB::SRA_I128, nullptr); 1876 } 1877 1878 const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { 1879 switch ((HexagonISD::NodeType)Opcode) { 1880 case HexagonISD::ADDC: return "HexagonISD::ADDC"; 1881 case HexagonISD::SUBC: return "HexagonISD::SUBC"; 1882 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA"; 1883 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT"; 1884 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL"; 1885 case HexagonISD::BARRIER: return "HexagonISD::BARRIER"; 1886 case HexagonISD::CALL: return "HexagonISD::CALL"; 1887 case HexagonISD::CALLnr: return "HexagonISD::CALLnr"; 1888 case HexagonISD::CALLR: return "HexagonISD::CALLR"; 1889 case HexagonISD::COMBINE: return "HexagonISD::COMBINE"; 1890 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP"; 1891 case HexagonISD::CONST32: return "HexagonISD::CONST32"; 1892 case HexagonISD::CP: return "HexagonISD::CP"; 1893 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH"; 1894 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN"; 1895 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT"; 1896 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU"; 1897 case HexagonISD::INSERT: return "HexagonISD::INSERT"; 1898 case HexagonISD::JT: return "HexagonISD::JT"; 1899 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG"; 1900 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; 1901 case HexagonISD::VASL: return "HexagonISD::VASL"; 1902 case HexagonISD::VASR: return "HexagonISD::VASR"; 1903 case HexagonISD::VLSR: return "HexagonISD::VLSR"; 1904 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW"; 1905 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0"; 1906 case HexagonISD::VROR: return "HexagonISD::VROR"; 1907 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE"; 1908 case HexagonISD::PTRUE: return "HexagonISD::PTRUE"; 1909 case HexagonISD::PFALSE: return "HexagonISD::PFALSE"; 1910 case HexagonISD::D2P: return "HexagonISD::D2P"; 1911 case HexagonISD::P2D: return "HexagonISD::P2D"; 1912 case HexagonISD::V2Q: return "HexagonISD::V2Q"; 1913 case HexagonISD::Q2V: return "HexagonISD::Q2V"; 1914 case HexagonISD::QCAT: return "HexagonISD::QCAT"; 1915 case HexagonISD::QTRUE: return "HexagonISD::QTRUE"; 1916 case HexagonISD::QFALSE: return "HexagonISD::QFALSE"; 1917 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST"; 1918 case HexagonISD::VALIGN: return "HexagonISD::VALIGN"; 1919 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR"; 1920 case HexagonISD::VPACKL: return "HexagonISD::VPACKL"; 1921 case HexagonISD::VUNPACK: return "HexagonISD::VUNPACK"; 1922 case HexagonISD::VUNPACKU: return "HexagonISD::VUNPACKU"; 1923 case HexagonISD::ISEL: return "HexagonISD::ISEL"; 1924 case HexagonISD::OP_END: break; 1925 } 1926 return nullptr; 1927 } 1928 1929 bool 1930 HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, 1931 const SDLoc &dl, SelectionDAG &DAG) const { 1932 auto *CA = dyn_cast<ConstantSDNode>(Ptr); 1933 if (!CA) 1934 return true; 1935 unsigned Addr = CA->getZExtValue(); 1936 Align HaveAlign = 1937 Addr != 0 ? Align(1ull << countTrailingZeros(Addr)) : NeedAlign; 1938 if (HaveAlign >= NeedAlign) 1939 return true; 1940 1941 static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind(); 1942 1943 struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo { 1944 DiagnosticInfoMisalignedTrap(StringRef M) 1945 : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {} 1946 void print(DiagnosticPrinter &DP) const override { 1947 DP << Msg; 1948 } 1949 static bool classof(const DiagnosticInfo *DI) { 1950 return DI->getKind() == DK_MisalignedTrap; 1951 } 1952 StringRef Msg; 1953 }; 1954 1955 std::string ErrMsg; 1956 raw_string_ostream O(ErrMsg); 1957 O << "Misaligned constant address: " << format_hex(Addr, 10) 1958 << " has alignment " << HaveAlign.value() 1959 << ", but the memory access requires " << NeedAlign.value(); 1960 if (DebugLoc DL = dl.getDebugLoc()) 1961 DL.print(O << ", at "); 1962 O << ". The instruction has been replaced with a trap."; 1963 1964 DAG.getContext()->diagnose(DiagnosticInfoMisalignedTrap(O.str())); 1965 return false; 1966 } 1967 1968 SDValue 1969 HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) 1970 const { 1971 const SDLoc &dl(Op); 1972 auto *LS = cast<LSBaseSDNode>(Op.getNode()); 1973 assert(!LS->isIndexed() && "Not expecting indexed ops on constant address"); 1974 1975 SDValue Chain = LS->getChain(); 1976 SDValue Trap = DAG.getNode(ISD::TRAP, dl, MVT::Other, Chain); 1977 if (LS->getOpcode() == ISD::LOAD) 1978 return DAG.getMergeValues({DAG.getUNDEF(ty(Op)), Trap}, dl); 1979 return Trap; 1980 } 1981 1982 // Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load 1983 // intrinsic. 1984 static bool isBrevLdIntrinsic(const Value *Inst) { 1985 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID(); 1986 return (ID == Intrinsic::hexagon_L2_loadrd_pbr || 1987 ID == Intrinsic::hexagon_L2_loadri_pbr || 1988 ID == Intrinsic::hexagon_L2_loadrh_pbr || 1989 ID == Intrinsic::hexagon_L2_loadruh_pbr || 1990 ID == Intrinsic::hexagon_L2_loadrb_pbr || 1991 ID == Intrinsic::hexagon_L2_loadrub_pbr); 1992 } 1993 1994 // Bit-reverse Load Intrinsic :Crawl up and figure out the object from previous 1995 // instruction. So far we only handle bitcast, extract value and bit reverse 1996 // load intrinsic instructions. Should we handle CGEP ? 1997 static Value *getBrevLdObject(Value *V) { 1998 if (Operator::getOpcode(V) == Instruction::ExtractValue || 1999 Operator::getOpcode(V) == Instruction::BitCast) 2000 V = cast<Operator>(V)->getOperand(0); 2001 else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V)) 2002 V = cast<Instruction>(V)->getOperand(0); 2003 return V; 2004 } 2005 2006 // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or 2007 // a back edge. If the back edge comes from the intrinsic itself, the incoming 2008 // edge is returned. 2009 static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) { 2010 const BasicBlock *Parent = PN->getParent(); 2011 int Idx = -1; 2012 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) { 2013 BasicBlock *Blk = PN->getIncomingBlock(i); 2014 // Determine if the back edge is originated from intrinsic. 2015 if (Blk == Parent) { 2016 Value *BackEdgeVal = PN->getIncomingValue(i); 2017 Value *BaseVal; 2018 // Loop over till we return the same Value or we hit the IntrBaseVal. 2019 do { 2020 BaseVal = BackEdgeVal; 2021 BackEdgeVal = getBrevLdObject(BackEdgeVal); 2022 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal)); 2023 // If the getBrevLdObject returns IntrBaseVal, we should return the 2024 // incoming edge. 2025 if (IntrBaseVal == BackEdgeVal) 2026 continue; 2027 Idx = i; 2028 break; 2029 } else // Set the node to incoming edge. 2030 Idx = i; 2031 } 2032 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI"); 2033 return PN->getIncomingValue(Idx); 2034 } 2035 2036 // Bit-reverse Load Intrinsic: Figure out the underlying object the base 2037 // pointer points to, for the bit-reverse load intrinsic. Setting this to 2038 // memoperand might help alias analysis to figure out the dependencies. 2039 static Value *getUnderLyingObjectForBrevLdIntr(Value *V) { 2040 Value *IntrBaseVal = V; 2041 Value *BaseVal; 2042 // Loop over till we return the same Value, implies we either figure out 2043 // the object or we hit a PHI 2044 do { 2045 BaseVal = V; 2046 V = getBrevLdObject(V); 2047 } while (BaseVal != V); 2048 2049 // Identify the object from PHINode. 2050 if (const PHINode *PN = dyn_cast<PHINode>(V)) 2051 return returnEdge(PN, IntrBaseVal); 2052 // For non PHI nodes, the object is the last value returned by getBrevLdObject 2053 else 2054 return V; 2055 } 2056 2057 /// Given an intrinsic, checks if on the target the intrinsic will need to map 2058 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns 2059 /// true and store the intrinsic information into the IntrinsicInfo that was 2060 /// passed to the function. 2061 bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 2062 const CallInst &I, 2063 MachineFunction &MF, 2064 unsigned Intrinsic) const { 2065 switch (Intrinsic) { 2066 case Intrinsic::hexagon_L2_loadrd_pbr: 2067 case Intrinsic::hexagon_L2_loadri_pbr: 2068 case Intrinsic::hexagon_L2_loadrh_pbr: 2069 case Intrinsic::hexagon_L2_loadruh_pbr: 2070 case Intrinsic::hexagon_L2_loadrb_pbr: 2071 case Intrinsic::hexagon_L2_loadrub_pbr: { 2072 Info.opc = ISD::INTRINSIC_W_CHAIN; 2073 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 2074 auto &Cont = I.getCalledFunction()->getParent()->getContext(); 2075 // The intrinsic function call is of the form { ElTy, i8* } 2076 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type 2077 // should be derived from ElTy. 2078 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0); 2079 Info.memVT = MVT::getVT(ElTy); 2080 llvm::Value *BasePtrVal = I.getOperand(0); 2081 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal); 2082 // The offset value comes through Modifier register. For now, assume the 2083 // offset is 0. 2084 Info.offset = 0; 2085 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont)); 2086 Info.flags = MachineMemOperand::MOLoad; 2087 return true; 2088 } 2089 case Intrinsic::hexagon_V6_vgathermw: 2090 case Intrinsic::hexagon_V6_vgathermw_128B: 2091 case Intrinsic::hexagon_V6_vgathermh: 2092 case Intrinsic::hexagon_V6_vgathermh_128B: 2093 case Intrinsic::hexagon_V6_vgathermhw: 2094 case Intrinsic::hexagon_V6_vgathermhw_128B: 2095 case Intrinsic::hexagon_V6_vgathermwq: 2096 case Intrinsic::hexagon_V6_vgathermwq_128B: 2097 case Intrinsic::hexagon_V6_vgathermhq: 2098 case Intrinsic::hexagon_V6_vgathermhq_128B: 2099 case Intrinsic::hexagon_V6_vgathermhwq: 2100 case Intrinsic::hexagon_V6_vgathermhwq_128B: { 2101 const Module &M = *I.getParent()->getParent()->getParent(); 2102 Info.opc = ISD::INTRINSIC_W_CHAIN; 2103 Type *VecTy = I.getArgOperand(1)->getType(); 2104 Info.memVT = MVT::getVT(VecTy); 2105 Info.ptrVal = I.getArgOperand(0); 2106 Info.offset = 0; 2107 Info.align = 2108 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8); 2109 Info.flags = MachineMemOperand::MOLoad | 2110 MachineMemOperand::MOStore | 2111 MachineMemOperand::MOVolatile; 2112 return true; 2113 } 2114 default: 2115 break; 2116 } 2117 return false; 2118 } 2119 2120 bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const { 2121 return X.getValueType().isScalarInteger(); // 'tstbit' 2122 } 2123 2124 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 2125 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2)); 2126 } 2127 2128 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 2129 if (!VT1.isSimple() || !VT2.isSimple()) 2130 return false; 2131 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32; 2132 } 2133 2134 bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd( 2135 const MachineFunction &MF, EVT VT) const { 2136 return isOperationLegalOrCustom(ISD::FMA, VT); 2137 } 2138 2139 // Should we expand the build vector with shuffles? 2140 bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT, 2141 unsigned DefinedValues) const { 2142 return false; 2143 } 2144 2145 bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, 2146 EVT VT) const { 2147 return true; 2148 } 2149 2150 TargetLoweringBase::LegalizeTypeAction 2151 HexagonTargetLowering::getPreferredVectorAction(MVT VT) const { 2152 unsigned VecLen = VT.getVectorMinNumElements(); 2153 MVT ElemTy = VT.getVectorElementType(); 2154 2155 if (VecLen == 1 || VT.isScalableVector()) 2156 return TargetLoweringBase::TypeScalarizeVector; 2157 2158 if (Subtarget.useHVXOps()) { 2159 unsigned Action = getPreferredHvxVectorAction(VT); 2160 if (Action != ~0u) 2161 return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action); 2162 } 2163 2164 // Always widen (remaining) vectors of i1. 2165 if (ElemTy == MVT::i1) 2166 return TargetLoweringBase::TypeWidenVector; 2167 2168 return TargetLoweringBase::TypeSplitVector; 2169 } 2170 2171 std::pair<SDValue, int> 2172 HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const { 2173 if (Addr.getOpcode() == ISD::ADD) { 2174 SDValue Op1 = Addr.getOperand(1); 2175 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode())) 2176 return { Addr.getOperand(0), CN->getSExtValue() }; 2177 } 2178 return { Addr, 0 }; 2179 } 2180 2181 // Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors 2182 // to select data from, V3 is the permutation. 2183 SDValue 2184 HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) 2185 const { 2186 const auto *SVN = cast<ShuffleVectorSDNode>(Op); 2187 ArrayRef<int> AM = SVN->getMask(); 2188 assert(AM.size() <= 8 && "Unexpected shuffle mask"); 2189 unsigned VecLen = AM.size(); 2190 2191 MVT VecTy = ty(Op); 2192 assert(!Subtarget.isHVXVectorType(VecTy, true) && 2193 "HVX shuffles should be legal"); 2194 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length"); 2195 2196 SDValue Op0 = Op.getOperand(0); 2197 SDValue Op1 = Op.getOperand(1); 2198 const SDLoc &dl(Op); 2199 2200 // If the inputs are not the same as the output, bail. This is not an 2201 // error situation, but complicates the handling and the default expansion 2202 // (into BUILD_VECTOR) should be adequate. 2203 if (ty(Op0) != VecTy || ty(Op1) != VecTy) 2204 return SDValue(); 2205 2206 // Normalize the mask so that the first non-negative index comes from 2207 // the first operand. 2208 SmallVector<int,8> Mask(AM.begin(), AM.end()); 2209 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data(); 2210 if (F == AM.size()) 2211 return DAG.getUNDEF(VecTy); 2212 if (AM[F] >= int(VecLen)) { 2213 ShuffleVectorSDNode::commuteMask(Mask); 2214 std::swap(Op0, Op1); 2215 } 2216 2217 // Express the shuffle mask in terms of bytes. 2218 SmallVector<int,8> ByteMask; 2219 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8; 2220 for (int M : Mask) { 2221 if (M < 0) { 2222 for (unsigned j = 0; j != ElemBytes; ++j) 2223 ByteMask.push_back(-1); 2224 } else { 2225 for (unsigned j = 0; j != ElemBytes; ++j) 2226 ByteMask.push_back(M*ElemBytes + j); 2227 } 2228 } 2229 assert(ByteMask.size() <= 8); 2230 2231 // All non-undef (non-negative) indexes are well within [0..127], so they 2232 // fit in a single byte. Build two 64-bit words: 2233 // - MaskIdx where each byte is the corresponding index (for non-negative 2234 // indexes), and 0xFF for negative indexes, and 2235 // - MaskUnd that has 0xFF for each negative index. 2236 uint64_t MaskIdx = 0; 2237 uint64_t MaskUnd = 0; 2238 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) { 2239 unsigned S = 8*i; 2240 uint64_t M = ByteMask[i] & 0xFF; 2241 if (M == 0xFF) 2242 MaskUnd |= M << S; 2243 MaskIdx |= M << S; 2244 } 2245 2246 if (ByteMask.size() == 4) { 2247 // Identity. 2248 if (MaskIdx == (0x03020100 | MaskUnd)) 2249 return Op0; 2250 // Byte swap. 2251 if (MaskIdx == (0x00010203 | MaskUnd)) { 2252 SDValue T0 = DAG.getBitcast(MVT::i32, Op0); 2253 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0); 2254 return DAG.getBitcast(VecTy, T1); 2255 } 2256 2257 // Byte packs. 2258 SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl, 2259 typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0}); 2260 if (MaskIdx == (0x06040200 | MaskUnd)) 2261 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG); 2262 if (MaskIdx == (0x07050301 | MaskUnd)) 2263 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG); 2264 2265 SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl, 2266 typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1}); 2267 if (MaskIdx == (0x02000604 | MaskUnd)) 2268 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG); 2269 if (MaskIdx == (0x03010705 | MaskUnd)) 2270 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG); 2271 } 2272 2273 if (ByteMask.size() == 8) { 2274 // Identity. 2275 if (MaskIdx == (0x0706050403020100ull | MaskUnd)) 2276 return Op0; 2277 // Byte swap. 2278 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) { 2279 SDValue T0 = DAG.getBitcast(MVT::i64, Op0); 2280 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0); 2281 return DAG.getBitcast(VecTy, T1); 2282 } 2283 2284 // Halfword picks. 2285 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd)) 2286 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG); 2287 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd)) 2288 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG); 2289 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd)) 2290 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG); 2291 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd)) 2292 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG); 2293 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) { 2294 VectorPair P = opSplit(Op0, dl, DAG); 2295 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG); 2296 } 2297 2298 // Byte packs. 2299 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd)) 2300 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG); 2301 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd)) 2302 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG); 2303 } 2304 2305 return SDValue(); 2306 } 2307 2308 // Create a Hexagon-specific node for shifting a vector by an integer. 2309 SDValue 2310 HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) 2311 const { 2312 unsigned NewOpc; 2313 switch (Op.getOpcode()) { 2314 case ISD::SHL: 2315 NewOpc = HexagonISD::VASL; 2316 break; 2317 case ISD::SRA: 2318 NewOpc = HexagonISD::VASR; 2319 break; 2320 case ISD::SRL: 2321 NewOpc = HexagonISD::VLSR; 2322 break; 2323 default: 2324 llvm_unreachable("Unexpected shift opcode"); 2325 } 2326 2327 SDValue Op0 = Op.getOperand(0); 2328 SDValue Op1 = Op.getOperand(1); 2329 const SDLoc &dl(Op); 2330 2331 switch (Op1.getOpcode()) { 2332 case ISD::BUILD_VECTOR: 2333 if (SDValue S = cast<BuildVectorSDNode>(Op1)->getSplatValue()) 2334 return DAG.getNode(NewOpc, dl, ty(Op), Op0, S); 2335 break; 2336 case ISD::SPLAT_VECTOR: 2337 return DAG.getNode(NewOpc, dl, ty(Op), Op0, Op1.getOperand(0)); 2338 } 2339 return SDValue(); 2340 } 2341 2342 SDValue 2343 HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const { 2344 return getVectorShiftByInt(Op, DAG); 2345 } 2346 2347 SDValue 2348 HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { 2349 if (isa<ConstantSDNode>(Op.getOperand(1).getNode())) 2350 return Op; 2351 return SDValue(); 2352 } 2353 2354 SDValue 2355 HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 2356 MVT ResTy = ty(Op); 2357 SDValue InpV = Op.getOperand(0); 2358 MVT InpTy = ty(InpV); 2359 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits()); 2360 const SDLoc &dl(Op); 2361 2362 // Handle conversion from i8 to v8i1. 2363 if (InpTy == MVT::i8) { 2364 if (ResTy == MVT::v8i1) { 2365 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV); 2366 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32); 2367 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG); 2368 } 2369 return SDValue(); 2370 } 2371 2372 return Op; 2373 } 2374 2375 bool 2376 HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values, 2377 MVT VecTy, SelectionDAG &DAG, 2378 MutableArrayRef<ConstantInt*> Consts) const { 2379 MVT ElemTy = VecTy.getVectorElementType(); 2380 unsigned ElemWidth = ElemTy.getSizeInBits(); 2381 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth); 2382 bool AllConst = true; 2383 2384 for (unsigned i = 0, e = Values.size(); i != e; ++i) { 2385 SDValue V = Values[i]; 2386 if (V.isUndef()) { 2387 Consts[i] = ConstantInt::get(IntTy, 0); 2388 continue; 2389 } 2390 // Make sure to always cast to IntTy. 2391 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) { 2392 const ConstantInt *CI = CN->getConstantIntValue(); 2393 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue()); 2394 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) { 2395 const ConstantFP *CF = CN->getConstantFPValue(); 2396 APInt A = CF->getValueAPF().bitcastToAPInt(); 2397 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue()); 2398 } else { 2399 AllConst = false; 2400 } 2401 } 2402 return AllConst; 2403 } 2404 2405 SDValue 2406 HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, 2407 MVT VecTy, SelectionDAG &DAG) const { 2408 MVT ElemTy = VecTy.getVectorElementType(); 2409 assert(VecTy.getVectorNumElements() == Elem.size()); 2410 2411 SmallVector<ConstantInt*,4> Consts(Elem.size()); 2412 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts); 2413 2414 unsigned First, Num = Elem.size(); 2415 for (First = 0; First != Num; ++First) { 2416 if (!isUndef(Elem[First])) 2417 break; 2418 } 2419 if (First == Num) 2420 return DAG.getUNDEF(VecTy); 2421 2422 if (AllConst && 2423 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) 2424 return getZero(dl, VecTy, DAG); 2425 2426 if (ElemTy == MVT::i16) { 2427 assert(Elem.size() == 2); 2428 if (AllConst) { 2429 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) | 2430 Consts[1]->getZExtValue() << 16; 2431 return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32)); 2432 } 2433 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, 2434 {Elem[1], Elem[0]}, DAG); 2435 return DAG.getBitcast(MVT::v2i16, N); 2436 } 2437 2438 if (ElemTy == MVT::i8) { 2439 // First try generating a constant. 2440 if (AllConst) { 2441 int32_t V = (Consts[0]->getZExtValue() & 0xFF) | 2442 (Consts[1]->getZExtValue() & 0xFF) << 8 | 2443 (Consts[2]->getZExtValue() & 0xFF) << 16 | 2444 Consts[3]->getZExtValue() << 24; 2445 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32)); 2446 } 2447 2448 // Then try splat. 2449 bool IsSplat = true; 2450 for (unsigned i = First+1; i != Num; ++i) { 2451 if (Elem[i] == Elem[First] || isUndef(Elem[i])) 2452 continue; 2453 IsSplat = false; 2454 break; 2455 } 2456 if (IsSplat) { 2457 // Legalize the operand of SPLAT_VECTOR. 2458 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32); 2459 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext); 2460 } 2461 2462 // Generate 2463 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) | 2464 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16 2465 assert(Elem.size() == 4); 2466 SDValue Vs[4]; 2467 for (unsigned i = 0; i != 4; ++i) { 2468 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32); 2469 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8); 2470 } 2471 SDValue S8 = DAG.getConstant(8, dl, MVT::i32); 2472 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8}); 2473 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8}); 2474 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0}); 2475 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1}); 2476 2477 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG); 2478 return DAG.getBitcast(MVT::v4i8, R); 2479 } 2480 2481 #ifndef NDEBUG 2482 dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n'; 2483 #endif 2484 llvm_unreachable("Unexpected vector element type"); 2485 } 2486 2487 SDValue 2488 HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, 2489 MVT VecTy, SelectionDAG &DAG) const { 2490 MVT ElemTy = VecTy.getVectorElementType(); 2491 assert(VecTy.getVectorNumElements() == Elem.size()); 2492 2493 SmallVector<ConstantInt*,8> Consts(Elem.size()); 2494 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts); 2495 2496 unsigned First, Num = Elem.size(); 2497 for (First = 0; First != Num; ++First) { 2498 if (!isUndef(Elem[First])) 2499 break; 2500 } 2501 if (First == Num) 2502 return DAG.getUNDEF(VecTy); 2503 2504 if (AllConst && 2505 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) 2506 return getZero(dl, VecTy, DAG); 2507 2508 // First try splat if possible. 2509 if (ElemTy == MVT::i16) { 2510 bool IsSplat = true; 2511 for (unsigned i = First+1; i != Num; ++i) { 2512 if (Elem[i] == Elem[First] || isUndef(Elem[i])) 2513 continue; 2514 IsSplat = false; 2515 break; 2516 } 2517 if (IsSplat) { 2518 // Legalize the operand of SPLAT_VECTOR 2519 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32); 2520 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext); 2521 } 2522 } 2523 2524 // Then try constant. 2525 if (AllConst) { 2526 uint64_t Val = 0; 2527 unsigned W = ElemTy.getSizeInBits(); 2528 uint64_t Mask = (ElemTy == MVT::i8) ? 0xFFull 2529 : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull; 2530 for (unsigned i = 0; i != Num; ++i) 2531 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask); 2532 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64); 2533 return DAG.getBitcast(VecTy, V0); 2534 } 2535 2536 // Build two 32-bit vectors and concatenate. 2537 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2); 2538 SDValue L = (ElemTy == MVT::i32) 2539 ? Elem[0] 2540 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG); 2541 SDValue H = (ElemTy == MVT::i32) 2542 ? Elem[1] 2543 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG); 2544 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L}); 2545 } 2546 2547 SDValue 2548 HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV, 2549 const SDLoc &dl, MVT ValTy, MVT ResTy, 2550 SelectionDAG &DAG) const { 2551 MVT VecTy = ty(VecV); 2552 assert(!ValTy.isVector() || 2553 VecTy.getVectorElementType() == ValTy.getVectorElementType()); 2554 unsigned VecWidth = VecTy.getSizeInBits(); 2555 unsigned ValWidth = ValTy.getSizeInBits(); 2556 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits(); 2557 assert((VecWidth % ElemWidth) == 0); 2558 auto *IdxN = dyn_cast<ConstantSDNode>(IdxV); 2559 2560 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon 2561 // without any coprocessors). 2562 if (ElemWidth == 1) { 2563 assert(VecWidth == VecTy.getVectorNumElements() && 2564 "Vector elements should equal vector width size"); 2565 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2); 2566 // Check if this is an extract of the lowest bit. 2567 if (IdxN) { 2568 // Extracting the lowest bit is a no-op, but it changes the type, 2569 // so it must be kept as an operation to avoid errors related to 2570 // type mismatches. 2571 if (IdxN->isZero() && ValTy.getSizeInBits() == 1) 2572 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV); 2573 } 2574 2575 // If the value extracted is a single bit, use tstbit. 2576 if (ValWidth == 1) { 2577 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG); 2578 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32); 2579 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0); 2580 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0); 2581 } 2582 2583 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in 2584 // a predicate register. The elements of the vector are repeated 2585 // in the register (if necessary) so that the total number is 8. 2586 // The extracted subvector will need to be expanded in such a way. 2587 unsigned Scale = VecWidth / ValWidth; 2588 2589 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to 2590 // position 0. 2591 assert(ty(IdxV) == MVT::i32); 2592 unsigned VecRep = 8 / VecWidth; 2593 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, 2594 DAG.getConstant(8*VecRep, dl, MVT::i32)); 2595 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV); 2596 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0); 2597 while (Scale > 1) { 2598 // The longest possible subvector is at most 32 bits, so it is always 2599 // contained in the low subregister. 2600 T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1); 2601 T1 = expandPredicate(T1, dl, DAG); 2602 Scale /= 2; 2603 } 2604 2605 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1); 2606 } 2607 2608 assert(VecWidth == 32 || VecWidth == 64); 2609 2610 // Cast everything to scalar integer types. 2611 MVT ScalarTy = tyScalar(VecTy); 2612 VecV = DAG.getBitcast(ScalarTy, VecV); 2613 2614 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32); 2615 SDValue ExtV; 2616 2617 if (IdxN) { 2618 unsigned Off = IdxN->getZExtValue() * ElemWidth; 2619 if (VecWidth == 64 && ValWidth == 32) { 2620 assert(Off == 0 || Off == 32); 2621 unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi; 2622 ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV); 2623 } else if (Off == 0 && (ValWidth % 8) == 0) { 2624 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy)); 2625 } else { 2626 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32); 2627 // The return type of EXTRACTU must be the same as the type of the 2628 // input vector. 2629 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy, 2630 {VecV, WidthV, OffV}); 2631 } 2632 } else { 2633 if (ty(IdxV) != MVT::i32) 2634 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32); 2635 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, 2636 DAG.getConstant(ElemWidth, dl, MVT::i32)); 2637 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy, 2638 {VecV, WidthV, OffV}); 2639 } 2640 2641 // Cast ExtV to the requested result type. 2642 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy)); 2643 ExtV = DAG.getBitcast(ResTy, ExtV); 2644 return ExtV; 2645 } 2646 2647 SDValue 2648 HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV, 2649 const SDLoc &dl, MVT ValTy, 2650 SelectionDAG &DAG) const { 2651 MVT VecTy = ty(VecV); 2652 if (VecTy.getVectorElementType() == MVT::i1) { 2653 MVT ValTy = ty(ValV); 2654 assert(ValTy.getVectorElementType() == MVT::i1); 2655 SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV); 2656 unsigned VecLen = VecTy.getVectorNumElements(); 2657 unsigned Scale = VecLen / ValTy.getVectorNumElements(); 2658 assert(Scale > 1); 2659 2660 for (unsigned R = Scale; R > 1; R /= 2) { 2661 ValR = contractPredicate(ValR, dl, DAG); 2662 ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, 2663 DAG.getUNDEF(MVT::i32), ValR); 2664 } 2665 // The longest possible subvector is at most 32 bits, so it is always 2666 // contained in the low subregister. 2667 ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR); 2668 2669 unsigned ValBytes = 64 / Scale; 2670 SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32); 2671 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, 2672 DAG.getConstant(8, dl, MVT::i32)); 2673 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV); 2674 SDValue Ins = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, 2675 {VecR, ValR, Width, Idx}); 2676 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins); 2677 } 2678 2679 unsigned VecWidth = VecTy.getSizeInBits(); 2680 unsigned ValWidth = ValTy.getSizeInBits(); 2681 assert(VecWidth == 32 || VecWidth == 64); 2682 assert((VecWidth % ValWidth) == 0); 2683 2684 // Cast everything to scalar integer types. 2685 MVT ScalarTy = MVT::getIntegerVT(VecWidth); 2686 // The actual type of ValV may be different than ValTy (which is related 2687 // to the vector type). 2688 unsigned VW = ty(ValV).getSizeInBits(); 2689 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV); 2690 VecV = DAG.getBitcast(ScalarTy, VecV); 2691 if (VW != VecWidth) 2692 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy); 2693 2694 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32); 2695 SDValue InsV; 2696 2697 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) { 2698 unsigned W = C->getZExtValue() * ValWidth; 2699 SDValue OffV = DAG.getConstant(W, dl, MVT::i32); 2700 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy, 2701 {VecV, ValV, WidthV, OffV}); 2702 } else { 2703 if (ty(IdxV) != MVT::i32) 2704 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32); 2705 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV); 2706 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy, 2707 {VecV, ValV, WidthV, OffV}); 2708 } 2709 2710 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV); 2711 } 2712 2713 SDValue 2714 HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl, 2715 SelectionDAG &DAG) const { 2716 assert(ty(Vec32).getSizeInBits() == 32); 2717 if (isUndef(Vec32)) 2718 return DAG.getUNDEF(MVT::i64); 2719 return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG); 2720 } 2721 2722 SDValue 2723 HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl, 2724 SelectionDAG &DAG) const { 2725 assert(ty(Vec64).getSizeInBits() == 64); 2726 if (isUndef(Vec64)) 2727 return DAG.getUNDEF(MVT::i32); 2728 return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG); 2729 } 2730 2731 SDValue 2732 HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) 2733 const { 2734 if (Ty.isVector()) { 2735 unsigned W = Ty.getSizeInBits(); 2736 if (W <= 64) 2737 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W))); 2738 return DAG.getNode(ISD::SPLAT_VECTOR, dl, Ty, getZero(dl, MVT::i32, DAG)); 2739 } 2740 2741 if (Ty.isInteger()) 2742 return DAG.getConstant(0, dl, Ty); 2743 if (Ty.isFloatingPoint()) 2744 return DAG.getConstantFP(0.0, dl, Ty); 2745 llvm_unreachable("Invalid type for zero"); 2746 } 2747 2748 SDValue 2749 HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) 2750 const { 2751 MVT ValTy = ty(Val); 2752 assert(ValTy.getVectorElementType() == ResTy.getVectorElementType()); 2753 2754 unsigned ValLen = ValTy.getVectorNumElements(); 2755 unsigned ResLen = ResTy.getVectorNumElements(); 2756 if (ValLen == ResLen) 2757 return Val; 2758 2759 const SDLoc &dl(Val); 2760 assert(ValLen < ResLen); 2761 assert(ResLen % ValLen == 0); 2762 2763 SmallVector<SDValue, 4> Concats = {Val}; 2764 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i) 2765 Concats.push_back(DAG.getUNDEF(ValTy)); 2766 2767 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, Concats); 2768 } 2769 2770 SDValue 2771 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 2772 MVT VecTy = ty(Op); 2773 unsigned BW = VecTy.getSizeInBits(); 2774 const SDLoc &dl(Op); 2775 SmallVector<SDValue,8> Ops; 2776 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) 2777 Ops.push_back(Op.getOperand(i)); 2778 2779 if (BW == 32) 2780 return buildVector32(Ops, dl, VecTy, DAG); 2781 if (BW == 64) 2782 return buildVector64(Ops, dl, VecTy, DAG); 2783 2784 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) { 2785 // Check if this is a special case or all-0 or all-1. 2786 bool All0 = true, All1 = true; 2787 for (SDValue P : Ops) { 2788 auto *CN = dyn_cast<ConstantSDNode>(P.getNode()); 2789 if (CN == nullptr) { 2790 All0 = All1 = false; 2791 break; 2792 } 2793 uint32_t C = CN->getZExtValue(); 2794 All0 &= (C == 0); 2795 All1 &= (C == 1); 2796 } 2797 if (All0) 2798 return DAG.getNode(HexagonISD::PFALSE, dl, VecTy); 2799 if (All1) 2800 return DAG.getNode(HexagonISD::PTRUE, dl, VecTy); 2801 2802 // For each i1 element in the resulting predicate register, put 1 2803 // shifted by the index of the element into a general-purpose register, 2804 // then or them together and transfer it back into a predicate register. 2805 SDValue Rs[8]; 2806 SDValue Z = getZero(dl, MVT::i32, DAG); 2807 // Always produce 8 bits, repeat inputs if necessary. 2808 unsigned Rep = 8 / VecTy.getVectorNumElements(); 2809 for (unsigned i = 0; i != 8; ++i) { 2810 SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32); 2811 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z); 2812 } 2813 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) { 2814 for (unsigned i = 0, e = A.size()/2; i != e; ++i) 2815 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]); 2816 } 2817 // Move the value directly to a predicate register. 2818 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG); 2819 } 2820 2821 return SDValue(); 2822 } 2823 2824 SDValue 2825 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op, 2826 SelectionDAG &DAG) const { 2827 MVT VecTy = ty(Op); 2828 const SDLoc &dl(Op); 2829 if (VecTy.getSizeInBits() == 64) { 2830 assert(Op.getNumOperands() == 2); 2831 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1), 2832 Op.getOperand(0)); 2833 } 2834 2835 MVT ElemTy = VecTy.getVectorElementType(); 2836 if (ElemTy == MVT::i1) { 2837 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1); 2838 MVT OpTy = ty(Op.getOperand(0)); 2839 // Scale is how many times the operands need to be contracted to match 2840 // the representation in the target register. 2841 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements(); 2842 assert(Scale == Op.getNumOperands() && Scale > 1); 2843 2844 // First, convert all bool vectors to integers, then generate pairwise 2845 // inserts to form values of doubled length. Up until there are only 2846 // two values left to concatenate, all of these values will fit in a 2847 // 32-bit integer, so keep them as i32 to use 32-bit inserts. 2848 SmallVector<SDValue,4> Words[2]; 2849 unsigned IdxW = 0; 2850 2851 for (SDValue P : Op.getNode()->op_values()) { 2852 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P); 2853 for (unsigned R = Scale; R > 1; R /= 2) { 2854 W = contractPredicate(W, dl, DAG); 2855 W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, 2856 DAG.getUNDEF(MVT::i32), W); 2857 } 2858 W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W); 2859 Words[IdxW].push_back(W); 2860 } 2861 2862 while (Scale > 2) { 2863 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32); 2864 Words[IdxW ^ 1].clear(); 2865 2866 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) { 2867 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1]; 2868 // Insert W1 into W0 right next to the significant bits of W0. 2869 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, 2870 {W0, W1, WidthV, WidthV}); 2871 Words[IdxW ^ 1].push_back(T); 2872 } 2873 IdxW ^= 1; 2874 Scale /= 2; 2875 } 2876 2877 // At this point there should only be two words left, and Scale should be 2. 2878 assert(Scale == 2 && Words[IdxW].size() == 2); 2879 2880 SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, 2881 Words[IdxW][1], Words[IdxW][0]); 2882 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW); 2883 } 2884 2885 return SDValue(); 2886 } 2887 2888 SDValue 2889 HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 2890 SelectionDAG &DAG) const { 2891 SDValue Vec = Op.getOperand(0); 2892 MVT ElemTy = ty(Vec).getVectorElementType(); 2893 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG); 2894 } 2895 2896 SDValue 2897 HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, 2898 SelectionDAG &DAG) const { 2899 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op), 2900 ty(Op), ty(Op), DAG); 2901 } 2902 2903 SDValue 2904 HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 2905 SelectionDAG &DAG) const { 2906 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2), 2907 SDLoc(Op), ty(Op).getVectorElementType(), DAG); 2908 } 2909 2910 SDValue 2911 HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, 2912 SelectionDAG &DAG) const { 2913 SDValue ValV = Op.getOperand(1); 2914 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2), 2915 SDLoc(Op), ty(ValV), DAG); 2916 } 2917 2918 bool 2919 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 2920 // Assuming the caller does not have either a signext or zeroext modifier, and 2921 // only one value is accepted, any reasonable truncation is allowed. 2922 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 2923 return false; 2924 2925 // FIXME: in principle up to 64-bit could be made safe, but it would be very 2926 // fragile at the moment: any support for multiple value returns would be 2927 // liable to disallow tail calls involving i64 -> iN truncation in many cases. 2928 return Ty1->getPrimitiveSizeInBits() <= 32; 2929 } 2930 2931 SDValue 2932 HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const { 2933 MVT Ty = ty(Op); 2934 const SDLoc &dl(Op); 2935 // Lower loads of scalar predicate vectors (v2i1, v4i1, v8i1) to loads of i1 2936 // followed by a TYPECAST. 2937 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 2938 bool DoCast = (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1); 2939 if (DoCast) { 2940 SDValue NL = DAG.getLoad( 2941 LN->getAddressingMode(), LN->getExtensionType(), MVT::i1, dl, 2942 LN->getChain(), LN->getBasePtr(), LN->getOffset(), LN->getPointerInfo(), 2943 /*MemoryVT*/ MVT::i1, LN->getAlign(), LN->getMemOperand()->getFlags(), 2944 LN->getAAInfo(), LN->getRanges()); 2945 LN = cast<LoadSDNode>(NL.getNode()); 2946 } 2947 2948 Align ClaimAlign = LN->getAlign(); 2949 if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG)) 2950 return replaceMemWithUndef(Op, DAG); 2951 2952 // Call LowerUnalignedLoad for all loads, it recognizes loads that 2953 // don't need extra aligning. 2954 SDValue LU = LowerUnalignedLoad(SDValue(LN, 0), DAG); 2955 if (DoCast) { 2956 SDValue TC = DAG.getNode(HexagonISD::TYPECAST, dl, Ty, LU); 2957 SDValue Ch = cast<LoadSDNode>(LU.getNode())->getChain(); 2958 return DAG.getMergeValues({TC, Ch}, dl); 2959 } 2960 return LU; 2961 } 2962 2963 SDValue 2964 HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const { 2965 const SDLoc &dl(Op); 2966 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 2967 SDValue Val = SN->getValue(); 2968 MVT Ty = ty(Val); 2969 2970 bool DoCast = (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1); 2971 if (DoCast) { 2972 SDValue TC = DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, Val); 2973 SDValue NS = DAG.getStore(SN->getChain(), dl, TC, SN->getBasePtr(), 2974 SN->getMemOperand()); 2975 if (SN->isIndexed()) { 2976 NS = DAG.getIndexedStore(NS, dl, SN->getBasePtr(), SN->getOffset(), 2977 SN->getAddressingMode()); 2978 } 2979 SN = cast<StoreSDNode>(NS.getNode()); 2980 } 2981 2982 Align ClaimAlign = SN->getAlign(); 2983 if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG)) 2984 return replaceMemWithUndef(Op, DAG); 2985 2986 MVT StoreTy = SN->getMemoryVT().getSimpleVT(); 2987 Align NeedAlign = Subtarget.getTypeAlignment(StoreTy); 2988 if (ClaimAlign < NeedAlign) 2989 return expandUnalignedStore(SN, DAG); 2990 return SDValue(SN, 0); 2991 } 2992 2993 SDValue 2994 HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) 2995 const { 2996 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 2997 MVT LoadTy = ty(Op); 2998 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value(); 2999 unsigned HaveAlign = LN->getAlign().value(); 3000 if (HaveAlign >= NeedAlign) 3001 return Op; 3002 3003 const SDLoc &dl(Op); 3004 const DataLayout &DL = DAG.getDataLayout(); 3005 LLVMContext &Ctx = *DAG.getContext(); 3006 3007 // If the load aligning is disabled or the load can be broken up into two 3008 // smaller legal loads, do the default (target-independent) expansion. 3009 bool DoDefault = false; 3010 // Handle it in the default way if this is an indexed load. 3011 if (!LN->isUnindexed()) 3012 DoDefault = true; 3013 3014 if (!AlignLoads) { 3015 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(), 3016 *LN->getMemOperand())) 3017 return Op; 3018 DoDefault = true; 3019 } 3020 if (!DoDefault && (2 * HaveAlign) == NeedAlign) { 3021 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)". 3022 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign) 3023 : MVT::getVectorVT(MVT::i8, HaveAlign); 3024 DoDefault = 3025 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand()); 3026 } 3027 if (DoDefault) { 3028 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG); 3029 return DAG.getMergeValues({P.first, P.second}, dl); 3030 } 3031 3032 // The code below generates two loads, both aligned as NeedAlign, and 3033 // with the distance of NeedAlign between them. For that to cover the 3034 // bits that need to be loaded (and without overlapping), the size of 3035 // the loads should be equal to NeedAlign. This is true for all loadable 3036 // types, but add an assertion in case something changes in the future. 3037 assert(LoadTy.getSizeInBits() == 8*NeedAlign); 3038 3039 unsigned LoadLen = NeedAlign; 3040 SDValue Base = LN->getBasePtr(); 3041 SDValue Chain = LN->getChain(); 3042 auto BO = getBaseAndOffset(Base); 3043 unsigned BaseOpc = BO.first.getOpcode(); 3044 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0) 3045 return Op; 3046 3047 if (BO.second % LoadLen != 0) { 3048 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first, 3049 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32)); 3050 BO.second -= BO.second % LoadLen; 3051 } 3052 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR) 3053 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first, 3054 DAG.getConstant(NeedAlign, dl, MVT::i32)) 3055 : BO.first; 3056 SDValue Base0 = 3057 DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::Fixed(BO.second), dl); 3058 SDValue Base1 = DAG.getMemBasePlusOffset( 3059 BaseNoOff, TypeSize::Fixed(BO.second + LoadLen), dl); 3060 3061 MachineMemOperand *WideMMO = nullptr; 3062 if (MachineMemOperand *MMO = LN->getMemOperand()) { 3063 MachineFunction &MF = DAG.getMachineFunction(); 3064 WideMMO = MF.getMachineMemOperand( 3065 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen), 3066 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), 3067 MMO->getSuccessOrdering(), MMO->getFailureOrdering()); 3068 } 3069 3070 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO); 3071 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO); 3072 3073 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy, 3074 {Load1, Load0, BaseNoOff.getOperand(0)}); 3075 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3076 Load0.getValue(1), Load1.getValue(1)); 3077 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl); 3078 return M; 3079 } 3080 3081 SDValue 3082 HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const { 3083 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 3084 auto *CY = dyn_cast<ConstantSDNode>(Y); 3085 if (!CY) 3086 return SDValue(); 3087 3088 const SDLoc &dl(Op); 3089 SDVTList VTs = Op.getNode()->getVTList(); 3090 assert(VTs.NumVTs == 2); 3091 assert(VTs.VTs[1] == MVT::i1); 3092 unsigned Opc = Op.getOpcode(); 3093 3094 if (CY) { 3095 uint32_t VY = CY->getZExtValue(); 3096 assert(VY != 0 && "This should have been folded"); 3097 // X +/- 1 3098 if (VY != 1) 3099 return SDValue(); 3100 3101 if (Opc == ISD::UADDO) { 3102 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y}); 3103 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG), 3104 ISD::SETEQ); 3105 return DAG.getMergeValues({Op, Ov}, dl); 3106 } 3107 if (Opc == ISD::USUBO) { 3108 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y}); 3109 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, 3110 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ); 3111 return DAG.getMergeValues({Op, Ov}, dl); 3112 } 3113 } 3114 3115 return SDValue(); 3116 } 3117 3118 SDValue 3119 HexagonTargetLowering::LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const { 3120 const SDLoc &dl(Op); 3121 unsigned Opc = Op.getOpcode(); 3122 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2); 3123 3124 if (Opc == ISD::ADDCARRY) 3125 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(), 3126 { X, Y, C }); 3127 3128 EVT CarryTy = C.getValueType(); 3129 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(), 3130 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) }); 3131 SDValue Out[] = { SubC.getValue(0), 3132 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) }; 3133 return DAG.getMergeValues(Out, dl); 3134 } 3135 3136 SDValue 3137 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 3138 SDValue Chain = Op.getOperand(0); 3139 SDValue Offset = Op.getOperand(1); 3140 SDValue Handler = Op.getOperand(2); 3141 SDLoc dl(Op); 3142 auto PtrVT = getPointerTy(DAG.getDataLayout()); 3143 3144 // Mark function as containing a call to EH_RETURN. 3145 HexagonMachineFunctionInfo *FuncInfo = 3146 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>(); 3147 FuncInfo->setHasEHReturn(); 3148 3149 unsigned OffsetReg = Hexagon::R28; 3150 3151 SDValue StoreAddr = 3152 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT), 3153 DAG.getIntPtrConstant(4, dl)); 3154 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo()); 3155 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset); 3156 3157 // Not needed we already use it as explict input to EH_RETURN. 3158 // MF.getRegInfo().addLiveOut(OffsetReg); 3159 3160 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain); 3161 } 3162 3163 SDValue 3164 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3165 unsigned Opc = Op.getOpcode(); 3166 3167 // Handle INLINEASM first. 3168 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR) 3169 return LowerINLINEASM(Op, DAG); 3170 3171 if (isHvxOperation(Op.getNode(), DAG)) { 3172 // If HVX lowering returns nothing, try the default lowering. 3173 if (SDValue V = LowerHvxOperation(Op, DAG)) 3174 return V; 3175 } 3176 3177 switch (Opc) { 3178 default: 3179 #ifndef NDEBUG 3180 Op.getNode()->dumpr(&DAG); 3181 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END) 3182 errs() << "Error: check for a non-legal type in this operation\n"; 3183 #endif 3184 llvm_unreachable("Should not custom lower this!"); 3185 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 3186 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 3187 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 3188 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 3189 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 3190 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 3191 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3192 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 3193 case ISD::LOAD: return LowerLoad(Op, DAG); 3194 case ISD::STORE: return LowerStore(Op, DAG); 3195 case ISD::UADDO: 3196 case ISD::USUBO: return LowerUAddSubO(Op, DAG); 3197 case ISD::ADDCARRY: 3198 case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG); 3199 case ISD::SRA: 3200 case ISD::SHL: 3201 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG); 3202 case ISD::ROTL: return LowerROTL(Op, DAG); 3203 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3204 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 3205 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 3206 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3207 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3208 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3209 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG); 3210 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG); 3211 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 3212 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 3213 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 3214 case ISD::VASTART: return LowerVASTART(Op, DAG); 3215 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 3216 case ISD::SETCC: return LowerSETCC(Op, DAG); 3217 case ISD::VSELECT: return LowerVSELECT(Op, DAG); 3218 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3219 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 3220 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG); 3221 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 3222 break; 3223 } 3224 3225 return SDValue(); 3226 } 3227 3228 void 3229 HexagonTargetLowering::LowerOperationWrapper(SDNode *N, 3230 SmallVectorImpl<SDValue> &Results, 3231 SelectionDAG &DAG) const { 3232 if (isHvxOperation(N, DAG)) { 3233 LowerHvxOperationWrapper(N, Results, DAG); 3234 if (!Results.empty()) 3235 return; 3236 } 3237 3238 // We are only custom-lowering stores to verify the alignment of the 3239 // address if it is a compile-time constant. Since a store can be modified 3240 // during type-legalization (the value being stored may need legalization), 3241 // return empty Results here to indicate that we don't really make any 3242 // changes in the custom lowering. 3243 if (N->getOpcode() != ISD::STORE) 3244 return TargetLowering::LowerOperationWrapper(N, Results, DAG); 3245 } 3246 3247 void 3248 HexagonTargetLowering::ReplaceNodeResults(SDNode *N, 3249 SmallVectorImpl<SDValue> &Results, 3250 SelectionDAG &DAG) const { 3251 if (isHvxOperation(N, DAG)) { 3252 ReplaceHvxNodeResults(N, Results, DAG); 3253 if (!Results.empty()) 3254 return; 3255 } 3256 3257 const SDLoc &dl(N); 3258 switch (N->getOpcode()) { 3259 case ISD::SRL: 3260 case ISD::SRA: 3261 case ISD::SHL: 3262 return; 3263 case ISD::BITCAST: 3264 // Handle a bitcast from v8i1 to i8. 3265 if (N->getValueType(0) == MVT::i8) { 3266 if (N->getOperand(0).getValueType() == MVT::v8i1) { 3267 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, 3268 N->getOperand(0), DAG); 3269 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8); 3270 Results.push_back(T); 3271 } 3272 } 3273 break; 3274 } 3275 } 3276 3277 SDValue 3278 HexagonTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) 3279 const { 3280 if (isHvxOperation(N, DCI.DAG)) { 3281 if (SDValue V = PerformHvxDAGCombine(N, DCI)) 3282 return V; 3283 return SDValue(); 3284 } 3285 3286 if (DCI.isBeforeLegalizeOps()) 3287 return SDValue(); 3288 3289 SDValue Op(N, 0); 3290 const SDLoc &dl(Op); 3291 unsigned Opc = Op.getOpcode(); 3292 3293 if (Opc == HexagonISD::P2D) { 3294 SDValue P = Op.getOperand(0); 3295 switch (P.getOpcode()) { 3296 case HexagonISD::PTRUE: 3297 return DCI.DAG.getConstant(-1, dl, ty(Op)); 3298 case HexagonISD::PFALSE: 3299 return getZero(dl, ty(Op), DCI.DAG); 3300 default: 3301 break; 3302 } 3303 } else if (Opc == ISD::VSELECT) { 3304 // This is pretty much duplicated in HexagonISelLoweringHVX... 3305 // 3306 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0) 3307 SDValue Cond = Op.getOperand(0); 3308 if (Cond->getOpcode() == ISD::XOR) { 3309 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1); 3310 if (C1->getOpcode() == HexagonISD::PTRUE) { 3311 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0, 3312 Op.getOperand(2), Op.getOperand(1)); 3313 return VSel; 3314 } 3315 } 3316 } 3317 3318 return SDValue(); 3319 } 3320 3321 /// Returns relocation base for the given PIC jumptable. 3322 SDValue 3323 HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table, 3324 SelectionDAG &DAG) const { 3325 int Idx = cast<JumpTableSDNode>(Table)->getIndex(); 3326 EVT VT = Table.getValueType(); 3327 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL); 3328 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T); 3329 } 3330 3331 //===----------------------------------------------------------------------===// 3332 // Inline Assembly Support 3333 //===----------------------------------------------------------------------===// 3334 3335 TargetLowering::ConstraintType 3336 HexagonTargetLowering::getConstraintType(StringRef Constraint) const { 3337 if (Constraint.size() == 1) { 3338 switch (Constraint[0]) { 3339 case 'q': 3340 case 'v': 3341 if (Subtarget.useHVXOps()) 3342 return C_RegisterClass; 3343 break; 3344 case 'a': 3345 return C_RegisterClass; 3346 default: 3347 break; 3348 } 3349 } 3350 return TargetLowering::getConstraintType(Constraint); 3351 } 3352 3353 std::pair<unsigned, const TargetRegisterClass*> 3354 HexagonTargetLowering::getRegForInlineAsmConstraint( 3355 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { 3356 3357 if (Constraint.size() == 1) { 3358 switch (Constraint[0]) { 3359 case 'r': // R0-R31 3360 switch (VT.SimpleTy) { 3361 default: 3362 return {0u, nullptr}; 3363 case MVT::i1: 3364 case MVT::i8: 3365 case MVT::i16: 3366 case MVT::i32: 3367 case MVT::f32: 3368 return {0u, &Hexagon::IntRegsRegClass}; 3369 case MVT::i64: 3370 case MVT::f64: 3371 return {0u, &Hexagon::DoubleRegsRegClass}; 3372 } 3373 break; 3374 case 'a': // M0-M1 3375 if (VT != MVT::i32) 3376 return {0u, nullptr}; 3377 return {0u, &Hexagon::ModRegsRegClass}; 3378 case 'q': // q0-q3 3379 switch (VT.getSizeInBits()) { 3380 default: 3381 return {0u, nullptr}; 3382 case 64: 3383 case 128: 3384 return {0u, &Hexagon::HvxQRRegClass}; 3385 } 3386 break; 3387 case 'v': // V0-V31 3388 switch (VT.getSizeInBits()) { 3389 default: 3390 return {0u, nullptr}; 3391 case 512: 3392 return {0u, &Hexagon::HvxVRRegClass}; 3393 case 1024: 3394 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps()) 3395 return {0u, &Hexagon::HvxVRRegClass}; 3396 return {0u, &Hexagon::HvxWRRegClass}; 3397 case 2048: 3398 return {0u, &Hexagon::HvxWRRegClass}; 3399 } 3400 break; 3401 default: 3402 return {0u, nullptr}; 3403 } 3404 } 3405 3406 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 3407 } 3408 3409 /// isFPImmLegal - Returns true if the target can instruction select the 3410 /// specified FP immediate natively. If false, the legalizer will 3411 /// materialize the FP immediate as a load from a constant pool. 3412 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 3413 bool ForCodeSize) const { 3414 return true; 3415 } 3416 3417 /// isLegalAddressingMode - Return true if the addressing mode represented by 3418 /// AM is legal for this target, for a load/store of the specified type. 3419 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL, 3420 const AddrMode &AM, Type *Ty, 3421 unsigned AS, Instruction *I) const { 3422 if (Ty->isSized()) { 3423 // When LSR detects uses of the same base address to access different 3424 // types (e.g. unions), it will assume a conservative type for these 3425 // uses: 3426 // LSR Use: Kind=Address of void in addrspace(4294967295), ... 3427 // The type Ty passed here would then be "void". Skip the alignment 3428 // checks, but do not return false right away, since that confuses 3429 // LSR into crashing. 3430 Align A = DL.getABITypeAlign(Ty); 3431 // The base offset must be a multiple of the alignment. 3432 if (!isAligned(A, AM.BaseOffs)) 3433 return false; 3434 // The shifted offset must fit in 11 bits. 3435 if (!isInt<11>(AM.BaseOffs >> Log2(A))) 3436 return false; 3437 } 3438 3439 // No global is ever allowed as a base. 3440 if (AM.BaseGV) 3441 return false; 3442 3443 int Scale = AM.Scale; 3444 if (Scale < 0) 3445 Scale = -Scale; 3446 switch (Scale) { 3447 case 0: // No scale reg, "r+i", "r", or just "i". 3448 break; 3449 default: // No scaled addressing mode. 3450 return false; 3451 } 3452 return true; 3453 } 3454 3455 /// Return true if folding a constant offset with the given GlobalAddress is 3456 /// legal. It is frequently not legal in PIC relocation models. 3457 bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) 3458 const { 3459 return HTM.getRelocationModel() == Reloc::Static; 3460 } 3461 3462 /// isLegalICmpImmediate - Return true if the specified immediate is legal 3463 /// icmp immediate, that is the target has icmp instructions which can compare 3464 /// a register against the immediate without having to materialize the 3465 /// immediate into a register. 3466 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 3467 return Imm >= -512 && Imm <= 511; 3468 } 3469 3470 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 3471 /// for tail call optimization. Targets which want to do tail call 3472 /// optimization should implement this function. 3473 bool HexagonTargetLowering::IsEligibleForTailCallOptimization( 3474 SDValue Callee, 3475 CallingConv::ID CalleeCC, 3476 bool IsVarArg, 3477 bool IsCalleeStructRet, 3478 bool IsCallerStructRet, 3479 const SmallVectorImpl<ISD::OutputArg> &Outs, 3480 const SmallVectorImpl<SDValue> &OutVals, 3481 const SmallVectorImpl<ISD::InputArg> &Ins, 3482 SelectionDAG& DAG) const { 3483 const Function &CallerF = DAG.getMachineFunction().getFunction(); 3484 CallingConv::ID CallerCC = CallerF.getCallingConv(); 3485 bool CCMatch = CallerCC == CalleeCC; 3486 3487 // *************************************************************************** 3488 // Look for obvious safe cases to perform tail call optimization that do not 3489 // require ABI changes. 3490 // *************************************************************************** 3491 3492 // If this is a tail call via a function pointer, then don't do it! 3493 if (!isa<GlobalAddressSDNode>(Callee) && 3494 !isa<ExternalSymbolSDNode>(Callee)) { 3495 return false; 3496 } 3497 3498 // Do not optimize if the calling conventions do not match and the conventions 3499 // used are not C or Fast. 3500 if (!CCMatch) { 3501 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast); 3502 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast); 3503 // If R & E, then ok. 3504 if (!R || !E) 3505 return false; 3506 } 3507 3508 // Do not tail call optimize vararg calls. 3509 if (IsVarArg) 3510 return false; 3511 3512 // Also avoid tail call optimization if either caller or callee uses struct 3513 // return semantics. 3514 if (IsCalleeStructRet || IsCallerStructRet) 3515 return false; 3516 3517 // In addition to the cases above, we also disable Tail Call Optimization if 3518 // the calling convention code that at least one outgoing argument needs to 3519 // go on the stack. We cannot check that here because at this point that 3520 // information is not available. 3521 return true; 3522 } 3523 3524 /// Returns the target specific optimal type for load and store operations as 3525 /// a result of memset, memcpy, and memmove lowering. 3526 /// 3527 /// If DstAlign is zero that means it's safe to destination alignment can 3528 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't 3529 /// a need to check it against alignment requirement, probably because the 3530 /// source does not need to be loaded. If 'IsMemset' is true, that means it's 3531 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of 3532 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it 3533 /// does not need to be loaded. It returns EVT::Other if the type should be 3534 /// determined using generic target-independent logic. 3535 EVT HexagonTargetLowering::getOptimalMemOpType( 3536 const MemOp &Op, const AttributeList &FuncAttributes) const { 3537 if (Op.size() >= 8 && Op.isAligned(Align(8))) 3538 return MVT::i64; 3539 if (Op.size() >= 4 && Op.isAligned(Align(4))) 3540 return MVT::i32; 3541 if (Op.size() >= 2 && Op.isAligned(Align(2))) 3542 return MVT::i16; 3543 return MVT::Other; 3544 } 3545 3546 bool HexagonTargetLowering::allowsMemoryAccess( 3547 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, 3548 Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const { 3549 MVT SVT = VT.getSimpleVT(); 3550 if (Subtarget.isHVXVectorType(SVT, true)) 3551 return allowsHvxMemoryAccess(SVT, Flags, Fast); 3552 return TargetLoweringBase::allowsMemoryAccess( 3553 Context, DL, VT, AddrSpace, Alignment, Flags, Fast); 3554 } 3555 3556 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses( 3557 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, 3558 bool *Fast) const { 3559 MVT SVT = VT.getSimpleVT(); 3560 if (Subtarget.isHVXVectorType(SVT, true)) 3561 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast); 3562 if (Fast) 3563 *Fast = false; 3564 return false; 3565 } 3566 3567 std::pair<const TargetRegisterClass*, uint8_t> 3568 HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, 3569 MVT VT) const { 3570 if (Subtarget.isHVXVectorType(VT, true)) { 3571 unsigned BitWidth = VT.getSizeInBits(); 3572 unsigned VecWidth = Subtarget.getVectorLength() * 8; 3573 3574 if (VT.getVectorElementType() == MVT::i1) 3575 return std::make_pair(&Hexagon::HvxQRRegClass, 1); 3576 if (BitWidth == VecWidth) 3577 return std::make_pair(&Hexagon::HvxVRRegClass, 1); 3578 assert(BitWidth == 2 * VecWidth); 3579 return std::make_pair(&Hexagon::HvxWRRegClass, 1); 3580 } 3581 3582 return TargetLowering::findRepresentativeClass(TRI, VT); 3583 } 3584 3585 bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load, 3586 ISD::LoadExtType ExtTy, EVT NewVT) const { 3587 // TODO: This may be worth removing. Check regression tests for diffs. 3588 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT)) 3589 return false; 3590 3591 auto *L = cast<LoadSDNode>(Load); 3592 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr()); 3593 // Small-data object, do not shrink. 3594 if (BO.first.getOpcode() == HexagonISD::CONST32_GP) 3595 return false; 3596 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) { 3597 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine()); 3598 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal()); 3599 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM); 3600 } 3601 return true; 3602 } 3603 3604 Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder, 3605 Type *ValueTy, Value *Addr, 3606 AtomicOrdering Ord) const { 3607 BasicBlock *BB = Builder.GetInsertBlock(); 3608 Module *M = BB->getParent()->getParent(); 3609 unsigned SZ = ValueTy->getPrimitiveSizeInBits(); 3610 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported"); 3611 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked 3612 : Intrinsic::hexagon_L4_loadd_locked; 3613 Function *Fn = Intrinsic::getDeclaration(M, IntID); 3614 3615 auto PtrTy = cast<PointerType>(Addr->getType()); 3616 PointerType *NewPtrTy = 3617 Builder.getIntNTy(SZ)->getPointerTo(PtrTy->getAddressSpace()); 3618 Addr = Builder.CreateBitCast(Addr, NewPtrTy); 3619 3620 Value *Call = Builder.CreateCall(Fn, Addr, "larx"); 3621 3622 return Builder.CreateBitCast(Call, ValueTy); 3623 } 3624 3625 /// Perform a store-conditional operation to Addr. Return the status of the 3626 /// store. This should be 0 if the store succeeded, non-zero otherwise. 3627 Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder, 3628 Value *Val, Value *Addr, 3629 AtomicOrdering Ord) const { 3630 BasicBlock *BB = Builder.GetInsertBlock(); 3631 Module *M = BB->getParent()->getParent(); 3632 Type *Ty = Val->getType(); 3633 unsigned SZ = Ty->getPrimitiveSizeInBits(); 3634 3635 Type *CastTy = Builder.getIntNTy(SZ); 3636 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported"); 3637 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked 3638 : Intrinsic::hexagon_S4_stored_locked; 3639 Function *Fn = Intrinsic::getDeclaration(M, IntID); 3640 3641 unsigned AS = Addr->getType()->getPointerAddressSpace(); 3642 Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS)); 3643 Val = Builder.CreateBitCast(Val, CastTy); 3644 3645 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx"); 3646 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), ""); 3647 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext())); 3648 return Ext; 3649 } 3650 3651 TargetLowering::AtomicExpansionKind 3652 HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { 3653 // Do not expand loads and stores that don't exceed 64 bits. 3654 return LI->getType()->getPrimitiveSizeInBits() > 64 3655 ? AtomicExpansionKind::LLOnly 3656 : AtomicExpansionKind::None; 3657 } 3658 3659 bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { 3660 // Do not expand loads and stores that don't exceed 64 bits. 3661 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64; 3662 } 3663 3664 TargetLowering::AtomicExpansionKind 3665 HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR( 3666 AtomicCmpXchgInst *AI) const { 3667 return AtomicExpansionKind::LLSC; 3668 } 3669