//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool>
    EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
                         cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
  cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
  cl::init(5), cl::desc("Set minimum jump tables"));

static cl::opt<int>
    MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
                         cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
                          cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
                                 cl::init(4),
                                 cl::desc("Max #stores to inline memmove"));
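// Each of the -max-store-* options above (and the memset variants below)
// overrides the corresponding MaxStoresPer* limit inherited from
// TargetLoweringBase; the values are copied into the lowering object in the
// HexagonTargetLowering constructor further down in this file.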
static cl::opt<int>
    MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
                         cl::desc("Max #stores to inline memset"));

static cl::opt<int>
    MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));

namespace {

class HexagonCCState : public CCState {
  unsigned NumNamedVarArgParams = 0;

public:
  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                 unsigned NumNamedArgs)
      : CCState(CC, IsVarArg, MF, locs, C),
        NumNamedVarArgParams(NumNamedArgs) {}
  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};

} // end anonymous namespace


// Implement calling convention for Hexagon.

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an even register number and does not actually
  // allocate a register for the current argument.
  return false;
}

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET.
  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    SDValue Val = OutVals[i];

    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Val = DAG.getBitcast(VA.getLocVT(), Val);
        break;
      case CCValAssign::SExt:
        Val = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::ZExt:
        Val = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::AExt:
        Val = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val);
        break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(HexagonISD::RET_GLUE, dl, MVT::Other, RetOps);
}

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only calls explicitly marked 'tail' may be emitted as tail calls; full
  // eligibility is checked later, in LowerCall.
  return CI->isTailCall();
}
Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {
  // Just support r19, the Linux kernel uses it.
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r0", Hexagon::R0)
                     .Case("r1", Hexagon::R1)
                     .Case("r2", Hexagon::R2)
                     .Case("r3", Hexagon::R3)
                     .Case("r4", Hexagon::R4)
                     .Case("r5", Hexagon::R5)
                     .Case("r6", Hexagon::R6)
                     .Case("r7", Hexagon::R7)
                     .Case("r8", Hexagon::R8)
                     .Case("r9", Hexagon::R9)
                     .Case("r10", Hexagon::R10)
                     .Case("r11", Hexagon::R11)
                     .Case("r12", Hexagon::R12)
                     .Case("r13", Hexagon::R13)
                     .Case("r14", Hexagon::R14)
                     .Case("r15", Hexagon::R15)
                     .Case("r16", Hexagon::R16)
                     .Case("r17", Hexagon::R17)
                     .Case("r18", Hexagon::R18)
                     .Case("r19", Hexagon::R19)
                     .Case("r20", Hexagon::R20)
                     .Case("r21", Hexagon::R21)
                     .Case("r22", Hexagon::R22)
                     .Case("r23", Hexagon::R23)
                     .Case("r24", Hexagon::R24)
                     .Case("r25", Hexagon::R25)
                     .Case("r26", Hexagon::R26)
                     .Case("r27", Hexagon::R27)
                     .Case("r28", Hexagon::R28)
                     .Case("r29", Hexagon::R29)
                     .Case("r30", Hexagon::R30)
                     .Case("r31", Hexagon::R31)
                     .Case("r1:0", Hexagon::D0)
                     .Case("r3:2", Hexagon::D1)
                     .Case("r5:4", Hexagon::D2)
                     .Case("r7:6", Hexagon::D3)
                     .Case("r9:8", Hexagon::D4)
                     .Case("r11:10", Hexagon::D5)
                     .Case("r13:12", Hexagon::D6)
                     .Case("r15:14", Hexagon::D7)
                     .Case("r17:16", Hexagon::D8)
                     .Case("r19:18", Hexagon::D9)
                     .Case("r21:20", Hexagon::D10)
                     .Case("r23:22", Hexagon::D11)
                     .Case("r25:24", Hexagon::D12)
                     .Case("r27:26", Hexagon::D13)
                     .Case("r29:28", Hexagon::D14)
                     .Case("r31:30", Hexagon::D15)
                     .Case("sp", Hexagon::R29)
                     .Case("fp", Hexagon::R30)
                     .Case("lr", Hexagon::R31)
                     .Case("p0", Hexagon::P0)
                     .Case("p1", Hexagon::P1)
                     .Case("p2", Hexagon::P2)
                     .Case("p3", Hexagon::P3)
                     .Case("sa0", Hexagon::SA0)
                     .Case("lc0", Hexagon::LC0)
                     .Case("sa1", Hexagon::SA1)
                     .Case("lc1", Hexagon::LC1)
                     .Case("m0", Hexagon::M0)
                     .Case("m1", Hexagon::M1)
                     .Case("usr", Hexagon::USR)
                     .Case("ugp", Hexagon::UGP)
                     .Case("cs0", Hexagon::CS0)
                     .Case("cs1", Hexagon::CS1)
                     .Default(Register());
  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
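  // For example, when the result is an i1, the code below produces
  //   CopyFromReg(R0, i32) -> CopyToReg(vreg:PredRegs) -> CopyFromReg(vreg, i1)
  // so that the value ends up in a predicate register; other scalar results
  // are copied directly out of their assigned physical registers.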
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
      Chain = TPR.getValue(0);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
      Chain = RetVal.getValue(1);
    }
    InVals.push_back(RetVal.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
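  // For example, under the scalar convention a call f(i32 a, i64 b) assigns
  // 'a' to R0; CC_SkipOdd then burns R1 so that 'b' lands in the even-aligned
  // pair R3:2. Anything that does not fit in R0-R5 is assigned a stack slot.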
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
                        NumParams);

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (CLI.IsTailCall) {
    bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
    CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                        IsVarArg, IsStructRet, StructAttrFlag, Outs,
                        OutVals, Ins, DAG);
    for (const CCValAssign &VA : ArgLocs) {
      if (VA.isMemLoc()) {
        CLI.IsTailCall = false;
        break;
      }
    }
    LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
                                         : "Argument must be passed on stack. "
                                           "Not eligible for Tail Call\n"));
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Arg = DAG.getBitcast(VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(
            LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }
    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }
  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  SDValue Glue;
  if (!CLI.IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!CLI.IsTailCall) {
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not glue the preceding CopyToReg nodes together with the ones that
    // follow.
    Glue = SDValue();
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (const auto &R : RegsToPass)
    Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (CLI.IsTailCall) {
    MFI.setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
  }
  // Set this here because we need to know this for "hasFP" in frame lowering.
  // The target-independent code calls getFrameRegister before setting it, and
  // getFrameRegister uses hasFP to determine whether the function has FP.
  MFI.setHasCalls(true);

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

/// Returns true by value, base pointer and offset pointer and addressing
/// mode by reference if this node can be combined with a load / store to
/// form a post-indexed load / store.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {
  LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
  if (!LSN)
    return false;
  EVT VT = LSN->getMemoryVT();
  if (!VT.isSimple())
    return false;
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
                     Subtarget.isHVXVectorType(VT.getSimpleVT());
  if (!IsLegalType)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;
  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;
  AM = ISD::POST_INC;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  unsigned LR = HRI.getRARegister();

  if ((Op.getOpcode() != ISD::INLINEASM &&
       Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
    return Op;

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
    const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
    unsigned NumVals = Flags.getNumOperandRegisters();
    ++i;  // Skip the ID value.

    switch (Flags.getKind()) {
      default:
        llvm_unreachable("Bad flags!");
      case InlineAsm::Kind::RegUse:
      case InlineAsm::Kind::Imm:
      case InlineAsm::Kind::Mem:
        i += NumVals;
        break;
      case InlineAsm::Kind::Clobber:
      case InlineAsm::Kind::RegDef:
      case InlineAsm::Kind::RegDefEarlyClobber: {
        for (; NumVals; --NumVals, ++i) {
          Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
          if (Reg != LR)
            continue;
          HMFI.setHasClobberLR(true);
          return Op;
        }
        break;
      }
    }
  }

  return Op;
}
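// For example, an inline-asm clobber list that names "r31" (LR) reaches the
// Clobber/RegDef cases above; setHasClobberLR(true) then tells the frame
// lowering that LR must be saved and restored around the asm.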
// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = Op.getConstantOperandVal(1);
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlign().value();

  LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op, AA);
  return AA;
}

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
                        *DAG.getContext(),
                        MF.getFunction().getFunctionType()->getNumParams());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, in the case of returning a struct by value (>8 bytes), the
  // first argument is a pointer that points to the location on the caller's
  // stack where the return value will be stored. For Hexagon, the location on
  // the caller's stack is passed only when the struct size is smaller than or
  // equal to 8 bytes. Otherwise, no address is passed into the callee and the
  // callee returns the result directly through R0/R1.
  auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
    switch (RC.getID()) {
      case Hexagon::IntRegsRegClassID:
        return Reg - Hexagon::R0 + 1;
      case Hexagon::DoubleRegsRegClassID:
        return (Reg - Hexagon::D0 + 1) * 2;
      case Hexagon::HvxVRRegClassID:
        return Reg - Hexagon::V0 + 1;
      case Hexagon::HvxWRRegClassID:
        return (Reg - Hexagon::W0 + 1) * 2;
    }
    llvm_unreachable("Unexpected register class");
  };

  auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HFL.FirstVarArgSavedReg = 0;
  HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool ByVal = Flags.isByVal();

    // Arguments passed in registers:
    // 1. 32- and 64-bit values and HVX vectors are passed directly,
    // 2. Large structs are passed via an address, and the address is
    //    passed in a register.
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
      llvm_unreachable("ByValSize must be bigger than 8 bytes");

    bool InReg = VA.isRegLoc() &&
                 (!ByVal || (ByVal && Flags.getByValSize() > 8));

    if (InReg) {
      MVT RegVT = VA.getLocVT();
      if (VA.getLocInfo() == CCValAssign::BCvt)
        RegVT = VA.getValVT();

      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      Register VReg = MRI.createVirtualRegister(RC);
      SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

      // Treat values of type MVT::i1 specially: they are passed in
      // registers of type i32, but they need to remain as values of
      // type i1 for consistency of the argument lowering.
      if (VA.getValVT() == MVT::i1) {
        assert(RegVT.getSizeInBits() <= 32);
        SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
                                Copy, DAG.getConstant(1, dl, RegVT));
        Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
                            ISD::SETNE);
      } else {
#ifndef NDEBUG
        unsigned RegSize = RegVT.getSizeInBits();
        assert(RegSize == 32 || RegSize == 64 ||
               Subtarget.isHVXVectorType(RegVT));
#endif
      }
      InVals.push_back(Copy);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Argument should be passed in memory");

      // If it's a byval parameter, then we need to compute the
      // "real" size, not the size of the pointer.
      unsigned ObjSize = Flags.isByVal()
                            ? Flags.getByValSize()
                            : VA.getLocVT().getStoreSizeInBits() / 8;
      // Create the frame index object for this incoming parameter.
      int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                MachinePointerInfo::getFixedStack(MF, FI, 0));
        InVals.push_back(L);
      }
    }
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

    // Create the frame index for the start of the register saved area.
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                        ? (NumVarArgRegs + 1) * 4
                                        : NumVarArgRegs * 4;

    if (RegSaveAreaSizePlusPadding > 0) {
      // The offset to the saved register area should be 8-byte aligned.
      int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;

      int RegSaveAreaFrameIndex =
          MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

      // This will point to the next argument passed via stack.
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setVarArgsFrameIndex(FI);
    } else {
      // This will point to the next argument passed via stack, when
      // there is no saved register area.
      int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
    }
  }

  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    // This will point to the next argument passed via stack.
    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
    int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
    HMFI.setVarArgsFrameIndex(FI);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget.isEnvironmentMusl()) {
    return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  auto &HFL = *Subtarget.getFrameLowering();
  SDLoc DL(Op);
  SmallVector<SDValue, 8> MemOps;
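  // Under the musl ABI the va_list is effectively a struct of three pointers
  // (matching, as far as we know, the builtin va_list layout clang uses for
  // Hexagon):
  //   offset 0: __current_saved_reg_area_pointer
  //   offset 4: __saved_reg_area_end_pointer
  //   offset 8: __overflow_area_pointer
  // The three stores built below initialize these fields in that order.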
  // Get the frame index of the va_list.
  SDValue FIN = Op.getOperand(1);

  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area to point at the first register location. This is
  // because the saved register area has to be 8-byte aligned. In case of an
  // odd start register, there will be 4 bytes of padding at the beginning of
  // the saved register area. If all registers are used up, the following
  // condition will handle it correctly.
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                                      MVT::i32),
                    DAG.getIntPtrConstant(4, DL));

  // Store the saved register area start pointer.
  SDValue Store =
      DAG.getStore(Op.getOperand(0), DL,
                   SavedRegAreaStartFrameIndex,
                   FIN, MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store the saved register area end pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store the overflow area pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the linux ABI is enabled for the current compilation.
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  // The size of the va_list is 12 bytes as it has 3 pointers. Therefore,
  // we need to memcpy 12 bytes from one va_list to the other.
  return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
                       DAG.getIntPtrConstant(12, DL), Align(4),
                       /*isVolatile*/ false, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ResTy = ty(Op);
  MVT OpTy = ty(LHS);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    return DAG.getSetCC(dl, ResTy,
                        DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
                        DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
  }
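  // (E.g. a v4i8 setcc is thus performed as a v4i16 comparison of
  // sign-extended operands, with the result type unchanged.)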
  // Treat all other vector types as legal.
  if (ResTy.isVector())
    return Op;

  // Comparisons of short integers should use sign-extend, not zero-extend,
  // since we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
      case ISD::TRUNCATE: {
        // A sign-extend of a truncate of a sign-extend is free.
        SDValue Op = N.getOperand(0);
        if (Op.getOpcode() != ISD::AssertSext)
          return false;
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        unsigned ThisBW = ty(N).getSizeInBits();
        unsigned OrigBW = OrigTy.getSizeInBits();
        // The type that was sign-extended to get the AssertSext must be
        // narrower than the type of N (so that N still has the same value
        // as the original).
        return ThisBW >= OrigBW;
      }
      case ISD::LOAD:
        // We have sign-extended loads.
        return true;
    }
    return false;
  };

  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
      return DAG.getSetCC(dl, ResTy,
                          DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
                          DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  MVT OpTy = ty(Op1);
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    // Generate (trunc (select (_, sext, sext))).
    return DAG.getSExtOrTrunc(
              DAG.getSelect(dl, WideTy, PredOp,
                            DAG.getSExtOrTrunc(Op1, dl, WideTy),
                            DAG.getSExtOrTrunc(Op2, dl, WideTy)),
              dl, OpTy);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  Constant *CVal = nullptr;
  bool isVTi1Type = false;
  if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
    if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
      IRBuilder<> IRB(CV->getContext());
      SmallVector<Constant*, 128> NewConst;
      unsigned VecLen = CV->getNumOperands();
      assert(isPowerOf2_32(VecLen) &&
             "conversion only supported for pow2 VectorSize");
      for (unsigned i = 0; i < VecLen; ++i)
        NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));

      CVal = ConstantVector::get(NewConst);
      isVTi1Type = true;
    }
  }
  Align Alignment = CPN->getAlign();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment,
                                  Offset, TF);
  else if (isVTi1Type)
    T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset,
                                  TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}

SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
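  // (A nonzero depth was handled above by walking the frame chain and loading
  // from FP+4, where allocframe keeps the saved LR next to the saved FP.)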
  Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}

SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
                                            HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use the GOT index.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}
SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}

SDValue
HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
      const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
                                               HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}

SDValue
HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
      unsigned char OperandFlags) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  // Create the operands for the call. The operands should contain, in order:
  // 1. Chain SDValue
  // 2. Callee, which in this case is the global address value.
  // 3. Registers live into the call; in this case it is just R0, as we
  //    have a single argument to be passed.
  // 4. Glue.
  // Note: The order is important.

  const auto &HRI = *Subtarget.getRegisterInfo();
  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
                    DAG.getRegisterMask(Mask), Glue };
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);

  // Inform MFI that the function has calls.
  MFI.setAdjustsStack(true);

  Glue = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
}

//
// Lower using the initial-exec model for TLS addresses.
//
SDValue
HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);

  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF =
      IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;

  // First generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           Offset, TF);

  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  if (IsPositionIndependent) {
    // Generate the GOT pointer in case of position independent code.
    SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);
    // Add the TLS symbol address to the GOT pointer. This gives a
    // GOT-relative relocation for the symbol.
    Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
  }

  // Load the offset of the TLS symbol. This offset is relative to the
  // thread pointer.
  SDValue LoadOffset =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());

  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
}

//
// Lower using the local-exec model for TLS addresses.
//
SDValue
HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
  // Generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_TPREL);
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  // The address of the thread-local variable is the sum of the thread
  // pointer and the variable's offset.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
}

//
// Lower using the general-dynamic model for TLS addresses.
//
SDValue
HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // First generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_GDGOT);

  // Then, generate the GOT pointer.
  SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);

  // Add the TLS symbol and the GOT pointer.
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
  SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);

  // Copy over the argument to R0.
  SDValue InGlue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InGlue);
  InGlue = Chain.getValue(1);

  unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls()
                       ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
                       : HexagonII::MO_GDPLT;

  return GetDynamicTLSAddr(DAG, Chain, GA, InGlue, PtrVT,
                           Hexagon::R0, Flags);
}
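// Roughly, the general-dynamic sequence constructed above amounts to:
//   r0 = add(##_GLOBAL_OFFSET_TABLE_@PCREL, ##sym@GDGOT)
//   call sym@GDPLT          // typically resolves to __tls_get_addr
// with the address of 'sym' returned in r0.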
//
// Lower TLS addresses.
//
// For now, for dynamic models, we only support the general-dynamic model.
//
SDValue
HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
      SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  switch (HTM.getTLSModel(GA->getGlobal())) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
      return LowerToTLSInitialExecModel(GA, DAG);
    case TLSModel::LocalExec:
      return LowerToTLSLocalExecModel(GA, DAG);
  }
  llvm_unreachable("Bogus TLS model");
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
                                             const HexagonSubtarget &ST)
    : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
      Subtarget(ST) {
  auto &HRI = *Subtarget.getRegisterInfo();

  setPrefLoopAlignment(Align(16));
  setMinFunctionAlignment(Align(4));
  setPrefFunctionAlignment(Align(16));
  setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
  setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
  setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);

  setMaxAtomicSizeInBitsSupported(64);
  setMinCmpXchgSizeInBits(32);

  if (EnableHexSDNodeSched)
    setSchedulingPreference(Sched::VLIW);
  else
    setSchedulingPreference(Sched::Source);

  // Limits for inline expansion of memcpy/memmove.
  MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
  MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
  MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
  MaxStoresPerMemset = MaxStoresPerMemsetCL;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;

  //
  // Set up register classes.
  //

  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
  addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass);  // bbbbaaaa
  addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass);  // ddccbbaa
  addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass);  // hgfedcba
  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);

  addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);

  //
  // Handling of scalar operations.
  //
  // All operations default to "legal", except:
  // - indexed loads and stores (pre-/post-incremented),
  // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
  //   ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
  //   FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
  //   FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
  // which default to "expand" for at least one type.
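  // As a reminder, the action values used below mean: "Legal" nodes are
  // selected as-is, "Custom" nodes are routed to this class's lowering hooks,
  // "Expand" nodes are replaced with generic equivalents (or libcalls), and
  // "Promote" performs the operation in a wider type.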
1492 setOperationAction(ISD::ConstantFP, MVT::f32, Legal); 1493 setOperationAction(ISD::ConstantFP, MVT::f64, Legal); 1494 setOperationAction(ISD::TRAP, MVT::Other, Legal); 1495 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 1496 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 1497 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 1498 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 1499 setOperationAction(ISD::INLINEASM, MVT::Other, Custom); 1500 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom); 1501 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 1502 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); 1503 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 1504 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); 1505 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); 1506 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 1507 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 1508 1509 // Custom legalize GlobalAddress nodes into CONST32. 1510 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 1511 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom); 1512 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 1513 1514 // Hexagon needs to optimize cases with negative constants. 1515 setOperationAction(ISD::SETCC, MVT::i8, Custom); 1516 setOperationAction(ISD::SETCC, MVT::i16, Custom); 1517 setOperationAction(ISD::SETCC, MVT::v4i8, Custom); 1518 setOperationAction(ISD::SETCC, MVT::v2i16, Custom); 1519 1520 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 1521 setOperationAction(ISD::VASTART, MVT::Other, Custom); 1522 setOperationAction(ISD::VAEND, MVT::Other, Expand); 1523 setOperationAction(ISD::VAARG, MVT::Other, Expand); 1524 if (Subtarget.isEnvironmentMusl()) 1525 setOperationAction(ISD::VACOPY, MVT::Other, Custom); 1526 else 1527 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 1528 1529 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 1530 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 1531 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 1532 1533 if (EmitJumpTables) 1534 setMinimumJumpTableEntries(MinimumJumpTables); 1535 else 1536 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max()); 1537 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 1538 1539 for (unsigned LegalIntOp : 1540 {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) { 1541 setOperationAction(LegalIntOp, MVT::i32, Legal); 1542 setOperationAction(LegalIntOp, MVT::i64, Legal); 1543 } 1544 1545 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit, 1546 // but they only operate on i64. 1547 for (MVT VT : MVT::integer_valuetypes()) { 1548 setOperationAction(ISD::UADDO, VT, Custom); 1549 setOperationAction(ISD::USUBO, VT, Custom); 1550 setOperationAction(ISD::SADDO, VT, Expand); 1551 setOperationAction(ISD::SSUBO, VT, Expand); 1552 setOperationAction(ISD::UADDO_CARRY, VT, Expand); 1553 setOperationAction(ISD::USUBO_CARRY, VT, Expand); 1554 } 1555 setOperationAction(ISD::UADDO_CARRY, MVT::i64, Custom); 1556 setOperationAction(ISD::USUBO_CARRY, MVT::i64, Custom); 1557 1558 setOperationAction(ISD::CTLZ, MVT::i8, Promote); 1559 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 1560 setOperationAction(ISD::CTTZ, MVT::i8, Promote); 1561 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 1562 1563 // Popcount can count # of 1s in i64 but returns i32. 
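// (Hence the promotions below: i8/i16/i32 CTPOP are widened to the i64 form,
// which is the only one marked Legal.)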
1564 setOperationAction(ISD::CTPOP, MVT::i8, Promote); 1565 setOperationAction(ISD::CTPOP, MVT::i16, Promote); 1566 setOperationAction(ISD::CTPOP, MVT::i32, Promote); 1567 setOperationAction(ISD::CTPOP, MVT::i64, Legal); 1568 1569 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 1570 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 1571 setOperationAction(ISD::BSWAP, MVT::i32, Legal); 1572 setOperationAction(ISD::BSWAP, MVT::i64, Legal); 1573 1574 setOperationAction(ISD::FSHL, MVT::i32, Legal); 1575 setOperationAction(ISD::FSHL, MVT::i64, Legal); 1576 setOperationAction(ISD::FSHR, MVT::i32, Legal); 1577 setOperationAction(ISD::FSHR, MVT::i64, Legal); 1578 1579 for (unsigned IntExpOp : 1580 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, 1581 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, 1582 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, 1583 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) { 1584 for (MVT VT : MVT::integer_valuetypes()) 1585 setOperationAction(IntExpOp, VT, Expand); 1586 } 1587 1588 for (unsigned FPExpOp : 1589 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, 1590 ISD::FPOW, ISD::FCOPYSIGN}) { 1591 for (MVT VT : MVT::fp_valuetypes()) 1592 setOperationAction(FPExpOp, VT, Expand); 1593 } 1594 1595 // No extending loads from i32. 1596 for (MVT VT : MVT::integer_valuetypes()) { 1597 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand); 1598 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand); 1599 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand); 1600 } 1601 // Turn FP truncstore into trunc + store. 1602 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 1603 // Turn FP extload into load/fpextend. 1604 for (MVT VT : MVT::fp_valuetypes()) 1605 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 1606 1607 // Expand BR_CC and SELECT_CC for all integer and fp types. 1608 for (MVT VT : MVT::integer_valuetypes()) { 1609 setOperationAction(ISD::BR_CC, VT, Expand); 1610 setOperationAction(ISD::SELECT_CC, VT, Expand); 1611 } 1612 for (MVT VT : MVT::fp_valuetypes()) { 1613 setOperationAction(ISD::BR_CC, VT, Expand); 1614 setOperationAction(ISD::SELECT_CC, VT, Expand); 1615 } 1616 setOperationAction(ISD::BR_CC, MVT::Other, Expand); 1617 1618 // 1619 // Handling of vector operations. 1620 // 1621 1622 // Set the action for vector operations to "expand", then override it with 1623 // either "custom" or "legal" for specific cases. 
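// (Illustrative: ISD::MUL on v4i16 is first marked Expand by the loop below
// and then re-marked Legal in the "types natively supported" loop further
// down, while ISD::SDIV on v4i16 keeps the Expand default.)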
1624 static const unsigned VectExpOps[] = { 1625 // Integer arithmetic: 1626 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV, 1627 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO, 1628 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI, 1629 // Logical/bit: 1630 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR, 1631 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, ISD::BSWAP, ISD::BITREVERSE, 1632 // Floating point arithmetic/math functions: 1633 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV, 1634 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, 1635 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, 1636 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC, 1637 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR, 1638 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS, ISD::FLDEXP, 1639 // Misc: 1640 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool, 1641 // Vector: 1642 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR, 1643 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, 1644 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, 1645 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE, 1646 ISD::SPLAT_VECTOR, 1647 }; 1648 1649 for (MVT VT : MVT::fixedlen_vector_valuetypes()) { 1650 for (unsigned VectExpOp : VectExpOps) 1651 setOperationAction(VectExpOp, VT, Expand); 1652 1653 // Expand all extending loads and truncating stores: 1654 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) { 1655 if (TargetVT == VT) 1656 continue; 1657 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand); 1658 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand); 1659 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand); 1660 setTruncStoreAction(VT, TargetVT, Expand); 1661 } 1662 1663 // Normalize all inputs to SELECT to be vectors of i32. 1664 if (VT.getVectorElementType() != MVT::i32) { 1665 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32); 1666 setOperationAction(ISD::SELECT, VT, Promote); 1667 AddPromotedToType(ISD::SELECT, VT, VT32); 1668 } 1669 setOperationAction(ISD::SRA, VT, Custom); 1670 setOperationAction(ISD::SHL, VT, Custom); 1671 setOperationAction(ISD::SRL, VT, Custom); 1672 } 1673 1674 // Extending loads from (native) vectors of i8 into (native) vectors of i16 1675 // are legal. 
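// (That is, a load of <4 x i8> that is sign- or zero-extended to <4 x i16>
// can remain a single extending-load node instead of being split into a load
// followed by a separate extend.)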
1676   setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1677   setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1678   setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1679   setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1680   setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1681   setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1682
1683   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1684   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1685   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1686
1687   // Types natively supported:
1688   for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1689                        MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1690     setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
1691     setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
1692     setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
1693     setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
1694     setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
1695     setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
1696
1697     setOperationAction(ISD::ADD, NativeVT, Legal);
1698     setOperationAction(ISD::SUB, NativeVT, Legal);
1699     setOperationAction(ISD::MUL, NativeVT, Legal);
1700     setOperationAction(ISD::AND, NativeVT, Legal);
1701     setOperationAction(ISD::OR, NativeVT, Legal);
1702     setOperationAction(ISD::XOR, NativeVT, Legal);
1703
1704     if (NativeVT.getVectorElementType() != MVT::i1) {
1705       setOperationAction(ISD::SPLAT_VECTOR, NativeVT, Legal);
1706       setOperationAction(ISD::BSWAP, NativeVT, Legal);
1707       setOperationAction(ISD::BITREVERSE, NativeVT, Legal);
1708     }
1709   }
1710
1711   for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1712     setOperationAction(ISD::SMIN, VT, Legal);
1713     setOperationAction(ISD::SMAX, VT, Legal);
1714     setOperationAction(ISD::UMIN, VT, Legal);
1715     setOperationAction(ISD::UMAX, VT, Legal);
1716   }
1717
1718   // Custom lower unaligned loads.
1719   // Also, for both loads and stores, verify the alignment of the address
1720   // in case it is a compile-time constant. This is a usability feature to
1721   // provide a meaningful error message to users.
1722   for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1723                  MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1724     setOperationAction(ISD::LOAD, VT, Custom);
1725     setOperationAction(ISD::STORE, VT, Custom);
1726   }
1727
1728   // Custom-lower loads/stores of boolean vectors.
1729   for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1730     setOperationAction(ISD::LOAD, VT, Custom);
1731     setOperationAction(ISD::STORE, VT, Custom);
1732   }
1733
1734   // Normalize integer compares to EQ/GT/UGT
1735   for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32,
1736                  MVT::v4i16}) {
1737     setCondCodeAction(ISD::SETNE, VT, Expand);
1738     setCondCodeAction(ISD::SETLE, VT, Expand);
1739     setCondCodeAction(ISD::SETGE, VT, Expand);
1740     setCondCodeAction(ISD::SETLT, VT, Expand);
1741     setCondCodeAction(ISD::SETULE, VT, Expand);
1742     setCondCodeAction(ISD::SETUGE, VT, Expand);
1743     setCondCodeAction(ISD::SETULT, VT, Expand);
1744   }
1745
1746   // Normalize boolean compares to [U]LE/[U]LT
1747   for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1748     setCondCodeAction(ISD::SETGE, VT, Expand);
1749     setCondCodeAction(ISD::SETGT, VT, Expand);
1750     setCondCodeAction(ISD::SETUGE, VT, Expand);
1751     setCondCodeAction(ISD::SETUGT, VT, Expand);
1752   }
1753
1754   // Custom-lower bitcasts from i8 to v8i1.
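// (See LowerBITCAST below: the i8 is zero-extended into a 32-bit register
// and transferred to a predicate register with C2_tfrrp.)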
1755 setOperationAction(ISD::BITCAST, MVT::i8, Custom); 1756 setOperationAction(ISD::SETCC, MVT::v2i16, Custom); 1757 setOperationAction(ISD::VSELECT, MVT::v4i8, Custom); 1758 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom); 1759 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); 1760 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); 1761 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); 1762 1763 // V5+. 1764 setOperationAction(ISD::FMA, MVT::f64, Expand); 1765 setOperationAction(ISD::FADD, MVT::f64, Expand); 1766 setOperationAction(ISD::FSUB, MVT::f64, Expand); 1767 setOperationAction(ISD::FMUL, MVT::f64, Expand); 1768 1769 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 1770 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 1771 1772 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); 1773 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote); 1774 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); 1775 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); 1776 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); 1777 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 1778 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); 1779 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); 1780 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); 1781 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); 1782 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); 1783 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); 1784 1785 // Special handling for half-precision floating point conversions. 1786 // Lower half float conversions into library calls. 1787 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 1788 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 1789 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 1790 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 1791 1792 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); 1793 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); 1794 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 1795 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 1796 1797 // Handling of indexed loads/stores: default is "expand". 1798 // 1799 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64, 1800 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) { 1801 setIndexedLoadAction(ISD::POST_INC, VT, Legal); 1802 setIndexedStoreAction(ISD::POST_INC, VT, Legal); 1803 } 1804 1805 // Subtarget-specific operation actions. 
1806 //
1807   if (Subtarget.hasV60Ops()) {
1808     setOperationAction(ISD::ROTL, MVT::i32, Legal);
1809     setOperationAction(ISD::ROTL, MVT::i64, Legal);
1810     setOperationAction(ISD::ROTR, MVT::i32, Legal);
1811     setOperationAction(ISD::ROTR, MVT::i64, Legal);
1812   }
1813   if (Subtarget.hasV66Ops()) {
1814     setOperationAction(ISD::FADD, MVT::f64, Legal);
1815     setOperationAction(ISD::FSUB, MVT::f64, Legal);
1816   }
1817   if (Subtarget.hasV67Ops()) {
1818     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1819     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1820     setOperationAction(ISD::FMUL, MVT::f64, Legal);
1821   }
1822
1823   setTargetDAGCombine(ISD::OR);
1824   setTargetDAGCombine(ISD::TRUNCATE);
1825   setTargetDAGCombine(ISD::VSELECT);
1826
1827   if (Subtarget.useHVXOps())
1828     initializeHVXLowering();
1829
1830   computeRegisterProperties(&HRI);
1831
1832   //
1833   // Library calls for unsupported operations
1834   //
1835   bool FastMath = EnableFastMath;
1836
1837   setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1838   setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1839   setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1840   setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1841   setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1842   setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1843   setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1844   setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1845
1846   setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1847   setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1848   setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1849   setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1850   setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1851   setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1852
1853   // This is the only fast library function for double-precision sqrt.
1854   if (FastMath)
1855     setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1856
1857   // Prefix is: nothing for the default "slow-math" routines,
1858   // "fast_" for the V5+ fast-math double-precision arithmetic routines,
1859   // and "fast2_" for the fast square-root routines (kept separate for now).
1860   if (FastMath) {
1861     setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1862     setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1863     setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1864     setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1865     setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1866   } else {
1867     setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1868     setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1869     setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1870     setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1871     setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1872   }
1873
1874   if (FastMath)
1875     setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1876   else
1877     setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
1878
1879   // Routines to handle fp16 storage type.
1880   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1881   setLibcallName(RTLIB::FPROUND_F64_F16, "__truncdfhf2");
1882   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1883
1884   // These cause problems when the shift amount is non-constant.
1885 setLibcallName(RTLIB::SHL_I128, nullptr); 1886 setLibcallName(RTLIB::SRL_I128, nullptr); 1887 setLibcallName(RTLIB::SRA_I128, nullptr); 1888 } 1889 1890 const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { 1891 switch ((HexagonISD::NodeType)Opcode) { 1892 case HexagonISD::ADDC: return "HexagonISD::ADDC"; 1893 case HexagonISD::SUBC: return "HexagonISD::SUBC"; 1894 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA"; 1895 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT"; 1896 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL"; 1897 case HexagonISD::BARRIER: return "HexagonISD::BARRIER"; 1898 case HexagonISD::CALL: return "HexagonISD::CALL"; 1899 case HexagonISD::CALLnr: return "HexagonISD::CALLnr"; 1900 case HexagonISD::CALLR: return "HexagonISD::CALLR"; 1901 case HexagonISD::COMBINE: return "HexagonISD::COMBINE"; 1902 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP"; 1903 case HexagonISD::CONST32: return "HexagonISD::CONST32"; 1904 case HexagonISD::CP: return "HexagonISD::CP"; 1905 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH"; 1906 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN"; 1907 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT"; 1908 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU"; 1909 case HexagonISD::INSERT: return "HexagonISD::INSERT"; 1910 case HexagonISD::JT: return "HexagonISD::JT"; 1911 case HexagonISD::RET_GLUE: return "HexagonISD::RET_GLUE"; 1912 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; 1913 case HexagonISD::VASL: return "HexagonISD::VASL"; 1914 case HexagonISD::VASR: return "HexagonISD::VASR"; 1915 case HexagonISD::VLSR: return "HexagonISD::VLSR"; 1916 case HexagonISD::MFSHL: return "HexagonISD::MFSHL"; 1917 case HexagonISD::MFSHR: return "HexagonISD::MFSHR"; 1918 case HexagonISD::SSAT: return "HexagonISD::SSAT"; 1919 case HexagonISD::USAT: return "HexagonISD::USAT"; 1920 case HexagonISD::SMUL_LOHI: return "HexagonISD::SMUL_LOHI"; 1921 case HexagonISD::UMUL_LOHI: return "HexagonISD::UMUL_LOHI"; 1922 case HexagonISD::USMUL_LOHI: return "HexagonISD::USMUL_LOHI"; 1923 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW"; 1924 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0"; 1925 case HexagonISD::VROR: return "HexagonISD::VROR"; 1926 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE"; 1927 case HexagonISD::PTRUE: return "HexagonISD::PTRUE"; 1928 case HexagonISD::PFALSE: return "HexagonISD::PFALSE"; 1929 case HexagonISD::D2P: return "HexagonISD::D2P"; 1930 case HexagonISD::P2D: return "HexagonISD::P2D"; 1931 case HexagonISD::V2Q: return "HexagonISD::V2Q"; 1932 case HexagonISD::Q2V: return "HexagonISD::Q2V"; 1933 case HexagonISD::QCAT: return "HexagonISD::QCAT"; 1934 case HexagonISD::QTRUE: return "HexagonISD::QTRUE"; 1935 case HexagonISD::QFALSE: return "HexagonISD::QFALSE"; 1936 case HexagonISD::TL_EXTEND: return "HexagonISD::TL_EXTEND"; 1937 case HexagonISD::TL_TRUNCATE: return "HexagonISD::TL_TRUNCATE"; 1938 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST"; 1939 case HexagonISD::VALIGN: return "HexagonISD::VALIGN"; 1940 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR"; 1941 case HexagonISD::ISEL: return "HexagonISD::ISEL"; 1942 case HexagonISD::OP_END: break; 1943 } 1944 return nullptr; 1945 } 1946 1947 bool 1948 HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, 1949 const SDLoc &dl, SelectionDAG &DAG) const { 1950 auto *CA = dyn_cast<ConstantSDNode>(Ptr); 1951 if (!CA) 1952 return true; 1953 
unsigned Addr = CA->getZExtValue();
1954   Align HaveAlign =
1955       Addr != 0 ? Align(1ull << llvm::countr_zero(Addr)) : NeedAlign;
1956   if (HaveAlign >= NeedAlign)
1957     return true;
1958
1959   static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind();
1960
1961   struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo {
1962     DiagnosticInfoMisalignedTrap(StringRef M)
1963         : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {}
1964     void print(DiagnosticPrinter &DP) const override {
1965       DP << Msg;
1966     }
1967     static bool classof(const DiagnosticInfo *DI) {
1968       return DI->getKind() == DK_MisalignedTrap;
1969     }
1970     StringRef Msg;
1971   };
1972
1973   std::string ErrMsg;
1974   raw_string_ostream O(ErrMsg);
1975   O << "Misaligned constant address: " << format_hex(Addr, 10)
1976     << " has alignment " << HaveAlign.value()
1977     << ", but the memory access requires " << NeedAlign.value();
1978   if (DebugLoc DL = dl.getDebugLoc())
1979     DL.print(O << ", at ");
1980   O << ". The instruction has been replaced with a trap.";
1981
1982   DAG.getContext()->diagnose(DiagnosticInfoMisalignedTrap(O.str()));
1983   return false;
1984 }
1985
1986 SDValue
1987 HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG)
1988       const {
1989   const SDLoc &dl(Op);
1990   auto *LS = cast<LSBaseSDNode>(Op.getNode());
1991   assert(!LS->isIndexed() && "Not expecting indexed ops on constant address");
1992
1993   SDValue Chain = LS->getChain();
1994   SDValue Trap = DAG.getNode(ISD::TRAP, dl, MVT::Other, Chain);
1995   if (LS->getOpcode() == ISD::LOAD)
1996     return DAG.getMergeValues({DAG.getUNDEF(ty(Op)), Trap}, dl);
1997   return Trap;
1998 }
1999
2000 // Bit-reverse Load Intrinsic: Check if the instruction is a bit-reverse load
2001 // intrinsic.
2002 static bool isBrevLdIntrinsic(const Value *Inst) {
2003   unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
2004   return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2005           ID == Intrinsic::hexagon_L2_loadri_pbr ||
2006           ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2007           ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2008           ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2009           ID == Intrinsic::hexagon_L2_loadrub_pbr);
2010 }
2011
2012 // Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
2013 // previous instruction. So far we only handle bitcast, extractvalue, and
2014 // bit-reverse load intrinsic instructions. Should we handle CGEP?
2015 static Value *getBrevLdObject(Value *V) {
2016   if (Operator::getOpcode(V) == Instruction::ExtractValue ||
2017       Operator::getOpcode(V) == Instruction::BitCast)
2018     V = cast<Operator>(V)->getOperand(0);
2019   else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
2020     V = cast<Instruction>(V)->getOperand(0);
2021   return V;
2022 }
2023
2024 // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
2025 // a back edge. If the back edge comes from the intrinsic itself, the incoming
2026 // edge is returned.
2027 static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
2028   const BasicBlock *Parent = PN->getParent();
2029   int Idx = -1;
2030   for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
2031     BasicBlock *Blk = PN->getIncomingBlock(i);
2032     // Determine if the back edge originates from the intrinsic.
2033     if (Blk == Parent) {
2034       Value *BackEdgeVal = PN->getIncomingValue(i);
2035       Value *BaseVal;
2036       // Loop until we return the same Value or we hit the IntrBaseVal.
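// (Sketch of the expected shape: the back-edge value is the pointer member of
// the struct returned by the previous @llvm.hexagon.L2.loadX.pbr call, and
// chasing it through getBrevLdObject reaches IntrBaseVal, in which case the
// incoming edge is taken instead.)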
2037       do {
2038         BaseVal = BackEdgeVal;
2039         BackEdgeVal = getBrevLdObject(BackEdgeVal);
2040       } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2041       // If getBrevLdObject returns IntrBaseVal, we should return the
2042       // incoming edge.
2043       if (IntrBaseVal == BackEdgeVal)
2044         continue;
2045       Idx = i;
2046       break;
2047     } else // Otherwise, remember this incoming edge.
2048       Idx = i;
2049   }
2050   assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
2051   return PN->getIncomingValue(Idx);
2052 }
2053
2054 // Bit-reverse Load Intrinsic: Figure out the underlying object the base
2055 // pointer points to, for the bit-reverse load intrinsic. Setting this on the
2056 // memoperand might help alias analysis figure out the dependencies.
2057 static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
2058   Value *IntrBaseVal = V;
2059   Value *BaseVal;
2060   // Loop until we get back the same Value, which means we have either found
2061   // the object or hit a PHI node.
2062   do {
2063     BaseVal = V;
2064     V = getBrevLdObject(V);
2065   } while (BaseVal != V);
2066
2067   // Identify the object from PHINode.
2068   if (const PHINode *PN = dyn_cast<PHINode>(V))
2069     return returnEdge(PN, IntrBaseVal);
2070   // For non-PHI nodes, the object is the last value returned by getBrevLdObject.
2071   else
2072     return V;
2073 }
2074
2075 /// Given an intrinsic, checks whether, on the target, the intrinsic will need
2076 /// to map to a MemIntrinsicNode (touches memory). If this is the case, it
2077 /// returns true and stores the intrinsic information into the IntrinsicInfo
2078 /// that was passed to the function.
2079 bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
2080                                                const CallInst &I,
2081                                                MachineFunction &MF,
2082                                                unsigned Intrinsic) const {
2083   switch (Intrinsic) {
2084   case Intrinsic::hexagon_L2_loadrd_pbr:
2085   case Intrinsic::hexagon_L2_loadri_pbr:
2086   case Intrinsic::hexagon_L2_loadrh_pbr:
2087   case Intrinsic::hexagon_L2_loadruh_pbr:
2088   case Intrinsic::hexagon_L2_loadrb_pbr:
2089   case Intrinsic::hexagon_L2_loadrub_pbr: {
2090     Info.opc = ISD::INTRINSIC_W_CHAIN;
2091     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
2092     auto &Cont = I.getCalledFunction()->getParent()->getContext();
2093     // The intrinsic function call is of the form { ElTy, i8* }
2094     // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
2095     // should be derived from ElTy.
2096     Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
2097     Info.memVT = MVT::getVT(ElTy);
2098     llvm::Value *BasePtrVal = I.getOperand(0);
2099     Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
2100     // The offset value comes through the modifier register. For now, assume
2101     // the offset is 0.
2102 Info.offset = 0; 2103 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont)); 2104 Info.flags = MachineMemOperand::MOLoad; 2105 return true; 2106 } 2107 case Intrinsic::hexagon_V6_vgathermw: 2108 case Intrinsic::hexagon_V6_vgathermw_128B: 2109 case Intrinsic::hexagon_V6_vgathermh: 2110 case Intrinsic::hexagon_V6_vgathermh_128B: 2111 case Intrinsic::hexagon_V6_vgathermhw: 2112 case Intrinsic::hexagon_V6_vgathermhw_128B: 2113 case Intrinsic::hexagon_V6_vgathermwq: 2114 case Intrinsic::hexagon_V6_vgathermwq_128B: 2115 case Intrinsic::hexagon_V6_vgathermhq: 2116 case Intrinsic::hexagon_V6_vgathermhq_128B: 2117 case Intrinsic::hexagon_V6_vgathermhwq: 2118 case Intrinsic::hexagon_V6_vgathermhwq_128B: { 2119 const Module &M = *I.getParent()->getParent()->getParent(); 2120 Info.opc = ISD::INTRINSIC_W_CHAIN; 2121 Type *VecTy = I.getArgOperand(1)->getType(); 2122 Info.memVT = MVT::getVT(VecTy); 2123 Info.ptrVal = I.getArgOperand(0); 2124 Info.offset = 0; 2125 Info.align = 2126 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8); 2127 Info.flags = MachineMemOperand::MOLoad | 2128 MachineMemOperand::MOStore | 2129 MachineMemOperand::MOVolatile; 2130 return true; 2131 } 2132 default: 2133 break; 2134 } 2135 return false; 2136 } 2137 2138 bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const { 2139 return X.getValueType().isScalarInteger(); // 'tstbit' 2140 } 2141 2142 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 2143 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2)); 2144 } 2145 2146 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 2147 if (!VT1.isSimple() || !VT2.isSimple()) 2148 return false; 2149 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32; 2150 } 2151 2152 bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd( 2153 const MachineFunction &MF, EVT VT) const { 2154 return isOperationLegalOrCustom(ISD::FMA, VT); 2155 } 2156 2157 // Should we expand the build vector with shuffles? 2158 bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT, 2159 unsigned DefinedValues) const { 2160 return false; 2161 } 2162 2163 bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 2164 unsigned Index) const { 2165 assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType()); 2166 if (!ResVT.isSimple() || !SrcVT.isSimple()) 2167 return false; 2168 2169 MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT(); 2170 if (ResTy.getVectorElementType() != MVT::i1) 2171 return true; 2172 2173 // Non-HVX bool vectors are relatively cheap. 
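// (v2i1, v4i1 and v8i1 all live in a single predicate register, so extracting
// a subvector stays within register operations.)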
2174 return SrcTy.getVectorNumElements() <= 8; 2175 } 2176 2177 bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const { 2178 return Op.getOpcode() == ISD::CONCAT_VECTORS || 2179 TargetLowering::isTargetCanonicalConstantNode(Op); 2180 } 2181 2182 bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, 2183 EVT VT) const { 2184 return true; 2185 } 2186 2187 TargetLoweringBase::LegalizeTypeAction 2188 HexagonTargetLowering::getPreferredVectorAction(MVT VT) const { 2189 unsigned VecLen = VT.getVectorMinNumElements(); 2190 MVT ElemTy = VT.getVectorElementType(); 2191 2192 if (VecLen == 1 || VT.isScalableVector()) 2193 return TargetLoweringBase::TypeScalarizeVector; 2194 2195 if (Subtarget.useHVXOps()) { 2196 unsigned Action = getPreferredHvxVectorAction(VT); 2197 if (Action != ~0u) 2198 return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action); 2199 } 2200 2201 // Always widen (remaining) vectors of i1. 2202 if (ElemTy == MVT::i1) 2203 return TargetLoweringBase::TypeWidenVector; 2204 // Widen non-power-of-2 vectors. Such types cannot be split right now, 2205 // and computeRegisterProperties will override "split" with "widen", 2206 // which can cause other issues. 2207 if (!isPowerOf2_32(VecLen)) 2208 return TargetLoweringBase::TypeWidenVector; 2209 2210 return TargetLoweringBase::TypeSplitVector; 2211 } 2212 2213 TargetLoweringBase::LegalizeAction 2214 HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const { 2215 if (Subtarget.useHVXOps()) { 2216 unsigned Action = getCustomHvxOperationAction(Op); 2217 if (Action != ~0u) 2218 return static_cast<TargetLoweringBase::LegalizeAction>(Action); 2219 } 2220 return TargetLoweringBase::Legal; 2221 } 2222 2223 std::pair<SDValue, int> 2224 HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const { 2225 if (Addr.getOpcode() == ISD::ADD) { 2226 SDValue Op1 = Addr.getOperand(1); 2227 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode())) 2228 return { Addr.getOperand(0), CN->getSExtValue() }; 2229 } 2230 return { Addr, 0 }; 2231 } 2232 2233 // Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors 2234 // to select data from, V3 is the permutation. 2235 SDValue 2236 HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) 2237 const { 2238 const auto *SVN = cast<ShuffleVectorSDNode>(Op); 2239 ArrayRef<int> AM = SVN->getMask(); 2240 assert(AM.size() <= 8 && "Unexpected shuffle mask"); 2241 unsigned VecLen = AM.size(); 2242 2243 MVT VecTy = ty(Op); 2244 assert(!Subtarget.isHVXVectorType(VecTy, true) && 2245 "HVX shuffles should be legal"); 2246 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length"); 2247 2248 SDValue Op0 = Op.getOperand(0); 2249 SDValue Op1 = Op.getOperand(1); 2250 const SDLoc &dl(Op); 2251 2252 // If the inputs are not the same as the output, bail. This is not an 2253 // error situation, but complicates the handling and the default expansion 2254 // (into BUILD_VECTOR) should be adequate. 2255 if (ty(Op0) != VecTy || ty(Op1) != VecTy) 2256 return SDValue(); 2257 2258 // Normalize the mask so that the first non-negative index comes from 2259 // the first operand. 2260 SmallVector<int,8> Mask(AM.begin(), AM.end()); 2261 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data(); 2262 if (F == AM.size()) 2263 return DAG.getUNDEF(VecTy); 2264 if (AM[F] >= int(VecLen)) { 2265 ShuffleVectorSDNode::commuteMask(Mask); 2266 std::swap(Op0, Op1); 2267 } 2268 2269 // Express the shuffle mask in terms of bytes. 
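// (Illustrative: for v4i16, the element mask {1,3,0,2} becomes the byte mask
// {2,3, 6,7, 0,1, 4,5}, since each halfword element covers two bytes.)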
2270 SmallVector<int,8> ByteMask; 2271 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8; 2272 for (int M : Mask) { 2273 if (M < 0) { 2274 for (unsigned j = 0; j != ElemBytes; ++j) 2275 ByteMask.push_back(-1); 2276 } else { 2277 for (unsigned j = 0; j != ElemBytes; ++j) 2278 ByteMask.push_back(M*ElemBytes + j); 2279 } 2280 } 2281 assert(ByteMask.size() <= 8); 2282 2283 // All non-undef (non-negative) indexes are well within [0..127], so they 2284 // fit in a single byte. Build two 64-bit words: 2285 // - MaskIdx where each byte is the corresponding index (for non-negative 2286 // indexes), and 0xFF for negative indexes, and 2287 // - MaskUnd that has 0xFF for each negative index. 2288 uint64_t MaskIdx = 0; 2289 uint64_t MaskUnd = 0; 2290 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) { 2291 unsigned S = 8*i; 2292 uint64_t M = ByteMask[i] & 0xFF; 2293 if (M == 0xFF) 2294 MaskUnd |= M << S; 2295 MaskIdx |= M << S; 2296 } 2297 2298 if (ByteMask.size() == 4) { 2299 // Identity. 2300 if (MaskIdx == (0x03020100 | MaskUnd)) 2301 return Op0; 2302 // Byte swap. 2303 if (MaskIdx == (0x00010203 | MaskUnd)) { 2304 SDValue T0 = DAG.getBitcast(MVT::i32, Op0); 2305 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0); 2306 return DAG.getBitcast(VecTy, T1); 2307 } 2308 2309 // Byte packs. 2310 SDValue Concat10 = 2311 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG); 2312 if (MaskIdx == (0x06040200 | MaskUnd)) 2313 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG); 2314 if (MaskIdx == (0x07050301 | MaskUnd)) 2315 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG); 2316 2317 SDValue Concat01 = 2318 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG); 2319 if (MaskIdx == (0x02000604 | MaskUnd)) 2320 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG); 2321 if (MaskIdx == (0x03010705 | MaskUnd)) 2322 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG); 2323 } 2324 2325 if (ByteMask.size() == 8) { 2326 // Identity. 2327 if (MaskIdx == (0x0706050403020100ull | MaskUnd)) 2328 return Op0; 2329 // Byte swap. 2330 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) { 2331 SDValue T0 = DAG.getBitcast(MVT::i64, Op0); 2332 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0); 2333 return DAG.getBitcast(VecTy, T1); 2334 } 2335 2336 // Halfword picks. 2337 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd)) 2338 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG); 2339 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd)) 2340 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG); 2341 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd)) 2342 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG); 2343 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd)) 2344 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG); 2345 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) { 2346 VectorPair P = opSplit(Op0, dl, DAG); 2347 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG); 2348 } 2349 2350 // Byte packs. 
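// (E.g. MaskIdx 0x0e060c040a020800 selects bytes 0,8,2,10,4,12,6,14 of the
// Op1:Op0 pair, i.e. the even bytes of both inputs interleaved, which is
// exactly S2_shuffeb.)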
2351 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd)) 2352 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG); 2353 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd)) 2354 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG); 2355 } 2356 2357 return SDValue(); 2358 } 2359 2360 SDValue 2361 HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const { 2362 switch (Op.getOpcode()) { 2363 case ISD::BUILD_VECTOR: 2364 if (SDValue S = cast<BuildVectorSDNode>(Op)->getSplatValue()) 2365 return S; 2366 break; 2367 case ISD::SPLAT_VECTOR: 2368 return Op.getOperand(0); 2369 } 2370 return SDValue(); 2371 } 2372 2373 // Create a Hexagon-specific node for shifting a vector by an integer. 2374 SDValue 2375 HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) 2376 const { 2377 unsigned NewOpc; 2378 switch (Op.getOpcode()) { 2379 case ISD::SHL: 2380 NewOpc = HexagonISD::VASL; 2381 break; 2382 case ISD::SRA: 2383 NewOpc = HexagonISD::VASR; 2384 break; 2385 case ISD::SRL: 2386 NewOpc = HexagonISD::VLSR; 2387 break; 2388 default: 2389 llvm_unreachable("Unexpected shift opcode"); 2390 } 2391 2392 if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG)) 2393 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp); 2394 return SDValue(); 2395 } 2396 2397 SDValue 2398 HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const { 2399 const SDLoc &dl(Op); 2400 2401 // First try to convert the shift (by vector) to a shift by a scalar. 2402 // If we first split the shift, the shift amount will become 'extract 2403 // subvector', and will no longer be recognized as scalar. 2404 SDValue Res = Op; 2405 if (SDValue S = getVectorShiftByInt(Op, DAG)) 2406 Res = S; 2407 2408 unsigned Opc = Res.getOpcode(); 2409 switch (Opc) { 2410 case HexagonISD::VASR: 2411 case HexagonISD::VLSR: 2412 case HexagonISD::VASL: 2413 break; 2414 default: 2415 // No instructions for shifts by non-scalars. 2416 return SDValue(); 2417 } 2418 2419 MVT ResTy = ty(Res); 2420 if (ResTy.getVectorElementType() != MVT::i8) 2421 return Res; 2422 2423 // For shifts of i8, extend the inputs to i16, then truncate back to i8. 2424 assert(ResTy.getVectorElementType() == MVT::i8); 2425 SDValue Val = Res.getOperand(0), Amt = Res.getOperand(1); 2426 2427 auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) { 2428 MVT Ty = ty(V); 2429 MVT ExtTy = MVT::getVectorVT(MVT::i16, Ty.getVectorNumElements()); 2430 SDValue ExtV = Opc == HexagonISD::VASR ? DAG.getSExtOrTrunc(V, dl, ExtTy) 2431 : DAG.getZExtOrTrunc(V, dl, ExtTy); 2432 SDValue ExtS = DAG.getNode(Opc, dl, ExtTy, {ExtV, A}); 2433 return DAG.getZExtOrTrunc(ExtS, dl, Ty); 2434 }; 2435 2436 if (ResTy.getSizeInBits() == 32) 2437 return ShiftPartI8(Opc, Val, Amt); 2438 2439 auto [LoV, HiV] = opSplit(Val, dl, DAG); 2440 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, 2441 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)}); 2442 } 2443 2444 SDValue 2445 HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { 2446 if (isa<ConstantSDNode>(Op.getOperand(1).getNode())) 2447 return Op; 2448 return SDValue(); 2449 } 2450 2451 SDValue 2452 HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 2453 MVT ResTy = ty(Op); 2454 SDValue InpV = Op.getOperand(0); 2455 MVT InpTy = ty(InpV); 2456 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits()); 2457 const SDLoc &dl(Op); 2458 2459 // Handle conversion from i8 to v8i1. 
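// (In LLVM's little-endian lane order, bit i of the i8 becomes lane i of the
// v8i1; e.g. an i8 of 0x2C yields lanes <0,0,1,1,0,1,0,0>.)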
2460 if (InpTy == MVT::i8) { 2461 if (ResTy == MVT::v8i1) { 2462 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV); 2463 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32); 2464 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG); 2465 } 2466 return SDValue(); 2467 } 2468 2469 return Op; 2470 } 2471 2472 bool 2473 HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values, 2474 MVT VecTy, SelectionDAG &DAG, 2475 MutableArrayRef<ConstantInt*> Consts) const { 2476 MVT ElemTy = VecTy.getVectorElementType(); 2477 unsigned ElemWidth = ElemTy.getSizeInBits(); 2478 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth); 2479 bool AllConst = true; 2480 2481 for (unsigned i = 0, e = Values.size(); i != e; ++i) { 2482 SDValue V = Values[i]; 2483 if (V.isUndef()) { 2484 Consts[i] = ConstantInt::get(IntTy, 0); 2485 continue; 2486 } 2487 // Make sure to always cast to IntTy. 2488 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) { 2489 const ConstantInt *CI = CN->getConstantIntValue(); 2490 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue()); 2491 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) { 2492 const ConstantFP *CF = CN->getConstantFPValue(); 2493 APInt A = CF->getValueAPF().bitcastToAPInt(); 2494 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue()); 2495 } else { 2496 AllConst = false; 2497 } 2498 } 2499 return AllConst; 2500 } 2501 2502 SDValue 2503 HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, 2504 MVT VecTy, SelectionDAG &DAG) const { 2505 MVT ElemTy = VecTy.getVectorElementType(); 2506 assert(VecTy.getVectorNumElements() == Elem.size()); 2507 2508 SmallVector<ConstantInt*,4> Consts(Elem.size()); 2509 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts); 2510 2511 unsigned First, Num = Elem.size(); 2512 for (First = 0; First != Num; ++First) { 2513 if (!isUndef(Elem[First])) 2514 break; 2515 } 2516 if (First == Num) 2517 return DAG.getUNDEF(VecTy); 2518 2519 if (AllConst && 2520 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) 2521 return getZero(dl, VecTy, DAG); 2522 2523 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { 2524 assert(Elem.size() == 2); 2525 if (AllConst) { 2526 // The 'Consts' array will have all values as integers regardless 2527 // of the vector element type. 2528 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) | 2529 Consts[1]->getZExtValue() << 16; 2530 return DAG.getBitcast(VecTy, DAG.getConstant(V, dl, MVT::i32)); 2531 } 2532 SDValue E0, E1; 2533 if (ElemTy == MVT::f16) { 2534 E0 = DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Elem[0]), dl, MVT::i32); 2535 E1 = DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Elem[1]), dl, MVT::i32); 2536 } else { 2537 E0 = Elem[0]; 2538 E1 = Elem[1]; 2539 } 2540 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG); 2541 return DAG.getBitcast(VecTy, N); 2542 } 2543 2544 if (ElemTy == MVT::i8) { 2545 // First try generating a constant. 2546 if (AllConst) { 2547 int32_t V = (Consts[0]->getZExtValue() & 0xFF) | 2548 (Consts[1]->getZExtValue() & 0xFF) << 8 | 2549 (Consts[2]->getZExtValue() & 0xFF) << 16 | 2550 Consts[3]->getZExtValue() << 24; 2551 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32)); 2552 } 2553 2554 // Then try splat. 2555 bool IsSplat = true; 2556 for (unsigned i = First+1; i != Num; ++i) { 2557 if (Elem[i] == Elem[First] || isUndef(Elem[i])) 2558 continue; 2559 IsSplat = false; 2560 break; 2561 } 2562 if (IsSplat) { 2563 // Legalize the operand of SPLAT_VECTOR. 
2564 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32); 2565 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext); 2566 } 2567 2568 // Generate 2569 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) | 2570 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16 2571 assert(Elem.size() == 4); 2572 SDValue Vs[4]; 2573 for (unsigned i = 0; i != 4; ++i) { 2574 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32); 2575 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8); 2576 } 2577 SDValue S8 = DAG.getConstant(8, dl, MVT::i32); 2578 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8}); 2579 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8}); 2580 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0}); 2581 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1}); 2582 2583 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG); 2584 return DAG.getBitcast(MVT::v4i8, R); 2585 } 2586 2587 #ifndef NDEBUG 2588 dbgs() << "VecTy: " << VecTy << '\n'; 2589 #endif 2590 llvm_unreachable("Unexpected vector element type"); 2591 } 2592 2593 SDValue 2594 HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, 2595 MVT VecTy, SelectionDAG &DAG) const { 2596 MVT ElemTy = VecTy.getVectorElementType(); 2597 assert(VecTy.getVectorNumElements() == Elem.size()); 2598 2599 SmallVector<ConstantInt*,8> Consts(Elem.size()); 2600 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts); 2601 2602 unsigned First, Num = Elem.size(); 2603 for (First = 0; First != Num; ++First) { 2604 if (!isUndef(Elem[First])) 2605 break; 2606 } 2607 if (First == Num) 2608 return DAG.getUNDEF(VecTy); 2609 2610 if (AllConst && 2611 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) 2612 return getZero(dl, VecTy, DAG); 2613 2614 // First try splat if possible. 2615 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) { 2616 bool IsSplat = true; 2617 for (unsigned i = First+1; i != Num; ++i) { 2618 if (Elem[i] == Elem[First] || isUndef(Elem[i])) 2619 continue; 2620 IsSplat = false; 2621 break; 2622 } 2623 if (IsSplat) { 2624 // Legalize the operand of SPLAT_VECTOR 2625 SDValue S = ElemTy == MVT::f16 ? DAG.getBitcast(MVT::i16, Elem[First]) 2626 : Elem[First]; 2627 SDValue Ext = DAG.getZExtOrTrunc(S, dl, MVT::i32); 2628 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext); 2629 } 2630 } 2631 2632 // Then try constant. 2633 if (AllConst) { 2634 uint64_t Val = 0; 2635 unsigned W = ElemTy.getSizeInBits(); 2636 uint64_t Mask = (1ull << W) - 1; 2637 for (unsigned i = 0; i != Num; ++i) 2638 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask); 2639 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64); 2640 return DAG.getBitcast(VecTy, V0); 2641 } 2642 2643 // Build two 32-bit vectors and concatenate. 2644 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2); 2645 SDValue L = (ElemTy == MVT::i32) 2646 ? Elem[0] 2647 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG); 2648 SDValue H = (ElemTy == MVT::i32) 2649 ? 
Elem[1] 2650 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG); 2651 return getCombine(H, L, dl, VecTy, DAG); 2652 } 2653 2654 SDValue 2655 HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV, 2656 const SDLoc &dl, MVT ValTy, MVT ResTy, 2657 SelectionDAG &DAG) const { 2658 MVT VecTy = ty(VecV); 2659 assert(!ValTy.isVector() || 2660 VecTy.getVectorElementType() == ValTy.getVectorElementType()); 2661 if (VecTy.getVectorElementType() == MVT::i1) 2662 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG); 2663 2664 unsigned VecWidth = VecTy.getSizeInBits(); 2665 unsigned ValWidth = ValTy.getSizeInBits(); 2666 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits(); 2667 assert((VecWidth % ElemWidth) == 0); 2668 assert(VecWidth == 32 || VecWidth == 64); 2669 2670 // Cast everything to scalar integer types. 2671 MVT ScalarTy = tyScalar(VecTy); 2672 VecV = DAG.getBitcast(ScalarTy, VecV); 2673 2674 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32); 2675 SDValue ExtV; 2676 2677 if (auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) { 2678 unsigned Off = IdxN->getZExtValue() * ElemWidth; 2679 if (VecWidth == 64 && ValWidth == 32) { 2680 assert(Off == 0 || Off == 32); 2681 ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG); 2682 } else if (Off == 0 && (ValWidth % 8) == 0) { 2683 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy)); 2684 } else { 2685 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32); 2686 // The return type of EXTRACTU must be the same as the type of the 2687 // input vector. 2688 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy, 2689 {VecV, WidthV, OffV}); 2690 } 2691 } else { 2692 if (ty(IdxV) != MVT::i32) 2693 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32); 2694 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, 2695 DAG.getConstant(ElemWidth, dl, MVT::i32)); 2696 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy, 2697 {VecV, WidthV, OffV}); 2698 } 2699 2700 // Cast ExtV to the requested result type. 2701 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy)); 2702 ExtV = DAG.getBitcast(ResTy, ExtV); 2703 return ExtV; 2704 } 2705 2706 SDValue 2707 HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV, 2708 const SDLoc &dl, MVT ValTy, MVT ResTy, 2709 SelectionDAG &DAG) const { 2710 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon 2711 // without any coprocessors). 2712 MVT VecTy = ty(VecV); 2713 unsigned VecWidth = VecTy.getSizeInBits(); 2714 unsigned ValWidth = ValTy.getSizeInBits(); 2715 assert(VecWidth == VecTy.getVectorNumElements() && 2716 "Vector elements should equal vector width size"); 2717 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2); 2718 2719 // Check if this is an extract of the lowest bit. 2720 if (isNullConstant(IdxV) && ValTy.getSizeInBits() == 1) { 2721 // Extracting the lowest bit is a no-op, but it changes the type, 2722 // so it must be kept as an operation to avoid errors related to 2723 // type mismatches. 2724 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV); 2725 } 2726 2727 // If the value extracted is a single bit, use tstbit. 2728 if (ValWidth == 1) { 2729 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG); 2730 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32); 2731 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0); 2732 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0); 2733 } 2734 2735 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in 2736 // a predicate register. 
The elements of the vector are repeated 2737 // in the register (if necessary) so that the total number is 8. 2738 // The extracted subvector will need to be expanded in such a way. 2739 unsigned Scale = VecWidth / ValWidth; 2740 2741 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to 2742 // position 0. 2743 assert(ty(IdxV) == MVT::i32); 2744 unsigned VecRep = 8 / VecWidth; 2745 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, 2746 DAG.getConstant(8*VecRep, dl, MVT::i32)); 2747 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV); 2748 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0); 2749 while (Scale > 1) { 2750 // The longest possible subvector is at most 32 bits, so it is always 2751 // contained in the low subregister. 2752 T1 = LoHalf(T1, DAG); 2753 T1 = expandPredicate(T1, dl, DAG); 2754 Scale /= 2; 2755 } 2756 2757 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1); 2758 } 2759 2760 SDValue 2761 HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV, 2762 const SDLoc &dl, MVT ValTy, 2763 SelectionDAG &DAG) const { 2764 MVT VecTy = ty(VecV); 2765 if (VecTy.getVectorElementType() == MVT::i1) 2766 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG); 2767 2768 unsigned VecWidth = VecTy.getSizeInBits(); 2769 unsigned ValWidth = ValTy.getSizeInBits(); 2770 assert(VecWidth == 32 || VecWidth == 64); 2771 assert((VecWidth % ValWidth) == 0); 2772 2773 // Cast everything to scalar integer types. 2774 MVT ScalarTy = MVT::getIntegerVT(VecWidth); 2775 // The actual type of ValV may be different than ValTy (which is related 2776 // to the vector type). 2777 unsigned VW = ty(ValV).getSizeInBits(); 2778 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV); 2779 VecV = DAG.getBitcast(ScalarTy, VecV); 2780 if (VW != VecWidth) 2781 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy); 2782 2783 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32); 2784 SDValue InsV; 2785 2786 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) { 2787 unsigned W = C->getZExtValue() * ValWidth; 2788 SDValue OffV = DAG.getConstant(W, dl, MVT::i32); 2789 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy, 2790 {VecV, ValV, WidthV, OffV}); 2791 } else { 2792 if (ty(IdxV) != MVT::i32) 2793 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32); 2794 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV); 2795 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy, 2796 {VecV, ValV, WidthV, OffV}); 2797 } 2798 2799 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV); 2800 } 2801 2802 SDValue 2803 HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV, 2804 SDValue IdxV, const SDLoc &dl, 2805 MVT ValTy, SelectionDAG &DAG) const { 2806 MVT VecTy = ty(VecV); 2807 unsigned VecLen = VecTy.getVectorNumElements(); 2808 2809 if (ValTy == MVT::i1) { 2810 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG); 2811 SDValue Ext = DAG.getSExtOrTrunc(ValV, dl, MVT::i32); 2812 SDValue Width = DAG.getConstant(8 / VecLen, dl, MVT::i32); 2813 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, Width); 2814 SDValue Ins = 2815 DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, {ToReg, Ext, Width, Idx}); 2816 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG); 2817 } 2818 2819 assert(ValTy.getVectorElementType() == MVT::i1); 2820 SDValue ValR = ValTy.isVector() 2821 ? 
DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV) 2822 : DAG.getSExtOrTrunc(ValV, dl, MVT::i64); 2823 2824 unsigned Scale = VecLen / ValTy.getVectorNumElements(); 2825 assert(Scale > 1); 2826 2827 for (unsigned R = Scale; R > 1; R /= 2) { 2828 ValR = contractPredicate(ValR, dl, DAG); 2829 ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG); 2830 } 2831 2832 SDValue Width = DAG.getConstant(64 / Scale, dl, MVT::i32); 2833 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, Width); 2834 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV); 2835 SDValue Ins = 2836 DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, {VecR, ValR, Width, Idx}); 2837 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins); 2838 } 2839 2840 SDValue 2841 HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl, 2842 SelectionDAG &DAG) const { 2843 assert(ty(Vec32).getSizeInBits() == 32); 2844 if (isUndef(Vec32)) 2845 return DAG.getUNDEF(MVT::i64); 2846 SDValue P = DAG.getBitcast(MVT::v4i8, Vec32); 2847 SDValue X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i16, P); 2848 return DAG.getBitcast(MVT::i64, X); 2849 } 2850 2851 SDValue 2852 HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl, 2853 SelectionDAG &DAG) const { 2854 assert(ty(Vec64).getSizeInBits() == 64); 2855 if (isUndef(Vec64)) 2856 return DAG.getUNDEF(MVT::i32); 2857 // Collect even bytes: 2858 SDValue A = DAG.getBitcast(MVT::v8i8, Vec64); 2859 SDValue S = DAG.getVectorShuffle(MVT::v8i8, dl, A, DAG.getUNDEF(MVT::v8i8), 2860 {0, 2, 4, 6, 1, 3, 5, 7}); 2861 return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8, 2862 MVT::i32, DAG); 2863 } 2864 2865 SDValue 2866 HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) 2867 const { 2868 if (Ty.isVector()) { 2869 unsigned W = Ty.getSizeInBits(); 2870 if (W <= 64) 2871 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W))); 2872 return DAG.getNode(ISD::SPLAT_VECTOR, dl, Ty, getZero(dl, MVT::i32, DAG)); 2873 } 2874 2875 if (Ty.isInteger()) 2876 return DAG.getConstant(0, dl, Ty); 2877 if (Ty.isFloatingPoint()) 2878 return DAG.getConstantFP(0.0, dl, Ty); 2879 llvm_unreachable("Invalid type for zero"); 2880 } 2881 2882 SDValue 2883 HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) 2884 const { 2885 MVT ValTy = ty(Val); 2886 assert(ValTy.getVectorElementType() == ResTy.getVectorElementType()); 2887 2888 unsigned ValLen = ValTy.getVectorNumElements(); 2889 unsigned ResLen = ResTy.getVectorNumElements(); 2890 if (ValLen == ResLen) 2891 return Val; 2892 2893 const SDLoc &dl(Val); 2894 assert(ValLen < ResLen); 2895 assert(ResLen % ValLen == 0); 2896 2897 SmallVector<SDValue, 4> Concats = {Val}; 2898 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i) 2899 Concats.push_back(DAG.getUNDEF(ValTy)); 2900 2901 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, Concats); 2902 } 2903 2904 SDValue 2905 HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl, 2906 MVT ResTy, SelectionDAG &DAG) const { 2907 MVT ElemTy = ty(Hi); 2908 assert(ElemTy == ty(Lo)); 2909 2910 if (!ElemTy.isVector()) { 2911 assert(ElemTy.isScalarInteger()); 2912 MVT PairTy = MVT::getIntegerVT(2 * ElemTy.getSizeInBits()); 2913 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, PairTy, Lo, Hi); 2914 return DAG.getBitcast(ResTy, Pair); 2915 } 2916 2917 unsigned Width = ElemTy.getSizeInBits(); 2918 MVT IntTy = MVT::getIntegerVT(Width); 2919 MVT PairTy = MVT::getIntegerVT(2 * Width); 2920 SDValue Pair = 2921 
SDValue
HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  MVT VecTy = ty(Op);
  unsigned BW = VecTy.getSizeInBits();
  const SDLoc &dl(Op);
  SmallVector<SDValue,8> Ops;
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
    Ops.push_back(Op.getOperand(i));

  if (BW == 32)
    return buildVector32(Ops, dl, VecTy, DAG);
  if (BW == 64)
    return buildVector64(Ops, dl, VecTy, DAG);

  if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
    // Check if this is a special case of all-0 or all-1.
    bool All0 = true, All1 = true;
    for (SDValue P : Ops) {
      auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
      if (CN == nullptr) {
        All0 = All1 = false;
        break;
      }
      uint32_t C = CN->getZExtValue();
      All0 &= (C == 0);
      All1 &= (C == 1);
    }
    if (All0)
      return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
    if (All1)
      return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);

    // For each i1 element in the resulting predicate register, put 1
    // shifted by the index of the element into a general-purpose register,
    // then or them together and transfer it back into a predicate register.
    SDValue Rs[8];
    SDValue Z = getZero(dl, MVT::i32, DAG);
    // Always produce 8 bits, repeat inputs if necessary.
    unsigned Rep = 8 / VecTy.getVectorNumElements();
    for (unsigned i = 0; i != 8; ++i) {
      SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
      Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
    }
    for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
      for (unsigned i = 0, e = A.size()/2; i != e; ++i)
        Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
    }
    // Move the value directly to a predicate register.
    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
  }

  return SDValue();
}

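// Illustrative walk-through of the predicate path below: for
// v8i1 = concat(v2i1 a, b, c, d), Scale is 4. Each operand is first widened
// with P2D and contracted into a 32-bit word; pairs of words are then merged
// with INSERT at bit offset 16 (width 16), and the final two words are
// combined into an i64 that is transferred back with D2P.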
SDValue
HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                           SelectionDAG &DAG) const {
  MVT VecTy = ty(Op);
  const SDLoc &dl(Op);
  if (VecTy.getSizeInBits() == 64) {
    assert(Op.getNumOperands() == 2);
    return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);
  }

  MVT ElemTy = VecTy.getVectorElementType();
  if (ElemTy == MVT::i1) {
    assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
    MVT OpTy = ty(Op.getOperand(0));
    // Scale is how many times the operands need to be contracted to match
    // the representation in the target register.
    unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
    assert(Scale == Op.getNumOperands() && Scale > 1);

    // First, convert all bool vectors to integers, then generate pairwise
    // inserts to form values of doubled length. Up until there are only
    // two values left to concatenate, all of these values will fit in a
    // 32-bit integer, so keep them as i32 to use 32-bit inserts.
    SmallVector<SDValue,4> Words[2];
    unsigned IdxW = 0;

    for (SDValue P : Op.getNode()->op_values()) {
      SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
      for (unsigned R = Scale; R > 1; R /= 2) {
        W = contractPredicate(W, dl, DAG);
        W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
      }
      W = LoHalf(W, DAG);
      Words[IdxW].push_back(W);
    }

    while (Scale > 2) {
      SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
      Words[IdxW ^ 1].clear();

      for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
        SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
        // Insert W1 into W0 right next to the significant bits of W0.
        SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
                                {W0, W1, WidthV, WidthV});
        Words[IdxW ^ 1].push_back(T);
      }
      IdxW ^= 1;
      Scale /= 2;
    }

    // At this point there should only be two words left, and Scale should be 2.
    assert(Scale == 2 && Words[IdxW].size() == 2);

    SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
    return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  MVT ElemTy = ty(Vec).getVectorElementType();
  return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
}

SDValue
HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {
  return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
                       ty(Op), ty(Op), DAG);
}

SDValue
HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                              SelectionDAG &DAG) const {
  return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
                      SDLoc(Op), ty(Op).getVectorElementType(), DAG);
}

SDValue
HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue ValV = Op.getOperand(1);
  return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
                      SDLoc(Op), ty(ValV), DAG);
}

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  // Assuming the caller does not have either a signext or zeroext modifier, and
  // only one value is accepted, any reasonable truncation is allowed.
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  // FIXME: in principle up to 64-bit could be made safe, but it would be very
  // fragile at the moment: any support for multiple value returns would be
  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
  return Ty1->getPrimitiveSizeInBits() <= 32;
}

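// Informal sketch of the boolean-vector load path below: a load of
// v{2,4,8}i1 is first rewritten as a zero-extending i8 load, the loaded byte
// is moved into a predicate register with C2_tfrrp, and the result is then
// sign- or zero-extended as the original extension type requests.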
SDValue
HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
  MVT Ty = ty(Op);
  const SDLoc &dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  MVT MemTy = LN->getMemoryVT().getSimpleVT();
  ISD::LoadExtType ET = LN->getExtensionType();

  bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
  if (LoadPred) {
    SDValue NL = DAG.getLoad(
        LN->getAddressingMode(), ISD::ZEXTLOAD, MVT::i32, dl, LN->getChain(),
        LN->getBasePtr(), LN->getOffset(), LN->getPointerInfo(),
        /*MemoryVT*/ MVT::i8, LN->getAlign(), LN->getMemOperand()->getFlags(),
        LN->getAAInfo(), LN->getRanges());
    LN = cast<LoadSDNode>(NL.getNode());
  }

  Align ClaimAlign = LN->getAlign();
  if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);

  // Call LowerUnalignedLoad for all loads; it recognizes loads that
  // don't need extra aligning.
  SDValue LU = LowerUnalignedLoad(SDValue(LN, 0), DAG);
  if (LoadPred) {
    SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
    if (ET == ISD::SEXTLOAD) {
      TP = DAG.getSExtOrTrunc(TP, dl, Ty);
    } else if (ET != ISD::NON_EXTLOAD) {
      TP = DAG.getZExtOrTrunc(TP, dl, Ty);
    }
    SDValue Ch = cast<LoadSDNode>(LU.getNode())->getChain();
    return DAG.getMergeValues({TP, Ch}, dl);
  }
  return LU;
}

SDValue
HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue Val = SN->getValue();
  MVT Ty = ty(Val);

  if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
    // Store the exact predicate (all bits).
    SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
    SDValue NS = DAG.getTruncStore(SN->getChain(), dl, TR, SN->getBasePtr(),
                                   MVT::i8, SN->getMemOperand());
    if (SN->isIndexed()) {
      NS = DAG.getIndexedStore(NS, dl, SN->getBasePtr(), SN->getOffset(),
                               SN->getAddressingMode());
    }
    SN = cast<StoreSDNode>(NS.getNode());
  }

  Align ClaimAlign = SN->getAlign();
  if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);

  MVT StoreTy = SN->getMemoryVT().getSimpleVT();
  Align NeedAlign = Subtarget.getTypeAlignment(StoreTy);
  if (ClaimAlign < NeedAlign)
    return expandUnalignedStore(SN, DAG);
  return SDValue(SN, 0);
}

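// Rough picture of the realignment performed below (illustrative): for an
// unaligned load of width NeedAlign bytes from address A,
//   Load0 = *(A aligned down to NeedAlign)
//   Load1 = *(A aligned down to NeedAlign, plus NeedAlign)
//   Result = VALIGN(Load1, Load0, A)
// where VALIGN extracts the NeedAlign bytes starting at A from the
// concatenation of the two aligned loads.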
SDValue
HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
      const {
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  MVT LoadTy = ty(Op);
  unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value();
  unsigned HaveAlign = LN->getAlign().value();
  if (HaveAlign >= NeedAlign)
    return Op;

  const SDLoc &dl(Op);
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &Ctx = *DAG.getContext();

  // If the load aligning is disabled or the load can be broken up into two
  // smaller legal loads, do the default (target-independent) expansion.
  bool DoDefault = false;
  // Handle it in the default way if this is an indexed load.
  if (!LN->isUnindexed())
    DoDefault = true;

  if (!AlignLoads) {
    if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
                                       *LN->getMemOperand()))
      return Op;
    DoDefault = true;
  }
  if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
    // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
    MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
                                : MVT::getVectorVT(MVT::i8, HaveAlign);
    DoDefault =
        allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
  }
  if (DoDefault) {
    std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
    return DAG.getMergeValues({P.first, P.second}, dl);
  }

  // The code below generates two loads, both aligned as NeedAlign, and
  // with the distance of NeedAlign between them. For that to cover the
  // bits that need to be loaded (and without overlapping), the size of
  // the loads should be equal to NeedAlign. This is true for all loadable
  // types, but add an assertion in case something changes in the future.
  assert(LoadTy.getSizeInBits() == 8*NeedAlign);

  unsigned LoadLen = NeedAlign;
  SDValue Base = LN->getBasePtr();
  SDValue Chain = LN->getChain();
  auto BO = getBaseAndOffset(Base);
  unsigned BaseOpc = BO.first.getOpcode();
  if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
    return Op;

  if (BO.second % LoadLen != 0) {
    BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
                           DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
    BO.second -= BO.second % LoadLen;
  }
  SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
      ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
                    DAG.getConstant(NeedAlign, dl, MVT::i32))
      : BO.first;
  SDValue Base0 =
      DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::getFixed(BO.second), dl);
  SDValue Base1 = DAG.getMemBasePlusOffset(
      BaseNoOff, TypeSize::getFixed(BO.second + LoadLen), dl);

  MachineMemOperand *WideMMO = nullptr;
  if (MachineMemOperand *MMO = LN->getMemOperand()) {
    MachineFunction &MF = DAG.getMachineFunction();
    WideMMO = MF.getMachineMemOperand(
        MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
        MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
  }

  SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
  SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);

  SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
                                {Load1, Load0, BaseNoOff.getOperand(0)});
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                 Load0.getValue(1), Load1.getValue(1));
  SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
  return M;
}

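// Reasoning for the Y == 1 special case below: for unsigned X,
//   uaddo(X, 1) overflows exactly when X+1 wraps around to 0, and
//   usubo(X, 1) overflows exactly when X == 0, i.e. when X-1 == -1,
// so the overflow flag can be computed with a single compare against a
// constant instead of a generic overflow check.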
SDValue
HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
  auto *CY = dyn_cast<ConstantSDNode>(Y);
  if (!CY)
    return SDValue();

  const SDLoc &dl(Op);
  SDVTList VTs = Op.getNode()->getVTList();
  assert(VTs.NumVTs == 2);
  assert(VTs.VTs[1] == MVT::i1);
  unsigned Opc = Op.getOpcode();

  uint64_t VY = CY->getZExtValue();
  assert(VY != 0 && "This should have been folded");
  // X +/- 1
  if (VY != 1)
    return SDValue();

  if (Opc == ISD::UADDO) {
    SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
    SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
                              ISD::SETEQ);
    return DAG.getMergeValues({Op, Ov}, dl);
  }
  if (Opc == ISD::USUBO) {
    SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
    SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
                              DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
    return DAG.getMergeValues({Op, Ov}, dl);
  }

  return SDValue();
}

SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op,
                                                  SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned Opc = Op.getOpcode();
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);

  if (Opc == ISD::UADDO_CARRY)
    return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
                       { X, Y, C });

  EVT CarryTy = C.getValueType();
  SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
                             { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
  SDValue Out[] = { SubC.getValue(0),
                    DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
  return DAG.getMergeValues(Out, dl);
}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Mark function as containing a call to EH_RETURN.
  HexagonMachineFunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
  FuncInfo->setHasEHReturn();

  unsigned OffsetReg = Hexagon::R28;

  SDValue StoreAddr =
      DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
                  DAG.getIntPtrConstant(4, dl));
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);

  // Not needed; we already use it as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);

  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
}

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();

  // Handle INLINEASM first.
  if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
    return LowerINLINEASM(Op, DAG);

  if (isHvxOperation(Op.getNode(), DAG)) {
    // If HVX lowering returns nothing, try the default lowering.
    if (SDValue V = LowerHvxOperation(Op, DAG))
      return V;
  }

  switch (Opc) {
  default:
#ifndef NDEBUG
    Op.getNode()->dumpr(&DAG);
    if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
      errs() << "Error: check for a non-legal type in this operation\n";
#endif
    llvm_unreachable("Should not custom lower this!");
  case ISD::CONCAT_VECTORS:       return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::INSERT_SUBVECTOR:     return LowerINSERT_SUBVECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:    return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR:    return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:   return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:         return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:       return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BITCAST:              return LowerBITCAST(Op, DAG);
  case ISD::LOAD:                 return LowerLoad(Op, DAG);
  case ISD::STORE:                return LowerStore(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:                return LowerUAddSubO(Op, DAG);
  case ISD::UADDO_CARRY:
  case ISD::USUBO_CARRY:          return LowerUAddSubOCarry(Op, DAG);
  case ISD::SRA:
  case ISD::SHL:
  case ISD::SRL:                  return LowerVECTOR_SHIFT(Op, DAG);
  case ISD::ROTL:                 return LowerROTL(Op, DAG);
  case ISD::ConstantPool:         return LowerConstantPool(Op, DAG);
  case ISD::JumpTable:            return LowerJumpTable(Op, DAG);
  case ISD::EH_RETURN:            return LowerEH_RETURN(Op, DAG);
  case ISD::RETURNADDR:           return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:            return LowerFRAMEADDR(Op, DAG);
  case ISD::GlobalTLSAddress:     return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ATOMIC_FENCE:         return LowerATOMIC_FENCE(Op, DAG);
  case ISD::GlobalAddress:        return LowerGLOBALADDRESS(Op, DAG);
  case ISD::BlockAddress:         return LowerBlockAddress(Op, DAG);
  case ISD::GLOBAL_OFFSET_TABLE:  return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::VACOPY:               return LowerVACOPY(Op, DAG);
  case ISD::VASTART:              return LowerVASTART(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:   return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::SETCC:                return LowerSETCC(Op, DAG);
  case ISD::VSELECT:              return LowerVSELECT(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:   return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:       return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::PREFETCH:             return LowerPREFETCH(Op, DAG);
  case ISD::READCYCLECOUNTER:     return LowerREADCYCLECOUNTER(Op, DAG);
  }

  return SDValue();
}

void
HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  if (isHvxOperation(N, DAG)) {
    LowerHvxOperationWrapper(N, Results, DAG);
    if (!Results.empty())
      return;
  }

  SDValue Op(N, 0);
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  case HexagonISD::SSAT:
  case HexagonISD::USAT:
    Results.push_back(opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG));
    break;
  case ISD::STORE:
    // We are only custom-lowering stores to verify the alignment of the
    // address if it is a compile-time constant. Since a store can be
    // modified during type-legalization (the value being stored may need
    // legalization), return empty Results here to indicate that we don't
    // really make any changes in the custom lowering.
    return;
  default:
    TargetLowering::LowerOperationWrapper(N, Results, DAG);
    break;
  }
}

void
HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  if (isHvxOperation(N, DAG)) {
    ReplaceHvxNodeResults(N, Results, DAG);
    if (!Results.empty())
      return;
  }

  const SDLoc &dl(N);
  switch (N->getOpcode()) {
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    return;
  case ISD::BITCAST:
    // Handle a bitcast from v8i1 to i8.
    if (N->getValueType(0) == MVT::i8) {
      if (N->getOperand(0).getValueType() == MVT::v8i1) {
        SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
                             N->getOperand(0), DAG);
        SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
        Results.push_back(T);
      }
    }
    break;
  }
}

SDValue
HexagonTargetLowering::PerformDAGCombine(SDNode *N,
                                         DAGCombinerInfo &DCI) const {
  if (isHvxOperation(N, DCI.DAG)) {
    if (SDValue V = PerformHvxDAGCombine(N, DCI))
      return V;
    return SDValue();
  }

  SDValue Op(N, 0);
  const SDLoc &dl(Op);
  unsigned Opc = Op.getOpcode();

  if (Opc == ISD::TRUNCATE) {
    SDValue Op0 = Op.getOperand(0);
    // fold (truncate (build pair x, y)) -> (truncate x) or x
    if (Op0.getOpcode() == ISD::BUILD_PAIR) {
      EVT TruncTy = Op.getValueType();
      SDValue Elem0 = Op0.getOperand(0);
      // if we match the low element of the pair, just return it.
      if (Elem0.getValueType() == TruncTy)
        return Elem0;
      // otherwise, if the low part is still too large, apply the truncate.
      if (Elem0.getValueType().bitsGT(TruncTy))
        return DCI.DAG.getNode(ISD::TRUNCATE, dl, TruncTy, Elem0);
    }
  }

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (Opc == HexagonISD::P2D) {
    SDValue P = Op.getOperand(0);
    switch (P.getOpcode()) {
    case HexagonISD::PTRUE:
      return DCI.DAG.getConstant(-1, dl, ty(Op));
    case HexagonISD::PFALSE:
      return getZero(dl, ty(Op), DCI.DAG);
    default:
      break;
    }
  } else if (Opc == ISD::VSELECT) {
    // This is pretty much duplicated in HexagonISelLoweringHVX...
    //
    // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
    SDValue Cond = Op.getOperand(0);
    if (Cond->getOpcode() == ISD::XOR) {
      SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
      if (C1->getOpcode() == HexagonISD::PTRUE) {
        SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
                                       Op.getOperand(2), Op.getOperand(1));
        return VSel;
      }
    }
  } else if (Opc == ISD::OR) {
    // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y)
    // if s >= 32
    auto fold0 = [&, this](SDValue Op) {
      if (ty(Op) != MVT::i64)
        return SDValue();
      SDValue Shl = Op.getOperand(0);
      SDValue Zxt = Op.getOperand(1);
      if (Shl.getOpcode() != ISD::SHL)
        std::swap(Shl, Zxt);

      if (Shl.getOpcode() != ISD::SHL || Zxt.getOpcode() != ISD::ZERO_EXTEND)
        return SDValue();

      SDValue Z = Zxt.getOperand(0);
      auto *Amt = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
      if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
        unsigned A = Amt->getZExtValue();
        SDValue S = Shl.getOperand(0);
        SDValue T0 = DCI.DAG.getNode(ISD::SHL, dl, ty(S), S,
                                     DCI.DAG.getConstant(A - 32, dl, MVT::i32));
        SDValue T1 = DCI.DAG.getZExtOrTrunc(T0, dl, MVT::i32);
        SDValue T2 = DCI.DAG.getZExtOrTrunc(Z, dl, MVT::i32);
        return DCI.DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, {T1, T2});
      }
      return SDValue();
    };

    if (SDValue R = fold0(Op))
      return R;
  }

  return SDValue();
}

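// Worked example of the fold0 combine above (illustrative only):
//   (or (shl x:i64, #40), (zero_extend y:i32))
// becomes (COMBINE (shl x, #8):i32, y:i32): the high register holds bits
// 32..63, which come solely from the shl, and the low register holds y.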
/// Returns relocation base for the given PIC jumptable.
SDValue
HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                SelectionDAG &DAG) const {
  int Idx = cast<JumpTableSDNode>(Table)->getIndex();
  EVT VT = Table.getValueType();
  SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'q':
    case 'v':
      if (Subtarget.useHVXOps())
        return C_RegisterClass;
      break;
    case 'a':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':   // R0-R31
      switch (VT.SimpleTy) {
      default:
        return {0u, nullptr};
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        return {0u, &Hexagon::IntRegsRegClass};
      case MVT::i64:
      case MVT::f64:
        return {0u, &Hexagon::DoubleRegsRegClass};
      }
      break;
    case 'a':   // M0-M1
      if (VT != MVT::i32)
        return {0u, nullptr};
      return {0u, &Hexagon::ModRegsRegClass};
    case 'q':   // q0-q3
      switch (VT.getSizeInBits()) {
      default:
        return {0u, nullptr};
      case 64:
      case 128:
        return {0u, &Hexagon::HvxQRRegClass};
      }
      break;
    case 'v':   // V0-V31
      switch (VT.getSizeInBits()) {
      default:
        return {0u, nullptr};
      case 512:
        return {0u, &Hexagon::HvxVRRegClass};
      case 1024:
        if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
          return {0u, &Hexagon::HvxVRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
      case 2048:
        return {0u, &Hexagon::HvxWRRegClass};
      }
      break;
    default:
      return {0u, nullptr};
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

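// Usage sketch (illustrative; assumes 128B HVX is enabled): in C source,
//   typedef int V32 __attribute__((vector_size(128)));
//   V32 copy(V32 a) { V32 r; asm("%0 = %1" : "=v"(r) : "v"(a)); return r; }
// binds both operands to HVX vector registers through the 'v' constraint
// handled above; 'q' similarly binds predicate-sized values to HvxQR.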
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  return true;
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS,
                                                  Instruction *I) const {
  if (Ty->isSized()) {
    // When LSR detects uses of the same base address to access different
    // types (e.g. unions), it will assume a conservative type for these
    // uses:
    //   LSR Use: Kind=Address of void in addrspace(4294967295), ...
    // The type Ty passed here would then be "void". Skip the alignment
    // checks, but do not return false right away, since that confuses
    // LSR into crashing.
    Align A = DL.getABITypeAlign(Ty);
    // The base offset must be a multiple of the alignment.
    if (!isAligned(A, AM.BaseOffs))
      return false;
    // The shifted offset must fit in 11 bits.
    if (!isInt<11>(AM.BaseOffs >> Log2(A)))
      return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  int Scale = AM.Scale;
  if (Scale < 0)
    Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}

/// Return true if folding a constant offset with the given GlobalAddress is
/// legal. It is frequently not legal in PIC relocation models.
bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
      const {
  return HTM.getRelocationModel() == Reloc::Static;
}

/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}

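// Note (informal): the range accepted above, [-512, 511], is the signed
// 10-bit immediate range, e.g. what a compare such as cmp.eq(Rs, #s10)
// can encode directly.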
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
      SDValue Callee,
      CallingConv::ID CalleeCC,
      bool IsVarArg,
      bool IsCalleeStructRet,
      bool IsCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SelectionDAG& DAG) const {
  const Function &CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  //  Look for obvious safe cases to perform tail call optimization that do not
  //  require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!isa<GlobalAddressSDNode>(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    return false;
  }

  // Do not optimize if the calling conventions do not match and the conventions
  // used are not C or Fast.
  if (!CCMatch) {
    bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
    bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
    // If R & E, then ok.
    if (!R || !E)
      return false;
  }

  // Do not tail call optimize vararg calls.
  if (IsVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // In addition to the cases above, we also disable tail call optimization if
  // the calling convention requires that at least one outgoing argument go on
  // the stack. We cannot check that here because at this point that
  // information is not available.
  return true;
}

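// Illustrative consequences of getOptimalMemOpType below: a 16-byte memcpy
// with 8-byte-aligned operands is expanded in i64 chunks, a 6-byte copy with
// 2-byte alignment in i16 chunks, and anything else falls back to the
// generic target-independent choice (MVT::Other).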
/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, it means the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero, it means there isn't a need to
/// check it against the alignment requirement, probably because the source
/// does not need to be loaded. If 'IsMemset' is true, that means it's
/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
/// does not need to be loaded. It returns EVT::Other if the type should be
/// determined using generic target-independent logic.
EVT HexagonTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (Op.size() >= 8 && Op.isAligned(Align(8)))
    return MVT::i64;
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
    return MVT::i32;
  if (Op.size() >= 2 && Op.isAligned(Align(2)))
    return MVT::i16;
  return MVT::Other;
}

bool HexagonTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMemoryAccess(SVT, Flags, Fast);
  return TargetLoweringBase::allowsMemoryAccess(
      Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
}

bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, unsigned *Fast) const {
  MVT SVT = VT.getSimpleVT();
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
  if (Fast)
    *Fast = 0;
  return false;
}

std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                               MVT VT) const {
  if (Subtarget.isHVXVectorType(VT, true)) {
    unsigned BitWidth = VT.getSizeInBits();
    unsigned VecWidth = Subtarget.getVectorLength() * 8;

    if (VT.getVectorElementType() == MVT::i1)
      return std::make_pair(&Hexagon::HvxQRRegClass, 1);
    if (BitWidth == VecWidth)
      return std::make_pair(&Hexagon::HvxVRRegClass, 1);
    assert(BitWidth == 2 * VecWidth);
    return std::make_pair(&Hexagon::HvxWRRegClass, 1);
  }

  return TargetLowering::findRepresentativeClass(TRI, VT);
}

bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
      ISD::LoadExtType ExtTy, EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
    return false;

  auto *L = cast<LoadSDNode>(Load);
  std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
  // Small-data object, do not shrink.
  if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
    return false;
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
    auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
    const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
    return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
  }
  return true;
}

void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
      SDNode *Node) const {
  AdjustHvxInstrPostInstrSelection(MI, Node);
}

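// The two hooks below are used by the AtomicExpand pass to build LL/SC
// retry loops; schematically (a sketch of the generic expansion, not code
// from this file):
//   retry:
//     old = L2_loadw_locked(addr)              // emitLoadLinked
//     new = <operation>(old, ...)
//     status = emitStoreConditional(new, addr) // uses S2_storew_locked
//     if (status != 0)  // store did not succeed
//       goto retry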
Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  unsigned SZ = ValueTy->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;
  Function *Fn = Intrinsic::getDeclaration(M, IntID);

  Value *Call = Builder.CreateCall(Fn, Addr, "larx");

  return Builder.CreateBitCast(Call, ValueTy);
}

/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = Val->getType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();

  Type *CastTy = Builder.getIntNTy(SZ);
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;
  Function *Fn = Intrinsic::getDeclaration(M, IntID);

  Val = Builder.CreateBitCast(Val, CastTy);

  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
  return Ext;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return LI->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::LLOnly
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::Expand
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  return AtomicExpansionKind::LLSC;
}