//===-- HexagonISelLoweringHVX.cpp --- Lowering HVX operations ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<unsigned> HvxWidenThreshold("hexagon-hvx-widen",
  cl::Hidden, cl::init(16),
  cl::desc("Lower threshold (in bytes) for widening to HVX vectors"));

static const MVT LegalV64[] =  { MVT::v64i8,  MVT::v32i16,  MVT::v16i32 };
static const MVT LegalW64[] =  { MVT::v128i8, MVT::v64i16,  MVT::v32i32 };
static const MVT LegalV128[] = { MVT::v128i8, MVT::v64i16,  MVT::v32i32 };
static const MVT LegalW128[] = { MVT::v256i8, MVT::v128i16, MVT::v64i32 };


void
HexagonTargetLowering::initializeHVXLowering() {
  if (Subtarget.useHVX64BOps()) {
    addRegisterClass(MVT::v64i8,  &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v128i8, &Hexagon::HvxWRRegClass);
    addRegisterClass(MVT::v64i16, &Hexagon::HvxWRRegClass);
    addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass);
    // These "short" boolean vector types should be legal because
    // they will appear as results of vector compares. If they were
    // not legal, type legalization would try to make them legal
    // and that would require using operations that do not use or
    // produce such types. That, in turn, would imply using custom
    // nodes, which would be unoptimizable by the DAG combiner.
    // The idea is to rely on target-independent operations as much
    // as possible.
    addRegisterClass(MVT::v16i1, &Hexagon::HvxQRRegClass);
    addRegisterClass(MVT::v32i1, &Hexagon::HvxQRRegClass);
    addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass);
  } else if (Subtarget.useHVX128BOps()) {
    addRegisterClass(MVT::v128i8,  &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v64i16,  &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v32i32,  &Hexagon::HvxVRRegClass);
    addRegisterClass(MVT::v256i8,  &Hexagon::HvxWRRegClass);
    addRegisterClass(MVT::v128i16, &Hexagon::HvxWRRegClass);
    addRegisterClass(MVT::v64i32,  &Hexagon::HvxWRRegClass);
    addRegisterClass(MVT::v32i1,  &Hexagon::HvxQRRegClass);
    addRegisterClass(MVT::v64i1,  &Hexagon::HvxQRRegClass);
    addRegisterClass(MVT::v128i1, &Hexagon::HvxQRRegClass);
  }

  // Set up operation actions.

  bool Use64b = Subtarget.useHVX64BOps();
  ArrayRef<MVT> LegalV = Use64b ? LegalV64 : LegalV128;
  ArrayRef<MVT> LegalW = Use64b ? LegalW64 : LegalW128;
  MVT ByteV = Use64b ?  MVT::v64i8 : MVT::v128i8;
  MVT ByteW = Use64b ? MVT::v128i8 : MVT::v256i8;

  auto setPromoteTo = [this] (unsigned Opc, MVT FromTy, MVT ToTy) {
    setOperationAction(Opc, FromTy, Promote);
    AddPromotedToType(Opc, FromTy, ToTy);
  };

  // Handle bitcasts of vector predicates to scalars (e.g. v32i1 to i32).
  // Note: v16i1 -> i16 is handled in type legalization instead of op
  // legalization.
  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::i64, Custom);
  setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
  setOperationAction(ISD::BITCAST, MVT::v128i1, Custom);
  setOperationAction(ISD::BITCAST, MVT::i128, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, ByteV, Legal);
  setOperationAction(ISD::VECTOR_SHUFFLE, ByteW, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  for (MVT T : LegalV) {
    setIndexedLoadAction(ISD::POST_INC, T, Legal);
    setIndexedStoreAction(ISD::POST_INC, T, Legal);

    setOperationAction(ISD::AND, T, Legal);
    setOperationAction(ISD::OR, T, Legal);
    setOperationAction(ISD::XOR, T, Legal);
    setOperationAction(ISD::ADD, T, Legal);
    setOperationAction(ISD::SUB, T, Legal);
    setOperationAction(ISD::MUL, T, Legal);
    setOperationAction(ISD::CTPOP, T, Legal);
    setOperationAction(ISD::CTLZ, T, Legal);
    setOperationAction(ISD::SELECT, T, Legal);
    setOperationAction(ISD::SPLAT_VECTOR, T, Legal);
    if (T != ByteV) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal);
      setOperationAction(ISD::BSWAP, T, Legal);
    }

    setOperationAction(ISD::SMIN, T, Legal);
    setOperationAction(ISD::SMAX, T, Legal);
    if (T.getScalarType() != MVT::i32) {
      setOperationAction(ISD::UMIN, T, Legal);
      setOperationAction(ISD::UMAX, T, Legal);
    }

    setOperationAction(ISD::CTTZ, T, Custom);
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::MLOAD, T, Custom);
    setOperationAction(ISD::MSTORE, T, Custom);
    setOperationAction(ISD::MULHS, T, Custom);
    setOperationAction(ISD::MULHU, T, Custom);
    setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    // Make concat-vectors custom to handle concats of more than 2 vectors.
    setOperationAction(ISD::CONCAT_VECTORS, T, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, T, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, T, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom);
    setOperationAction(ISD::ANY_EXTEND, T, Custom);
    setOperationAction(ISD::SIGN_EXTEND, T, Custom);
    setOperationAction(ISD::ZERO_EXTEND, T, Custom);
    if (T != ByteV) {
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom);
      // HVX only has shifts of words and halfwords.
      setOperationAction(ISD::SRA, T, Custom);
      setOperationAction(ISD::SHL, T, Custom);
      setOperationAction(ISD::SRL, T, Custom);

      // Promote all shuffles to operate on vectors of bytes.
      setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteV);
    }

    setCondCodeAction(ISD::SETNE, T, Expand);
    setCondCodeAction(ISD::SETLE, T, Expand);
    setCondCodeAction(ISD::SETGE, T, Expand);
    setCondCodeAction(ISD::SETLT, T, Expand);
    setCondCodeAction(ISD::SETULE, T, Expand);
    setCondCodeAction(ISD::SETUGE, T, Expand);
    setCondCodeAction(ISD::SETULT, T, Expand);
  }

  for (MVT T : LegalW) {
    // Custom-lower BUILD_VECTOR for vector pairs. The standard (target-
    // independent) handling of it would convert it to a load, which is
    // not always the optimal choice.
    setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    // Make concat-vectors custom to handle concats of more than 2 vectors.
    setOperationAction(ISD::CONCAT_VECTORS, T, Custom);

    // Custom-lower these operations for pairs. Expand them into a concat
    // of the corresponding operations on individual vectors.
    setOperationAction(ISD::ANY_EXTEND, T, Custom);
    setOperationAction(ISD::SIGN_EXTEND, T, Custom);
    setOperationAction(ISD::ZERO_EXTEND, T, Custom);
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Custom);
    setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal);
    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal);
    setOperationAction(ISD::SPLAT_VECTOR, T, Custom);

    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
    setOperationAction(ISD::MLOAD, T, Custom);
    setOperationAction(ISD::MSTORE, T, Custom);
    setOperationAction(ISD::CTLZ, T, Custom);
    setOperationAction(ISD::CTTZ, T, Custom);
    setOperationAction(ISD::CTPOP, T, Custom);

    setOperationAction(ISD::ADD, T, Legal);
    setOperationAction(ISD::SUB, T, Legal);
    setOperationAction(ISD::MUL, T, Custom);
    setOperationAction(ISD::MULHS, T, Custom);
    setOperationAction(ISD::MULHU, T, Custom);
    setOperationAction(ISD::AND, T, Custom);
    setOperationAction(ISD::OR, T, Custom);
    setOperationAction(ISD::XOR, T, Custom);
    setOperationAction(ISD::SETCC, T, Custom);
    setOperationAction(ISD::VSELECT, T, Custom);
    if (T != ByteW) {
      setOperationAction(ISD::SRA, T, Custom);
      setOperationAction(ISD::SHL, T, Custom);
      setOperationAction(ISD::SRL, T, Custom);

      // Promote all shuffles to operate on vectors of bytes.
      setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteW);
    }

    setOperationAction(ISD::SMIN, T, Custom);
    setOperationAction(ISD::SMAX, T, Custom);
    if (T.getScalarType() != MVT::i32) {
      setOperationAction(ISD::UMIN, T, Custom);
      setOperationAction(ISD::UMAX, T, Custom);
    }
  }

  // Boolean vectors.

  for (MVT T : LegalW) {
    // Boolean types for vector pairs will overlap with the boolean
    // types for single vectors, e.g.
    //   v64i8  -> v64i1 (single)
    //   v64i16 -> v64i1 (pair)
    // Set these actions first, and allow the single actions to overwrite
    // any duplicates.
    MVT BoolW = MVT::getVectorVT(MVT::i1, T.getVectorNumElements());
    setOperationAction(ISD::SETCC, BoolW, Custom);
    setOperationAction(ISD::AND, BoolW, Custom);
    setOperationAction(ISD::OR, BoolW, Custom);
    setOperationAction(ISD::XOR, BoolW, Custom);
    // Masked load/store takes a mask that may need splitting.
    setOperationAction(ISD::MLOAD, BoolW, Custom);
    setOperationAction(ISD::MSTORE, BoolW, Custom);
  }

  for (MVT T : LegalV) {
    MVT BoolV = MVT::getVectorVT(MVT::i1, T.getVectorNumElements());
    setOperationAction(ISD::BUILD_VECTOR, BoolV, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, BoolV, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, BoolV, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, BoolV, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, BoolV, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, BoolV, Custom);
    setOperationAction(ISD::SELECT, BoolV, Custom);
    setOperationAction(ISD::AND, BoolV, Legal);
    setOperationAction(ISD::OR, BoolV, Legal);
    setOperationAction(ISD::XOR, BoolV, Legal);
  }

  if (Use64b) {
    for (MVT T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal);
  } else {
    for (MVT T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal);
  }

  // Handle store widening for short vectors.
  unsigned HwLen = Subtarget.getVectorLength();
  for (MVT ElemTy : Subtarget.getHVXElementTypes()) {
    if (ElemTy == MVT::i1)
      continue;
    int ElemWidth = ElemTy.getFixedSizeInBits();
    int MaxElems = (8*HwLen) / ElemWidth;
    for (int N = 2; N < MaxElems; N *= 2) {
      MVT VecTy = MVT::getVectorVT(ElemTy, N);
      auto Action = getPreferredVectorAction(VecTy);
      if (Action == TargetLoweringBase::TypeWidenVector) {
        setOperationAction(ISD::LOAD, VecTy, Custom);
        setOperationAction(ISD::STORE, VecTy, Custom);
        setOperationAction(ISD::SETCC, VecTy, Custom);
        setOperationAction(ISD::TRUNCATE, VecTy, Custom);
        setOperationAction(ISD::ANY_EXTEND, VecTy, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VecTy, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VecTy, Custom);

        MVT BoolTy = MVT::getVectorVT(MVT::i1, N);
        if (!isTypeLegal(BoolTy))
          setOperationAction(ISD::SETCC, BoolTy, Custom);
      }
    }
  }

  setTargetDAGCombine(ISD::SPLAT_VECTOR);
  setTargetDAGCombine(ISD::VSELECT);
}

unsigned
HexagonTargetLowering::getPreferredHvxVectorAction(MVT VecTy) const {
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned VecLen = VecTy.getVectorNumElements();
  unsigned HwLen = Subtarget.getVectorLength();

  // Split vectors of i1 that exceed byte vector length.
  if (ElemTy == MVT::i1 && VecLen > HwLen)
    return TargetLoweringBase::TypeSplitVector;

  ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes();
  // For shorter vectors of i1, widen them if any of the corresponding
  // vectors of integers needs to be widened.
  if (ElemTy == MVT::i1) {
    for (MVT T : Tys) {
      assert(T != MVT::i1);
      auto A = getPreferredHvxVectorAction(MVT::getVectorVT(T, VecLen));
      if (A != ~0u)
        return A;
    }
    return ~0u;
  }

  // If the size of VecTy is at least half of the vector length,
  // widen the vector. Note: the threshold was not selected in
  // any scientific way.
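  // For example, with 128-byte HVX (HwWidth = 1024) a 512-bit v32i16 meets
  // the HwWidth/2 bound below and is widened, while a 128-bit v8i16 is only
  // widened when -hexagon-hvx-widen is explicitly set to 16 or less.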
  if (llvm::is_contained(Tys, ElemTy)) {
    unsigned VecWidth = VecTy.getSizeInBits();
    bool HaveThreshold = HvxWidenThreshold.getNumOccurrences() > 0;
    if (HaveThreshold && 8*HvxWidenThreshold <= VecWidth)
      return TargetLoweringBase::TypeWidenVector;
    unsigned HwWidth = 8*HwLen;
    if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
      return TargetLoweringBase::TypeWidenVector;
  }

  // Defer to default.
  return ~0u;
}

SDValue
HexagonTargetLowering::getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                              const SDLoc &dl, SelectionDAG &DAG) const {
  SmallVector<SDValue,4> IntOps;
  IntOps.push_back(DAG.getConstant(IntId, dl, MVT::i32));
  append_range(IntOps, Ops);
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ResTy, IntOps);
}

MVT
HexagonTargetLowering::typeJoin(const TypePair &Tys) const {
  assert(Tys.first.getVectorElementType() == Tys.second.getVectorElementType());

  MVT ElemTy = Tys.first.getVectorElementType();
  return MVT::getVectorVT(ElemTy, Tys.first.getVectorNumElements() +
                                  Tys.second.getVectorNumElements());
}

HexagonTargetLowering::TypePair
HexagonTargetLowering::typeSplit(MVT VecTy) const {
  assert(VecTy.isVector());
  unsigned NumElem = VecTy.getVectorNumElements();
  assert((NumElem % 2) == 0 && "Expecting even-sized vector type");
  MVT HalfTy = MVT::getVectorVT(VecTy.getVectorElementType(), NumElem/2);
  return { HalfTy, HalfTy };
}

MVT
HexagonTargetLowering::typeExtElem(MVT VecTy, unsigned Factor) const {
  MVT ElemTy = VecTy.getVectorElementType();
  MVT NewElemTy = MVT::getIntegerVT(ElemTy.getSizeInBits() * Factor);
  return MVT::getVectorVT(NewElemTy, VecTy.getVectorNumElements());
}

MVT
HexagonTargetLowering::typeTruncElem(MVT VecTy, unsigned Factor) const {
  MVT ElemTy = VecTy.getVectorElementType();
  MVT NewElemTy = MVT::getIntegerVT(ElemTy.getSizeInBits() / Factor);
  return MVT::getVectorVT(NewElemTy, VecTy.getVectorNumElements());
}

SDValue
HexagonTargetLowering::opCastElem(SDValue Vec, MVT ElemTy,
                                  SelectionDAG &DAG) const {
  if (ty(Vec).getVectorElementType() == ElemTy)
    return Vec;
  MVT CastTy = tyVector(Vec.getValueType().getSimpleVT(), ElemTy);
  return DAG.getBitcast(CastTy, Vec);
}

SDValue
HexagonTargetLowering::opJoin(const VectorPair &Ops, const SDLoc &dl,
                              SelectionDAG &DAG) const {
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, typeJoin(ty(Ops)),
                     Ops.second, Ops.first);
}

HexagonTargetLowering::VectorPair
HexagonTargetLowering::opSplit(SDValue Vec, const SDLoc &dl,
                               SelectionDAG &DAG) const {
  TypePair Tys = typeSplit(ty(Vec));
  if (Vec.getOpcode() == HexagonISD::QCAT)
    return VectorPair(Vec.getOperand(0), Vec.getOperand(1));
  return DAG.SplitVector(Vec, dl, Tys.first, Tys.second);
}

bool
HexagonTargetLowering::isHvxSingleTy(MVT Ty) const {
  return Subtarget.isHVXVectorType(Ty) &&
         Ty.getSizeInBits() == 8 * Subtarget.getVectorLength();
}

bool
HexagonTargetLowering::isHvxPairTy(MVT Ty) const {
  return Subtarget.isHVXVectorType(Ty) &&
         Ty.getSizeInBits() == 16 * Subtarget.getVectorLength();
}

bool
HexagonTargetLowering::isHvxBoolTy(MVT Ty) const {
  return Subtarget.isHVXVectorType(Ty, true) &&
         Ty.getVectorElementType() == MVT::i1;
}

bool HexagonTargetLowering::allowsHvxMemoryAccess(
    MVT VecTy, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  // Bool vectors are excluded by default, but make it explicit to
  // emphasize that bool vectors cannot be loaded or stored.
  // Also, disallow double vector stores (to prevent unnecessary
  // store widening in DAG combiner).
  if (VecTy.getSizeInBits() > 8*Subtarget.getVectorLength())
    return false;
  if (!Subtarget.isHVXVectorType(VecTy, /*IncludeBool=*/false))
    return false;
  if (Fast)
    *Fast = true;
  return true;
}

bool HexagonTargetLowering::allowsHvxMisalignedMemoryAccesses(
    MVT VecTy, MachineMemOperand::Flags Flags, bool *Fast) const {
  if (!Subtarget.isHVXVectorType(VecTy))
    return false;
  // XXX Should this be false? vmemu are a bit slower than vmem.
  if (Fast)
    *Fast = true;
  return true;
}

SDValue
HexagonTargetLowering::convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                                          SelectionDAG &DAG) const {
  if (ElemIdx.getValueType().getSimpleVT() != MVT::i32)
    ElemIdx = DAG.getBitcast(MVT::i32, ElemIdx);

  unsigned ElemWidth = ElemTy.getSizeInBits();
  if (ElemWidth == 8)
    return ElemIdx;

  unsigned L = Log2_32(ElemWidth/8);
  const SDLoc &dl(ElemIdx);
  return DAG.getNode(ISD::SHL, dl, MVT::i32,
                     {ElemIdx, DAG.getConstant(L, dl, MVT::i32)});
}

SDValue
HexagonTargetLowering::getIndexInWord32(SDValue Idx, MVT ElemTy,
                                        SelectionDAG &DAG) const {
  unsigned ElemWidth = ElemTy.getSizeInBits();
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  if (ElemWidth == 32)
    return Idx;

  if (ty(Idx) != MVT::i32)
    Idx = DAG.getBitcast(MVT::i32, Idx);
  const SDLoc &dl(Idx);
  SDValue Mask = DAG.getConstant(32/ElemWidth - 1, dl, MVT::i32);
  SDValue SubIdx = DAG.getNode(ISD::AND, dl, MVT::i32, {Idx, Mask});
  return SubIdx;
}

SDValue
HexagonTargetLowering::getByteShuffle(const SDLoc &dl, SDValue Op0,
                                      SDValue Op1, ArrayRef<int> Mask,
                                      SelectionDAG &DAG) const {
  MVT OpTy = ty(Op0);
  assert(OpTy == ty(Op1));

  MVT ElemTy = OpTy.getVectorElementType();
  if (ElemTy == MVT::i8)
    return DAG.getVectorShuffle(OpTy, dl, Op0, Op1, Mask);
  assert(ElemTy.getSizeInBits() >= 8);

  MVT ResTy = tyVector(OpTy, MVT::i8);
  unsigned ElemSize = ElemTy.getSizeInBits() / 8;

  SmallVector<int,128> ByteMask;
  for (int M : Mask) {
    if (M < 0) {
      for (unsigned I = 0; I != ElemSize; ++I)
        ByteMask.push_back(-1);
    } else {
      int NewM = M*ElemSize;
      for (unsigned I = 0; I != ElemSize; ++I)
        ByteMask.push_back(NewM+I);
    }
  }
  assert(ResTy.getVectorNumElements() == ByteMask.size());
  return DAG.getVectorShuffle(ResTy, dl, opCastElem(Op0, MVT::i8, DAG),
                              opCastElem(Op1, MVT::i8, DAG), ByteMask);
}

SDValue
HexagonTargetLowering::buildHvxVectorReg(ArrayRef<SDValue> Values,
                                         const SDLoc &dl, MVT VecTy,
                                         SelectionDAG &DAG) const {
  unsigned VecLen = Values.size();
  MachineFunction &MF = DAG.getMachineFunction();
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();
  unsigned HwLen = Subtarget.getVectorLength();

  unsigned ElemSize = ElemWidth / 8;
  assert(ElemSize*VecLen == HwLen);
  SmallVector<SDValue,32> Words;

  if (VecTy.getVectorElementType() != MVT::i32) {
    assert((ElemSize == 1 || ElemSize == 2) && "Invalid element size");
    unsigned OpsPerWord = (ElemSize == 1) ?
        4 : 2;
    MVT PartVT = MVT::getVectorVT(VecTy.getVectorElementType(), OpsPerWord);
    for (unsigned i = 0; i != VecLen; i += OpsPerWord) {
      SDValue W = buildVector32(Values.slice(i, OpsPerWord), dl, PartVT, DAG);
      Words.push_back(DAG.getBitcast(MVT::i32, W));
    }
  } else {
    Words.assign(Values.begin(), Values.end());
  }

  unsigned NumWords = Words.size();
  bool IsSplat = true, IsUndef = true;
  SDValue SplatV;
  for (unsigned i = 0; i != NumWords && IsSplat; ++i) {
    if (isUndef(Words[i]))
      continue;
    IsUndef = false;
    if (!SplatV.getNode())
      SplatV = Words[i];
    else if (SplatV != Words[i])
      IsSplat = false;
  }
  if (IsUndef)
    return DAG.getUNDEF(VecTy);
  if (IsSplat) {
    assert(SplatV.getNode());
    auto *IdxN = dyn_cast<ConstantSDNode>(SplatV.getNode());
    if (IdxN && IdxN->isZero())
      return getZero(dl, VecTy, DAG);
    MVT WordTy = MVT::getVectorVT(MVT::i32, HwLen/4);
    SDValue S = DAG.getNode(ISD::SPLAT_VECTOR, dl, WordTy, SplatV);
    return DAG.getBitcast(VecTy, S);
  }

  // Delay recognizing constant vectors until here, so that we can generate
  // a vsplat.
  SmallVector<ConstantInt*, 128> Consts(VecLen);
  bool AllConst = getBuildVectorConstInts(Values, VecTy, DAG, Consts);
  if (AllConst) {
    ArrayRef<Constant*> Tmp((Constant**)Consts.begin(),
                            (Constant**)Consts.end());
    Constant *CV = ConstantVector::get(Tmp);
    Align Alignment(HwLen);
    SDValue CP =
        LowerConstantPool(DAG.getConstantPool(CV, VecTy, Alignment), DAG);
    return DAG.getLoad(VecTy, dl, DAG.getEntryNode(), CP,
                       MachinePointerInfo::getConstantPool(MF), Alignment);
  }

  // A special case is a situation where the vector is built entirely from
  // elements extracted from another vector. This could be done via a shuffle
  // more efficiently, but typically, the size of the source vector will not
  // match the size of the vector being built (which precludes the use of a
  // shuffle directly).
  // This only handles a single source vector, and the vector being built
  // should be of a sub-vector type of the source vector type.
  auto IsBuildFromExtracts = [this,&Values] (SDValue &SrcVec,
                                             SmallVectorImpl<int> &SrcIdx) {
    SDValue Vec;
    for (SDValue V : Values) {
      if (isUndef(V)) {
        SrcIdx.push_back(-1);
        continue;
      }
      if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
        return false;
      // All extracts should come from the same vector.
      SDValue T = V.getOperand(0);
      if (Vec.getNode() != nullptr && T.getNode() != Vec.getNode())
        return false;
      Vec = T;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
      if (C == nullptr)
        return false;
      int I = C->getSExtValue();
      assert(I >= 0 && "Negative element index");
      SrcIdx.push_back(I);
    }
    SrcVec = Vec;
    return true;
  };

  SmallVector<int,128> ExtIdx;
  SDValue ExtVec;
  if (IsBuildFromExtracts(ExtVec, ExtIdx)) {
    MVT ExtTy = ty(ExtVec);
    unsigned ExtLen = ExtTy.getVectorNumElements();
    if (ExtLen == VecLen || ExtLen == 2*VecLen) {
      // Construct a new shuffle mask that will produce a vector with the same
      // number of elements as the input vector, and such that the vector we
      // want will be the initial subvector of it.
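      // For example, building a 4-element vector from extracts at indices
      // {5, 1, undef, 2} of an 8-element source gives Mask = {5, 1, -1, 2}
      // below, padded with the unused indices {0, 3, 4, 6} to a full
      // 8-entry mask.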
      SmallVector<int,128> Mask;
      BitVector Used(ExtLen);

      for (int M : ExtIdx) {
        Mask.push_back(M);
        if (M >= 0)
          Used.set(M);
      }
      // Fill the rest of the mask with the unused elements of ExtVec in hopes
      // that it will result in a permutation of ExtVec's elements. It's still
      // fine if it doesn't (e.g. if undefs are present, or elements are
      // repeated), but permutations can always be done efficiently via vdelta
      // and vrdelta.
      for (unsigned I = 0; I != ExtLen; ++I) {
        if (Mask.size() == ExtLen)
          break;
        if (!Used.test(I))
          Mask.push_back(I);
      }

      SDValue S = DAG.getVectorShuffle(ExtTy, dl, ExtVec,
                                       DAG.getUNDEF(ExtTy), Mask);
      if (ExtLen == VecLen)
        return S;
      return DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, VecTy, S);
    }
  }

  // Construct two halves in parallel, then or them together.
  assert(4*Words.size() == Subtarget.getVectorLength());
  SDValue HalfV0 = getInstr(Hexagon::V6_vd0, dl, VecTy, {}, DAG);
  SDValue HalfV1 = getInstr(Hexagon::V6_vd0, dl, VecTy, {}, DAG);
  SDValue S = DAG.getConstant(4, dl, MVT::i32);
  for (unsigned i = 0; i != NumWords/2; ++i) {
    SDValue N = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
                            {HalfV0, Words[i]});
    SDValue M = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
                            {HalfV1, Words[i+NumWords/2]});
    HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, S});
    HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, S});
  }

  HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy,
                       {HalfV0, DAG.getConstant(HwLen/2, dl, MVT::i32)});
  SDValue DstV = DAG.getNode(ISD::OR, dl, VecTy, {HalfV0, HalfV1});
  return DstV;
}

SDValue
HexagonTargetLowering::createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
      unsigned BitBytes, bool ZeroFill, SelectionDAG &DAG) const {
  MVT PredTy = ty(PredV);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);

  if (Subtarget.isHVXVectorType(PredTy, true)) {
    // Move the vector predicate PredV to a vector register, and scale it
    // down to match the representation (bytes per type element) that the
    // (wider) destination predicate uses. The scaling down will pick every
    // 2nd or 4th (every Scale-th in general) element and put them at the
    // front of the resulting vector. This prefix will then be merged into
    // the byte form (Q2V) of the destination predicate by the caller.
    // To avoid having an operation that generates an illegal type (short
    // vector), generate a full size vector.
    //
    SDValue T = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, PredV);
    SmallVector<int,128> Mask(HwLen);
    // Scale = BitBytes(PredV) / Given BitBytes.
    unsigned Scale = HwLen / (PredTy.getVectorNumElements() * BitBytes);
    unsigned BlockLen = PredTy.getVectorNumElements() * BitBytes;

    for (unsigned i = 0; i != HwLen; ++i) {
      unsigned Num = i % Scale;
      unsigned Off = i / Scale;
      Mask[BlockLen*Num + Off] = i;
    }
    SDValue S = DAG.getVectorShuffle(ByteTy, dl, T, DAG.getUNDEF(ByteTy), Mask);
    if (!ZeroFill)
      return S;
    // Fill the bytes beyond BlockLen with 0s.
    // V6_pred_scalar2 cannot fill the entire predicate, so it only works
    // when BlockLen < HwLen.
    assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
    MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
    SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                         {DAG.getConstant(BlockLen, dl, MVT::i32)}, DAG);
    SDValue M = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, Q);
    return DAG.getNode(ISD::AND, dl, ByteTy, S, M);
  }

  // Make sure that this is a valid scalar predicate.
  assert(PredTy == MVT::v2i1 || PredTy == MVT::v4i1 || PredTy == MVT::v8i1);

  unsigned Bytes = 8 / PredTy.getVectorNumElements();
  SmallVector<SDValue,4> Words[2];
  unsigned IdxW = 0;

  auto Lo32 = [&DAG, &dl] (SDValue P) {
    return DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, P);
  };
  auto Hi32 = [&DAG, &dl] (SDValue P) {
    return DAG.getTargetExtractSubreg(Hexagon::isub_hi, dl, MVT::i32, P);
  };

  SDValue W0 = isUndef(PredV)
                  ? DAG.getUNDEF(MVT::i64)
                  : DAG.getNode(HexagonISD::P2D, dl, MVT::i64, PredV);
  Words[IdxW].push_back(Hi32(W0));
  Words[IdxW].push_back(Lo32(W0));

  while (Bytes < BitBytes) {
    IdxW ^= 1;
    Words[IdxW].clear();

    if (Bytes < 4) {
      for (const SDValue &W : Words[IdxW ^ 1]) {
        SDValue T = expandPredicate(W, dl, DAG);
        Words[IdxW].push_back(Hi32(T));
        Words[IdxW].push_back(Lo32(T));
      }
    } else {
      for (const SDValue &W : Words[IdxW ^ 1]) {
        Words[IdxW].push_back(W);
        Words[IdxW].push_back(W);
      }
    }
    Bytes *= 2;
  }

  assert(Bytes == BitBytes);

  SDValue Vec = ZeroFill ? getZero(dl, ByteTy, DAG) : DAG.getUNDEF(ByteTy);
  SDValue S4 = DAG.getConstant(HwLen-4, dl, MVT::i32);
  for (const SDValue &W : Words[IdxW]) {
    Vec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, Vec, S4);
    Vec = DAG.getNode(HexagonISD::VINSERTW0, dl, ByteTy, Vec, W);
  }

  return Vec;
}

SDValue
HexagonTargetLowering::buildHvxVectorPred(ArrayRef<SDValue> Values,
                                          const SDLoc &dl, MVT VecTy,
                                          SelectionDAG &DAG) const {
  // Construct a vector V of bytes, such that a comparison V >u 0 would
  // produce the required vector predicate.
  unsigned VecLen = Values.size();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(VecLen <= HwLen || VecLen == 8*HwLen);
  SmallVector<SDValue,128> Bytes;
  bool AllT = true, AllF = true;

  auto IsTrue = [] (SDValue V) {
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
      return !N->isZero();
    return false;
  };
  auto IsFalse = [] (SDValue V) {
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
      return N->isZero();
    return false;
  };

  if (VecLen <= HwLen) {
    // In the hardware, each bit of a vector predicate corresponds to a byte
    // of a vector register. Calculate how many bytes a bit of VecTy
    // corresponds to.
    assert(HwLen % VecLen == 0);
    unsigned BitBytes = HwLen / VecLen;
    for (SDValue V : Values) {
      AllT &= IsTrue(V);
      AllF &= IsFalse(V);

      SDValue Ext = !V.isUndef() ? DAG.getZExtOrTrunc(V, dl, MVT::i8)
                                 : DAG.getUNDEF(MVT::i8);
      for (unsigned B = 0; B != BitBytes; ++B)
        Bytes.push_back(Ext);
    }
  } else {
    // There are as many i1 values as there are bits in a vector register.
    // Divide the values into groups of 8 and check that each group consists
    // of the same value (ignoring undefs).
    for (unsigned I = 0; I != VecLen; I += 8) {
      unsigned B = 0;
      // Find the first non-undef value in this group.
      for (; B != 8; ++B) {
        if (!Values[I+B].isUndef())
          break;
      }
      SDValue F = Values[I+B];
      AllT &= IsTrue(F);
      AllF &= IsFalse(F);

      SDValue Ext = (B < 8) ? DAG.getZExtOrTrunc(F, dl, MVT::i8)
                            : DAG.getUNDEF(MVT::i8);
      Bytes.push_back(Ext);
      // Verify that the rest of the values in the group are the same as the
      // first.
      for (; B != 8; ++B)
        assert(Values[I+B].isUndef() || Values[I+B] == F);
    }
  }

  if (AllT)
    return DAG.getNode(HexagonISD::QTRUE, dl, VecTy);
  if (AllF)
    return DAG.getNode(HexagonISD::QFALSE, dl, VecTy);

  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = buildHvxVectorReg(Bytes, dl, ByteTy, DAG);
  return DAG.getNode(HexagonISD::V2Q, dl, VecTy, ByteVec);
}

SDValue
HexagonTargetLowering::extractHvxElementReg(SDValue VecV, SDValue IdxV,
      const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
  MVT ElemTy = ty(VecV).getVectorElementType();

  unsigned ElemWidth = ElemTy.getSizeInBits();
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  (void)ElemWidth;

  SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
  SDValue ExWord = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32,
                               {VecV, ByteIdx});
  if (ElemTy == MVT::i32)
    return ExWord;

  // Have an extracted word, need to extract the smaller element out of it.
  // 1. Extract the bits of (the original) IdxV that correspond to the index
  //    of the desired element in the 32-bit word.
  SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
  // 2. Extract the element from the word.
  SDValue ExVec = DAG.getBitcast(tyVector(ty(ExWord), ElemTy), ExWord);
  return extractVector(ExVec, SubIdx, dl, ElemTy, MVT::i32, DAG);
}

SDValue
HexagonTargetLowering::extractHvxElementPred(SDValue VecV, SDValue IdxV,
      const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
  // Implement other return types if necessary.
  assert(ResTy == MVT::i1);

  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);

  unsigned Scale = HwLen / ty(VecV).getVectorNumElements();
  SDValue ScV = DAG.getConstant(Scale, dl, MVT::i32);
  IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, ScV);

  SDValue ExtB = extractHvxElementReg(ByteVec, IdxV, dl, MVT::i32, DAG);
  SDValue Zero = DAG.getTargetConstant(0, dl, MVT::i32);
  return getInstr(Hexagon::C2_cmpgtui, dl, MVT::i1, {ExtB, Zero}, DAG);
}

SDValue
HexagonTargetLowering::insertHvxElementReg(SDValue VecV, SDValue IdxV,
      SDValue ValV, const SDLoc &dl, SelectionDAG &DAG) const {
  MVT ElemTy = ty(VecV).getVectorElementType();

  unsigned ElemWidth = ElemTy.getSizeInBits();
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  (void)ElemWidth;

  // Insert the word ValV at the byte offset ByteIdxV (rounded down to a
  // multiple of 4): rotate the vector so that the target word is at
  // position 0, insert the word, then rotate back.
  auto InsertWord = [&DAG,&dl,this] (SDValue VecV, SDValue ValV,
                                     SDValue ByteIdxV) {
    MVT VecTy = ty(VecV);
    unsigned HwLen = Subtarget.getVectorLength();
    SDValue MaskV = DAG.getNode(ISD::AND, dl, MVT::i32,
                                {ByteIdxV, DAG.getConstant(-4, dl, MVT::i32)});
    SDValue RotV = DAG.getNode(HexagonISD::VROR, dl, VecTy, {VecV, MaskV});
    SDValue InsV = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy, {RotV, ValV});
    SDValue SubV = DAG.getNode(ISD::SUB, dl, MVT::i32,
                               {DAG.getConstant(HwLen, dl, MVT::i32), MaskV});
    SDValue TorV = DAG.getNode(HexagonISD::VROR, dl, VecTy, {InsV, SubV});
    return TorV;
  };

  SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
  if (ElemTy == MVT::i32)
    return InsertWord(VecV, ValV, ByteIdx);

  // If this is not inserting a 32-bit word, convert it into such a thing.
  // 1. Extract the existing word from the target vector.
  SDValue WordIdx = DAG.getNode(ISD::SRL, dl, MVT::i32,
                                {ByteIdx, DAG.getConstant(2, dl, MVT::i32)});
  SDValue Ext = extractHvxElementReg(opCastElem(VecV, MVT::i32, DAG), WordIdx,
                                     dl, MVT::i32, DAG);

  // 2. Treating the extracted word as a 32-bit vector, insert the given
  //    value into it.
  SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
  MVT SubVecTy = tyVector(ty(Ext), ElemTy);
  SDValue Ins = insertVector(DAG.getBitcast(SubVecTy, Ext),
                             ValV, SubIdx, dl, ElemTy, DAG);

  // 3. Insert the 32-bit word back into the original vector.
  return InsertWord(VecV, Ins, ByteIdx);
}

SDValue
HexagonTargetLowering::insertHvxElementPred(SDValue VecV, SDValue IdxV,
      SDValue ValV, const SDLoc &dl, SelectionDAG &DAG) const {
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);

  unsigned Scale = HwLen / ty(VecV).getVectorNumElements();
  SDValue ScV = DAG.getConstant(Scale, dl, MVT::i32);
  IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, ScV);
  ValV = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, ValV);

  SDValue InsV = insertHvxElementReg(ByteVec, IdxV, ValV, dl, DAG);
  return DAG.getNode(HexagonISD::V2Q, dl, ty(VecV), InsV);
}

SDValue
HexagonTargetLowering::extractHvxSubvectorReg(SDValue VecV, SDValue IdxV,
      const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  unsigned HwLen = Subtarget.getVectorLength();
  unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();

  // If the source vector is a vector pair, get the single vector containing
  // the subvector of interest. The subvector will never overlap two single
  // vectors.
  if (isHvxPairTy(VecTy)) {
    unsigned SubIdx;
    if (Idx * ElemWidth >= 8*HwLen) {
      SubIdx = Hexagon::vsub_hi;
      Idx -= VecTy.getVectorNumElements() / 2;
    } else {
      SubIdx = Hexagon::vsub_lo;
    }
    VecTy = typeSplit(VecTy).first;
    VecV = DAG.getTargetExtractSubreg(SubIdx, dl, VecTy, VecV);
    if (VecTy == ResTy)
      return VecV;
  }

  // The only meaningful subvectors of a single HVX vector are those that
  // fit in a scalar register.
  assert(ResTy.getSizeInBits() == 32 || ResTy.getSizeInBits() == 64);

  MVT WordTy = tyVector(VecTy, MVT::i32);
  SDValue WordVec = DAG.getBitcast(WordTy, VecV);
  unsigned WordIdx = (Idx*ElemWidth) / 32;

  SDValue W0Idx = DAG.getConstant(WordIdx, dl, MVT::i32);
  SDValue W0 = extractHvxElementReg(WordVec, W0Idx, dl, MVT::i32, DAG);
  if (ResTy.getSizeInBits() == 32)
    return DAG.getBitcast(ResTy, W0);

  SDValue W1Idx = DAG.getConstant(WordIdx+1, dl, MVT::i32);
  SDValue W1 = extractHvxElementReg(WordVec, W1Idx, dl, MVT::i32, DAG);
  SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, {W1, W0});
  return DAG.getBitcast(ResTy, WW);
}

SDValue
HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
      const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
  // IdxV is required to be a constant.
  unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();

  unsigned ResLen = ResTy.getVectorNumElements();
  unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
  unsigned Offset = Idx * BitBytes;
  SDValue Undef = DAG.getUNDEF(ByteTy);
  SmallVector<int,128> Mask;

  if (Subtarget.isHVXVectorType(ResTy, true)) {
    // Converting between two vector predicates. Since the result is shorter
    // than the source, it will correspond to a vector predicate with the
    // relevant bits replicated. The replication count is the ratio of the
    // source and target vector lengths.
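    // For example, with 128-byte HVX, extracting v32i1 at index 32 from a
    // v64i1 source gives BitBytes = 2, Offset = 64, Rep = 2, and the mask
    // below becomes {64,64, 65,65, ..., 127,127}: each interesting byte of
    // ByteVec is replicated Rep times to fill the whole register.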
    unsigned Rep = VecTy.getVectorNumElements() / ResLen;
    assert(isPowerOf2_32(Rep) && HwLen % Rep == 0);
    for (unsigned i = 0; i != HwLen/Rep; ++i) {
      for (unsigned j = 0; j != Rep; ++j)
        Mask.push_back(i + Offset);
    }
    SDValue ShuffV = DAG.getVectorShuffle(ByteTy, dl, ByteVec, Undef, Mask);
    return DAG.getNode(HexagonISD::V2Q, dl, ResTy, ShuffV);
  }

  // Converting between a vector predicate and a scalar predicate. In the
  // vector predicate, a group of BitBytes bits will correspond to a single
  // i1 element of the source vector type. Those bits will all have the same
  // value. The same will be true for ByteVec, where each byte corresponds
  // to a bit in the vector predicate.
  // The algorithm is to traverse the ByteVec, going over the i1 values from
  // the source vector, and generate the corresponding representation in an
  // 8-byte vector. To avoid repeated extracts from ByteVec, shuffle the
  // elements so that the interesting 8 bytes will be in the low end of the
  // vector.
  unsigned Rep = 8 / ResLen;
  // Make sure the output fills the entire vector register, so repeat the
  // 8-byte groups as many times as necessary.
  for (unsigned r = 0; r != HwLen/ResLen; ++r) {
    // This will generate the indexes of the 8 interesting bytes.
    for (unsigned i = 0; i != ResLen; ++i) {
      for (unsigned j = 0; j != Rep; ++j)
        Mask.push_back(Offset + i*BitBytes);
    }
  }

  SDValue Zero = getZero(dl, MVT::i32, DAG);
  SDValue ShuffV = DAG.getVectorShuffle(ByteTy, dl, ByteVec, Undef, Mask);
  // Combine the two low words from ShuffV into a v8i8, and byte-compare
  // them against 0.
  SDValue W0 = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32, {ShuffV, Zero});
  SDValue W1 = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32,
                           {ShuffV, DAG.getConstant(4, dl, MVT::i32)});
  SDValue Vec64 = DAG.getNode(HexagonISD::COMBINE, dl, MVT::v8i8, {W1, W0});
  return getInstr(Hexagon::A4_vcmpbgtui, dl, ResTy,
                  {Vec64, DAG.getTargetConstant(0, dl, MVT::i32)}, DAG);
}

SDValue
HexagonTargetLowering::insertHvxSubvectorReg(SDValue VecV, SDValue SubV,
      SDValue IdxV, const SDLoc &dl, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();

  bool IsPair = isHvxPairTy(VecTy);
  MVT SingleTy = MVT::getVectorVT(ElemTy, (8*HwLen)/ElemWidth);
  // The two single vectors that VecV consists of, if it's a pair.
  SDValue V0, V1;
  SDValue SingleV = VecV;
  SDValue PickHi;

  if (IsPair) {
    V0 = DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, SingleTy, VecV);
    V1 = DAG.getTargetExtractSubreg(Hexagon::vsub_hi, dl, SingleTy, VecV);

    SDValue HalfV = DAG.getConstant(SingleTy.getVectorNumElements(),
                                    dl, MVT::i32);
    PickHi = DAG.getSetCC(dl, MVT::i1, IdxV, HalfV, ISD::SETUGT);
    if (isHvxSingleTy(SubTy)) {
      if (const auto *CN = dyn_cast<const ConstantSDNode>(IdxV.getNode())) {
        unsigned Idx = CN->getZExtValue();
        assert(Idx == 0 || Idx == VecTy.getVectorNumElements()/2);
        unsigned SubIdx = (Idx == 0) ?
            Hexagon::vsub_lo : Hexagon::vsub_hi;
        return DAG.getTargetInsertSubreg(SubIdx, dl, VecTy, VecV, SubV);
      }
      // If IdxV is not a constant, generate the two variants: with the
      // SubV as the high and as the low subregister, and select the right
      // pair based on the IdxV.
      SDValue InLo = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {SubV, V1});
      SDValue InHi = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {V0, SubV});
      return DAG.getNode(ISD::SELECT, dl, VecTy, PickHi, InHi, InLo);
    }
    // The subvector being inserted must be entirely contained in one of
    // the vectors V0 or V1. Set SingleV to the correct one, and update
    // IdxV to be the index relative to the beginning of that vector.
    SDValue S = DAG.getNode(ISD::SUB, dl, MVT::i32, IdxV, HalfV);
    IdxV = DAG.getNode(ISD::SELECT, dl, MVT::i32, PickHi, S, IdxV);
    SingleV = DAG.getNode(ISD::SELECT, dl, SingleTy, PickHi, V1, V0);
  }

  // The only meaningful subvectors of a single HVX vector are those that
  // fit in a scalar register.
  assert(SubTy.getSizeInBits() == 32 || SubTy.getSizeInBits() == 64);
  // Convert IdxV to be index in bytes.
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
    IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                       DAG.getConstant(ElemWidth/8, dl, MVT::i32));
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV, IdxV);
  }
  // When inserting a single word, the rotation back to the original position
  // would be by HwLen-Idx, but if two words are inserted, it will need to be
  // by (HwLen-4)-Idx.
  unsigned RolBase = HwLen;
  if (VecTy.getSizeInBits() == 32) {
    SDValue V = DAG.getBitcast(MVT::i32, SubV);
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, V);
  } else {
    SDValue V = DAG.getBitcast(MVT::i64, SubV);
    SDValue R0 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, V);
    SDValue R1 = DAG.getTargetExtractSubreg(Hexagon::isub_hi, dl, MVT::i32, V);
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, SingleV, R0);
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV,
                          DAG.getConstant(4, dl, MVT::i32));
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, SingleV, R1);
    RolBase = HwLen-4;
  }
  // If the vector wasn't ror'ed, don't ror it back.
  if (RolBase != 4 || !IdxN || !IdxN->isZero()) {
    SDValue RolV = DAG.getNode(ISD::SUB, dl, MVT::i32,
                               DAG.getConstant(RolBase, dl, MVT::i32), IdxV);
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV, RolV);
  }

  if (IsPair) {
    SDValue InLo = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {SingleV, V1});
    SDValue InHi = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {V0, SingleV});
    return DAG.getNode(ISD::SELECT, dl, VecTy, PickHi, InHi, InLo);
  }
  return SingleV;
}

SDValue
HexagonTargetLowering::insertHvxSubvectorPred(SDValue VecV, SDValue SubV,
      SDValue IdxV, const SDLoc &dl, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  assert(Subtarget.isHVXVectorType(VecTy, true));
  // VecV is an HVX vector predicate. SubV may be either an HVX vector
  // predicate as well, or it can be a scalar predicate.
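  // In outline: convert VecV to its byte form with Q2V, rotate the insertion
  // point down to byte 0, build a byte-level prefix for SubV with
  // createHvxPrefixPred, merge it in with vmux under a mask covering the
  // first BlockLen bytes, rotate back, and convert back with V2Q.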

  unsigned VecLen = VecTy.getVectorNumElements();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(HwLen % VecLen == 0 && "Unexpected vector type");

  unsigned Scale = VecLen / SubTy.getVectorNumElements();
  unsigned BitBytes = HwLen / VecLen;
  unsigned BlockLen = HwLen / Scale;

  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
  SDValue ByteSub = createHvxPrefixPred(SubV, dl, BitBytes, false, DAG);
  SDValue ByteIdx;

  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
    ByteIdx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                          DAG.getConstant(BitBytes, dl, MVT::i32));
    ByteVec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, ByteVec, ByteIdx);
  }

  // ByteVec is the target vector VecV rotated in such a way that the
  // subvector should be inserted at index 0. Generate a predicate mask
  // and use vmux to do the insertion.
  assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                       {DAG.getConstant(BlockLen, dl, MVT::i32)}, DAG);
  ByteVec = getInstr(Hexagon::V6_vmux, dl, ByteTy, {Q, ByteSub, ByteVec}, DAG);
  // Rotate ByteVec back, and convert to a vector predicate.
  if (!IdxN || !IdxN->isZero()) {
    SDValue HwLenV = DAG.getConstant(HwLen, dl, MVT::i32);
    SDValue ByteXdi = DAG.getNode(ISD::SUB, dl, MVT::i32, HwLenV, ByteIdx);
    ByteVec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, ByteVec, ByteXdi);
  }
  return DAG.getNode(HexagonISD::V2Q, dl, VecTy, ByteVec);
}

SDValue
HexagonTargetLowering::extendHvxVectorPred(SDValue VecV, const SDLoc &dl,
      MVT ResTy, bool ZeroExt, SelectionDAG &DAG) const {
  // Sign- and any-extending of a vector predicate to a vector register is
  // equivalent to Q2V. For zero-extensions, generate a vmux between 0 and
  // a vector of 1s (where the 1s are of type matching the vector type).
  assert(Subtarget.isHVXVectorType(ResTy));
  if (!ZeroExt)
    return DAG.getNode(HexagonISD::Q2V, dl, ResTy, VecV);

  assert(ty(VecV).getVectorNumElements() == ResTy.getVectorNumElements());
  SDValue True = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue False = getZero(dl, ResTy, DAG);
  return DAG.getSelect(dl, ResTy, VecV, True, False);
}

SDValue
HexagonTargetLowering::compressHvxPred(SDValue VecQ, const SDLoc &dl,
      MVT ResTy, SelectionDAG &DAG) const {
  // Given a predicate register VecQ, transfer bits VecQ[0..HwLen-1]
  // (i.e. the entire predicate register) to bits [0..HwLen-1] of a
  // vector register. The remaining bits of the vector register are
  // unspecified.

  MachineFunction &MF = DAG.getMachineFunction();
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  MVT PredTy = ty(VecQ);
  unsigned PredLen = PredTy.getVectorNumElements();
  assert(HwLen % PredLen == 0);
  MVT VecTy = MVT::getVectorVT(MVT::getIntegerVT(8*HwLen/PredLen), PredLen);

  Type *Int8Ty = Type::getInt8Ty(*DAG.getContext());
  SmallVector<Constant*, 128> Tmp;
  // Create an array of bytes (hex): 01,02,04,08,10,20,40,80, 01,02,04,08,...
  // These are bytes with the LSB rotated left with respect to their index.
  for (unsigned i = 0; i != HwLen/8; ++i) {
    for (unsigned j = 0; j != 8; ++j)
      Tmp.push_back(ConstantInt::get(Int8Ty, 1ull << j));
  }
  Constant *CV = ConstantVector::get(Tmp);
  Align Alignment(HwLen);
  SDValue CP =
      LowerConstantPool(DAG.getConstantPool(CV, ByteTy, Alignment), DAG);
  SDValue Bytes =
      DAG.getLoad(ByteTy, dl, DAG.getEntryNode(), CP,
                  MachinePointerInfo::getConstantPool(MF), Alignment);

  // Select the bytes that correspond to true bits in the vector predicate.
  SDValue Sel = DAG.getSelect(dl, VecTy, VecQ, DAG.getBitcast(VecTy, Bytes),
                              getZero(dl, VecTy, DAG));
  // Calculate the OR of all bytes in each group of 8. That will compress
  // all the individual bits into a single byte.
  // First, OR groups of 4, via vrmpy with 0x01010101.
  SDValue All1 =
      DAG.getSplatBuildVector(MVT::v4i8, dl, DAG.getConstant(1, dl, MVT::i32));
  SDValue Vrmpy = getInstr(Hexagon::V6_vrmpyub, dl, ByteTy, {Sel, All1}, DAG);
  // Then rotate the accumulated vector by 4 bytes, and do the final OR.
  SDValue Rot = getInstr(Hexagon::V6_valignbi, dl, ByteTy,
      {Vrmpy, Vrmpy, DAG.getTargetConstant(4, dl, MVT::i32)}, DAG);
  SDValue Vor = DAG.getNode(ISD::OR, dl, ByteTy, {Vrmpy, Rot});

  // Pick every 8th byte and coalesce them at the beginning of the output.
  // For symmetry, coalesce every 1+8th byte after that, then every 2+8th
  // byte and so on.
  SmallVector<int,128> Mask;
  for (unsigned i = 0; i != HwLen; ++i)
    Mask.push_back((8*i) % HwLen + i/(HwLen/8));
  SDValue Collect =
      DAG.getVectorShuffle(ByteTy, dl, Vor, DAG.getUNDEF(ByteTy), Mask);
  return DAG.getBitcast(ResTy, Collect);
}

SDValue
HexagonTargetLowering::LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG)
      const {
  const SDLoc &dl(Op);
  MVT VecTy = ty(Op);

  unsigned Size = Op.getNumOperands();
  SmallVector<SDValue,128> Ops;
  for (unsigned i = 0; i != Size; ++i)
    Ops.push_back(Op.getOperand(i));

  if (VecTy.getVectorElementType() == MVT::i1)
    return buildHvxVectorPred(Ops, dl, VecTy, DAG);

  if (VecTy.getSizeInBits() == 16*Subtarget.getVectorLength()) {
    ArrayRef<SDValue> A(Ops);
    MVT SingleTy = typeSplit(VecTy).first;
    SDValue V0 = buildHvxVectorReg(A.take_front(Size/2), dl, SingleTy, DAG);
    SDValue V1 = buildHvxVectorReg(A.drop_front(Size/2), dl, SingleTy, DAG);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, V0, V1);
  }

  return buildHvxVectorReg(Ops, dl, VecTy, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG)
      const {
  // Vector concatenation of two integer (non-bool) vectors does not need
  // special lowering. Custom-lower concats of bool vectors and expand
  // concats of more than 2 vectors.
  MVT VecTy = ty(Op);
  const SDLoc &dl(Op);
  unsigned NumOp = Op.getNumOperands();
  if (VecTy.getVectorElementType() != MVT::i1) {
    if (NumOp == 2)
      return Op;
    // Expand the other cases into a build-vector.
    SmallVector<SDValue,8> Elems;
    for (SDValue V : Op.getNode()->ops())
      DAG.ExtractVectorElements(V, Elems);
    // A vector of i16 will be broken up into a build_vector of i16's.
    // This is a problem, since at the time of operation legalization,
    // all operations are expected to be type-legalized, and i16 is not
    // a legal type.
    // If any of the extracted elements is not of a valid
    // type, sign-extend it to a valid one.
    for (unsigned i = 0, e = Elems.size(); i != e; ++i) {
      SDValue V = Elems[i];
      MVT Ty = ty(V);
      if (!isTypeLegal(Ty)) {
        EVT NTy = getTypeToTransformTo(*DAG.getContext(), Ty);
        if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
          Elems[i] = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NTy,
                                 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NTy,
                                             V.getOperand(0), V.getOperand(1)),
                                 DAG.getValueType(Ty));
          continue;
        }
        // A few less complicated cases.
        switch (V.getOpcode()) {
        case ISD::Constant:
          Elems[i] = DAG.getSExtOrTrunc(V, dl, NTy);
          break;
        case ISD::UNDEF:
          Elems[i] = DAG.getUNDEF(NTy);
          break;
        case ISD::TRUNCATE:
          Elems[i] = V.getOperand(0);
          break;
        default:
          llvm_unreachable("Unexpected vector element");
        }
      }
    }
    return DAG.getBuildVector(VecTy, dl, Elems);
  }

  assert(VecTy.getVectorElementType() == MVT::i1);
  unsigned HwLen = Subtarget.getVectorLength();
  assert(isPowerOf2_32(NumOp) && HwLen % NumOp == 0);

  SDValue Op0 = Op.getOperand(0);

  // If the operands are HVX types (i.e. not scalar predicates), then
  // defer the concatenation, and create QCAT instead.
  if (Subtarget.isHVXVectorType(ty(Op0), true)) {
    if (NumOp == 2)
      return DAG.getNode(HexagonISD::QCAT, dl, VecTy, Op0, Op.getOperand(1));

    ArrayRef<SDUse> U(Op.getNode()->ops());
    SmallVector<SDValue,4> SV(U.begin(), U.end());
    ArrayRef<SDValue> Ops(SV);

    MVT HalfTy = typeSplit(VecTy).first;
    SDValue V0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfTy,
                             Ops.take_front(NumOp/2));
    SDValue V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfTy,
                             Ops.take_back(NumOp/2));
    return DAG.getNode(HexagonISD::QCAT, dl, VecTy, V0, V1);
  }

  // Count how many bytes (in a vector register) each bit in VecTy
  // corresponds to.
  unsigned BitBytes = HwLen / VecTy.getVectorNumElements();

  SmallVector<SDValue,8> Prefixes;
  for (SDValue V : Op.getNode()->op_values()) {
    SDValue P = createHvxPrefixPred(V, dl, BitBytes, true, DAG);
    Prefixes.push_back(P);
  }

  unsigned InpLen = ty(Op.getOperand(0)).getVectorNumElements();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue S = DAG.getConstant(InpLen*BitBytes, dl, MVT::i32);
  SDValue Res = getZero(dl, ByteTy, DAG);
  for (unsigned i = 0, e = Prefixes.size(); i != e; ++i) {
    Res = DAG.getNode(HexagonISD::VROR, dl, ByteTy, Res, S);
    Res = DAG.getNode(ISD::OR, dl, ByteTy, Res, Prefixes[e-i-1]);
  }
  return DAG.getNode(HexagonISD::V2Q, dl, VecTy, Res);
}

SDValue
HexagonTargetLowering::LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG)
      const {
  // Change the type of the extracted element to i32.
  SDValue VecV = Op.getOperand(0);
  MVT ElemTy = ty(VecV).getVectorElementType();
  const SDLoc &dl(Op);
  SDValue IdxV = Op.getOperand(1);
  if (ElemTy == MVT::i1)
    return extractHvxElementPred(VecV, IdxV, dl, ty(Op), DAG);

  return extractHvxElementReg(VecV, IdxV, dl, ty(Op), DAG);
}

SDValue
HexagonTargetLowering::LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG)
      const {
  const SDLoc &dl(Op);
  SDValue VecV = Op.getOperand(0);
  SDValue ValV = Op.getOperand(1);
  SDValue IdxV = Op.getOperand(2);
  MVT ElemTy = ty(VecV).getVectorElementType();
  if (ElemTy == MVT::i1)
    return insertHvxElementPred(VecV, IdxV, ValV, dl, DAG);

  return insertHvxElementReg(VecV, IdxV, ValV, dl, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
      const {
  SDValue SrcV = Op.getOperand(0);
  MVT SrcTy = ty(SrcV);
  MVT DstTy = ty(Op);
  SDValue IdxV = Op.getOperand(1);
  unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
  assert(Idx % DstTy.getVectorNumElements() == 0);
  (void)Idx;
  const SDLoc &dl(Op);

  MVT ElemTy = SrcTy.getVectorElementType();
  if (ElemTy == MVT::i1)
    return extractHvxSubvectorPred(SrcV, IdxV, dl, DstTy, DAG);

  return extractHvxSubvectorReg(SrcV, IdxV, dl, DstTy, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG)
      const {
  // Idx does not need to be a constant.
  SDValue VecV = Op.getOperand(0);
  SDValue ValV = Op.getOperand(1);
  SDValue IdxV = Op.getOperand(2);

  const SDLoc &dl(Op);
  MVT VecTy = ty(VecV);
  MVT ElemTy = VecTy.getVectorElementType();
  if (ElemTy == MVT::i1)
    return insertHvxSubvectorPred(VecV, ValV, IdxV, dl, DAG);

  return insertHvxSubvectorReg(VecV, ValV, IdxV, dl, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const {
  // Lower any-extends of boolean vectors to sign-extends, since they
  // translate directly to Q2V. Zero-extending could also be done equally
  // fast, but Q2V is used/recognized in more places.
  // For all other vectors, use zero-extend.
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return LowerHvxSignExt(Op, DAG);
  return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Op), ResTy, InpV);
}

SDValue
HexagonTargetLowering::LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), false, DAG);
  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), true, DAG);
  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const {
  // Lower vector CTTZ into a computation using CTLZ (Hacker's Delight):
  //   cttz(x) = bitwidth(x) - ctlz(~x & (x-1))
  const SDLoc &dl(Op);
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  assert(ResTy == ty(InpV));

  // Calculate the vectors of 1 and bitwidth(x).
  MVT ElemTy = ty(InpV).getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();

  SDValue Vec1 = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue VecW = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(ElemWidth, dl, MVT::i32));
  SDValue VecN1 = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                              DAG.getConstant(-1, dl, MVT::i32));

  // Do not use DAG.getNOT, because that would create BUILD_VECTOR with
  // a BITCAST. Here we can skip the BITCAST (so we don't have to handle
  // it separately in custom combine or selection).
  SDValue A = DAG.getNode(ISD::AND, dl, ResTy,
                          {DAG.getNode(ISD::XOR, dl, ResTy, {InpV, VecN1}),
                           DAG.getNode(ISD::SUB, dl, ResTy, {InpV, Vec1})});
  return DAG.getNode(ISD::SUB, dl, ResTy,
                     {VecW, DAG.getNode(ISD::CTLZ, dl, ResTy, A)});
}

SDValue
HexagonTargetLowering::LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  assert(ResTy.isVector());
  const SDLoc &dl(Op);
  SmallVector<int,256> ShuffMask;

  MVT ElemTy = ResTy.getVectorElementType();
  unsigned VecLen = ResTy.getVectorNumElements();
  SDValue Vs = Op.getOperand(0);
  SDValue Vt = Op.getOperand(1);
  bool IsSigned = Op.getOpcode() == ISD::MULHS;

  if (ElemTy == MVT::i8 || ElemTy == MVT::i16) {
    // For i8 vectors Vs = (a0, a1, ...), Vt = (b0, b1, ...),
    // V6_vmpybv Vs, Vt produces a pair of i16 vectors Hi:Lo,
    // where Lo = (a0*b0, a2*b2, ...), Hi = (a1*b1, a3*b3, ...).
    // For i16, use V6_vmpyhv, which behaves in an analogous way to
    // V6_vmpybv: results Lo and Hi are products of even/odd elements
    // respectively.
    MVT ExtTy = typeExtElem(ResTy, 2);
    unsigned MpyOpc = ElemTy == MVT::i8
                        ? (IsSigned ? Hexagon::V6_vmpybv : Hexagon::V6_vmpyubv)
                        : (IsSigned ? Hexagon::V6_vmpyhv : Hexagon::V6_vmpyuhv);
    SDValue M = getInstr(MpyOpc, dl, ExtTy, {Vs, Vt}, DAG);

    // Discard low halves of the resulting values, collect the high halves.
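    // In the mask below, indices 0..VecLen-1 select from the low product
    // vector (even source lanes) and VecLen..2*VecLen-1 from the high one
    // (odd source lanes); taking the odd (high-half) positions yields
    // mulh(a0,b0), mulh(a1,b1), ... in element order.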
    for (unsigned I = 0; I < VecLen; I += 2) {
      ShuffMask.push_back(I+1);         // Pick even element.
      ShuffMask.push_back(I+VecLen+1);  // Pick odd element.
    }
    VectorPair P = opSplit(opCastElem(M, ElemTy, DAG), dl, DAG);
    SDValue BS = getByteShuffle(dl, P.first, P.second, ShuffMask, DAG);
    return DAG.getBitcast(ResTy, BS);
  }

  assert(ElemTy == MVT::i32);
  SDValue S16 = DAG.getConstant(16, dl, MVT::i32);

  auto MulHS_V60 = [&](SDValue Vs, SDValue Vt) {
    // mulhs(Vs,Vt) =
    //   = [(Hi(Vs)*2^16 + Lo(Vs)) *s (Hi(Vt)*2^16 + Lo(Vt))] >> 32
    //   = [Hi(Vs)*2^16 *s Hi(Vt)*2^16 + Hi(Vs) *su Lo(Vt)*2^16
    //      + Lo(Vs) *us (Hi(Vt)*2^16 + Lo(Vt))] >> 32
    //   = [Hi(Vs) *s Hi(Vt)*2^32 + Hi(Vs) *su Lo(Vt)*2^16
    //      + Lo(Vs) *us Vt] >> 32
    // The low half of Lo(Vs)*Lo(Vt) will be discarded (it's not added to
    // anything, so it cannot produce any carry over to higher bits),
    // so everything in [] can be shifted by 16 without loss of precision.
    //   = [Hi(Vs) *s Hi(Vt)*2^16 + Hi(Vs)*su Lo(Vt) + Lo(Vs)*Vt >> 16] >> 16
    //   = [Hi(Vs) *s Hi(Vt)*2^16 + Hi(Vs)*su Lo(Vt) + V6_vmpyewuh(Vs,Vt)] >> 16
    // Denote Hi(Vs) = Vs':
    //   = [Vs' *s Hi(Vt)*2^16 + Vs' *su Lo(Vt) + V6_vmpyewuh(Vt,Vs)] >> 16
    //   = Vs' *s Hi(Vt) + (V6_vmpyiewuh(Vs',Vt) + V6_vmpyewuh(Vt,Vs)) >> 16
    SDValue T0 = getInstr(Hexagon::V6_vmpyewuh, dl, ResTy, {Vt, Vs}, DAG);
    // Get Vs':
    SDValue S0 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {Vs, S16}, DAG);
    SDValue T1 = getInstr(Hexagon::V6_vmpyiewuh_acc, dl, ResTy,
                          {T0, S0, Vt}, DAG);
    // Shift by 16:
    SDValue S2 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {T1, S16}, DAG);
    // Get Vs'*Hi(Vt):
    SDValue T2 = getInstr(Hexagon::V6_vmpyiowh, dl, ResTy, {S0, Vt}, DAG);
    // Add:
    SDValue T3 = DAG.getNode(ISD::ADD, dl, ResTy, {S2, T2});
    return T3;
  };

  auto MulHS_V62 = [&](SDValue Vs, SDValue Vt) {
    MVT PairTy = typeJoin({ResTy, ResTy});
    SDValue T0 = getInstr(Hexagon::V6_vmpyewuh_64, dl, PairTy, {Vs, Vt}, DAG);
    SDValue T1 = getInstr(Hexagon::V6_vmpyowh_64_acc, dl, PairTy,
                          {T0, Vs, Vt}, DAG);
    return opSplit(T1, dl, DAG).second;
  };

  if (IsSigned) {
    if (Subtarget.useHVXV62Ops())
      return MulHS_V62(Vs, Vt);
    return MulHS_V60(Vs, Vt);
  }

  // Unsigned mulhw. (Would expansion using signed mulhw be better?)

  auto LoVec = [&DAG,ResTy,dl] (SDValue Pair) {
    return DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, ResTy, Pair);
  };
  auto HiVec = [&DAG,ResTy,dl] (SDValue Pair) {
    return DAG.getTargetExtractSubreg(Hexagon::vsub_hi, dl, ResTy, Pair);
  };

  MVT PairTy = typeJoin({ResTy, ResTy});
  SDValue P = getInstr(Hexagon::V6_lvsplatw, dl, ResTy,
                       {DAG.getConstant(0x02020202, dl, MVT::i32)}, DAG);
  // Multiply-unsigned halfwords:
  //   LoVec = Vs.uh[2i] * Vt.uh[2i],
  //   HiVec = Vs.uh[2i+1] * Vt.uh[2i+1]
  SDValue T0 = getInstr(Hexagon::V6_vmpyuhv, dl, PairTy, {Vs, Vt}, DAG);
  // The low halves in the LoVec of the pair can be discarded. They are
  // not added to anything (in the full-precision product), so they cannot
  // produce a carry into the higher bits.
  SDValue T1 = getInstr(Hexagon::V6_vlsrw, dl, ResTy, {LoVec(T0), S16}, DAG);
  // Swap low and high halves in Vt, and do the halfword multiplication
  // to get products Vs.uh[2i] * Vt.uh[2i+1] and Vs.uh[2i+1] * Vt.uh[2i].
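  // (For reference: P, the splat of 0x02020202 created above, is the control
  // operand of the V6_vdelta below.  A control byte of 0x02 requests a move
  // across a distance of 2 bytes in the delta network, which in effect swaps
  // the two halfwords within every word lane.)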
  SDValue D0 = getInstr(Hexagon::V6_vdelta, dl, ResTy, {Vt, P}, DAG);
  SDValue T2 = getInstr(Hexagon::V6_vmpyuhv, dl, PairTy, {Vs, D0}, DAG);
  // T2 has mixed products of halfwords: Lo(Vt)*Hi(Vs) and Hi(Vt)*Lo(Vs).
  // These products are words, but cannot be added directly because the
  // sums could overflow. Add these products, by halfwords, where each sum
  // of a pair of halfwords gives a word.
  SDValue T3 = getInstr(Hexagon::V6_vadduhw, dl, PairTy,
                        {LoVec(T2), HiVec(T2)}, DAG);
  // Add the high halfwords from the products of the low halfwords.
  SDValue T4 = DAG.getNode(ISD::ADD, dl, ResTy, {T1, LoVec(T3)});
  SDValue T5 = getInstr(Hexagon::V6_vlsrw, dl, ResTy, {T4, S16}, DAG);
  SDValue T6 = DAG.getNode(ISD::ADD, dl, ResTy, {HiVec(T0), HiVec(T3)});
  SDValue T7 = DAG.getNode(ISD::ADD, dl, ResTy, {T5, T6});
  return T7;
}

SDValue
HexagonTargetLowering::LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const {
  SDValue Val = Op.getOperand(0);
  MVT ResTy = ty(Op);
  MVT ValTy = ty(Val);
  const SDLoc &dl(Op);

  if (isHvxBoolTy(ValTy) && ResTy.isScalarInteger()) {
    unsigned HwLen = Subtarget.getVectorLength();
    MVT WordTy = MVT::getVectorVT(MVT::i32, HwLen/4);
    SDValue VQ = compressHvxPred(Val, dl, WordTy, DAG);
    unsigned BitWidth = ResTy.getSizeInBits();

    if (BitWidth < 64) {
      SDValue W0 = extractHvxElementReg(VQ, DAG.getConstant(0, dl, MVT::i32),
                                        dl, MVT::i32, DAG);
      if (BitWidth == 32)
        return W0;
      assert(BitWidth < 32u);
      return DAG.getZExtOrTrunc(W0, dl, ResTy);
    }

    // The result is >= 64 bits. The only options are 64 or 128.
    assert(BitWidth == 64 || BitWidth == 128);
    SmallVector<SDValue,4> Words;
    for (unsigned i = 0; i != BitWidth/32; ++i) {
      SDValue W = extractHvxElementReg(
          VQ, DAG.getConstant(i, dl, MVT::i32), dl, MVT::i32, DAG);
      Words.push_back(W);
    }
    SmallVector<SDValue,2> Combines;
    assert(Words.size() % 2 == 0);
    for (unsigned i = 0, e = Words.size(); i < e; i += 2) {
      SDValue C = DAG.getNode(
          HexagonISD::COMBINE, dl, MVT::i64, {Words[i+1], Words[i]});
      Combines.push_back(C);
    }

    if (BitWidth == 64)
      return Combines[0];

    return DAG.getNode(ISD::BUILD_PAIR, dl, ResTy, Combines);
  }
  if (isHvxBoolTy(ResTy) && ValTy.isScalarInteger()) {
    // Handle bitcast from i128 -> v128i1 and i64 -> v64i1.
    unsigned BitWidth = ValTy.getSizeInBits();
    unsigned HwLen = Subtarget.getVectorLength();
    assert(BitWidth == HwLen);

    MVT ValAsVecTy = MVT::getVectorVT(MVT::i8, BitWidth / 8);
    SDValue ValAsVec = DAG.getBitcast(ValAsVecTy, Val);
    // Splat each byte of Val 8 times.
    // Bytes = [(b0)x8, (b1)x8, ...., (b15)x8]
    // where b0, b1,..., b15 are least to most significant bytes of Val.
    SmallVector<SDValue, 128> Bytes;
    // Tmp: 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80, 0x01,0x02,0x04,0x08,...
    // These are bytes with the LSB rotated left with respect to their index.
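    // Illustrative example: if b0 == 0b00000101, then after the AND below
    // the first eight bytes of I2V are {0x01, 0x00, 0x04, 0x00, 0x00, 0x00,
    // 0x00, 0x00}, i.e. byte 8*i+j is non-zero exactly when bit j of b_i is
    // set.  The V2Q at the end then turns each non-zero byte into a set
    // predicate bit, completing the scalar-to-boolean-vector bitcast.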
    SmallVector<SDValue, 128> Tmp;
    for (unsigned I = 0; I != HwLen / 8; ++I) {
      SDValue Idx = DAG.getConstant(I, dl, MVT::i32);
      SDValue Byte =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, ValAsVec, Idx);
      for (unsigned J = 0; J != 8; ++J) {
        Bytes.push_back(Byte);
        Tmp.push_back(DAG.getConstant(1ull << J, dl, MVT::i8));
      }
    }

    MVT ConstantVecTy = MVT::getVectorVT(MVT::i8, HwLen);
    SDValue ConstantVec = DAG.getBuildVector(ConstantVecTy, dl, Tmp);
    SDValue I2V = buildHvxVectorReg(Bytes, dl, ConstantVecTy, DAG);

    // Each byte in I2V will be non-zero iff the corresponding bit is set
    // in Val.
    I2V = DAG.getNode(ISD::AND, dl, ConstantVecTy, {I2V, ConstantVec});
    return DAG.getNode(HexagonISD::V2Q, dl, ResTy, I2V);
  }

  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const {
  // Sign- and zero-extends are legal.
  assert(Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG);
  return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(Op), ty(Op),
                     Op.getOperand(0));
}

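// Note (illustrative): a SELECT with a scalar condition and HVX predicate
// (vNi1) operands is lowered by converting the predicate operands to
// equal-sized integer vectors with Q2V, selecting between those, and
// converting the result back with V2Q.  For example, selecting between two
// v32i1 values on a 128-byte HVX target goes through v32i32.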
SDValue
HexagonTargetLowering::LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  if (ResTy.getVectorElementType() != MVT::i1)
    return Op;

  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  unsigned VecLen = ResTy.getVectorNumElements();
  assert(HwLen % VecLen == 0);
  unsigned ElemSize = HwLen / VecLen;

  MVT VecTy = MVT::getVectorVT(MVT::getIntegerVT(ElemSize * 8), VecLen);
  SDValue S =
      DAG.getNode(ISD::SELECT, dl, VecTy, Op.getOperand(0),
                  DAG.getNode(HexagonISD::Q2V, dl, VecTy, Op.getOperand(1)),
                  DAG.getNode(HexagonISD::Q2V, dl, VecTy, Op.getOperand(2)));
  return DAG.getNode(HexagonISD::V2Q, dl, ResTy, S);
}

SDValue
HexagonTargetLowering::LowerHvxShift(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue S = getVectorShiftByInt(Op, DAG))
    return S;
  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  MVT ResTy = ty(Op);

  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  bool Use64b = Subtarget.useHVX64BOps();
  unsigned IntPredCast = Use64b ? Intrinsic::hexagon_V6_pred_typecast
                                : Intrinsic::hexagon_V6_pred_typecast_128B;
  if (IntNo == IntPredCast) {
    SDValue Vs = Op.getOperand(1);
    MVT OpTy = ty(Vs);
    if (isHvxBoolTy(ResTy) && isHvxBoolTy(OpTy)) {
      if (ResTy == OpTy)
        return Vs;
      return DAG.getNode(HexagonISD::TYPECAST, dl, ResTy, Vs);
    }
  }

  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MaskN = cast<MaskedLoadStoreSDNode>(Op.getNode());
  SDValue Mask = MaskN->getMask();
  SDValue Chain = MaskN->getChain();
  SDValue Base = MaskN->getBasePtr();
  auto *MemOp = MF.getMachineMemOperand(MaskN->getMemOperand(), 0, HwLen);

  unsigned Opc = Op->getOpcode();
  assert(Opc == ISD::MLOAD || Opc == ISD::MSTORE);

  if (Opc == ISD::MLOAD) {
    MVT ValTy = ty(Op);
    SDValue Load = DAG.getLoad(ValTy, dl, Chain, Base, MemOp);
    SDValue Thru = cast<MaskedLoadSDNode>(MaskN)->getPassThru();
    if (isUndef(Thru))
      return Load;
    SDValue VSel = DAG.getNode(ISD::VSELECT, dl, ValTy, Mask, Load, Thru);
    return DAG.getMergeValues({VSel, Load.getValue(1)}, dl);
  }

  // MSTORE
  // HVX only has aligned masked stores.

  // TODO: Fold negations of the mask into the store.
  unsigned StoreOpc = Hexagon::V6_vS32b_qpred_ai;
  SDValue Value = cast<MaskedStoreSDNode>(MaskN)->getValue();
  SDValue Offset0 = DAG.getTargetConstant(0, dl, ty(Base));

  if (MaskN->getAlign().value() % HwLen == 0) {
    SDValue Store = getInstr(StoreOpc, dl, MVT::Other,
                             {Mask, Base, Offset0, Value, Chain}, DAG);
    DAG.setNodeMemRefs(cast<MachineSDNode>(Store.getNode()), {MemOp});
    return Store;
  }

  // Unaligned case.
  auto StoreAlign = [&](SDValue V, SDValue A) {
    SDValue Z = getZero(dl, ty(V), DAG);
    // TODO: use funnel shifts?
    // vlalign(Vu,Vv,Rt) rotates the pair Vu:Vv left by Rt and takes the
    // upper half.
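    // Illustrative note: A is the (unaligned) base address; only its offset
    // within an HwLen-sized block matters for the alignment amount.  LoV
    // below then holds V shifted up by that offset (zeros filling the
    // bottom), and HiV holds the bytes of V that spill over into the next
    // block (zeros above), so the two predicated stores emitted afterwards
    // cover both blocks touched by the unaligned access.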
    SDValue LoV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {V, Z, A}, DAG);
    SDValue HiV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {Z, V, A}, DAG);
    return std::make_pair(LoV, HiV);
  };

  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue MaskV = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, Mask);
  VectorPair Tmp = StoreAlign(MaskV, Base);
  VectorPair MaskU = {DAG.getNode(HexagonISD::V2Q, dl, BoolTy, Tmp.first),
                      DAG.getNode(HexagonISD::V2Q, dl, BoolTy, Tmp.second)};
  VectorPair ValueU = StoreAlign(Value, Base);

  SDValue Offset1 = DAG.getTargetConstant(HwLen, dl, MVT::i32);
  SDValue StoreLo =
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.first, Base, Offset0, ValueU.first, Chain}, DAG);
  SDValue StoreHi =
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.second, Base, Offset1, ValueU.second, Chain}, DAG);
  DAG.setNodeMemRefs(cast<MachineSDNode>(StoreLo.getNode()), {MemOp});
  DAG.setNodeMemRefs(cast<MachineSDNode>(StoreHi.getNode()), {MemOp});
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, {StoreLo, StoreHi});
}

SDValue
HexagonTargetLowering::SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const {
  assert(!Op.isMachineOpcode());
  SmallVector<SDValue,2> OpsL, OpsH;
  const SDLoc &dl(Op);

  auto SplitVTNode = [&DAG,this] (const VTSDNode *N) {
    MVT Ty = typeSplit(N->getVT().getSimpleVT()).first;
    SDValue TV = DAG.getValueType(Ty);
    return std::make_pair(TV, TV);
  };

  for (SDValue A : Op.getNode()->ops()) {
    VectorPair P = Subtarget.isHVXVectorType(ty(A), true)
                       ? opSplit(A, dl, DAG)
                       : std::make_pair(A, A);
    // Special case for type operand.
    if (Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
      if (const auto *N = dyn_cast<const VTSDNode>(A.getNode()))
        P = SplitVTNode(N);
    }
    OpsL.push_back(P.first);
    OpsH.push_back(P.second);
  }

  MVT ResTy = ty(Op);
  MVT HalfTy = typeSplit(ResTy).first;
  SDValue L = DAG.getNode(Op.getOpcode(), dl, HalfTy, OpsL);
  SDValue H = DAG.getNode(Op.getOpcode(), dl, HalfTy, OpsH);
  SDValue S = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, L, H);
  return S;
}

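// Split a memory operation on an HVX vector pair into two operations on
// single vectors.  For example (illustrative), on a 128-byte HVX target a
// load of v64i32 (a 256-byte vector pair) becomes two v32i32 loads, at
// Base and Base+128, whose chains are joined with a TokenFactor.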
SDValue
HexagonTargetLowering::SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const {
  auto *MemN = cast<MemSDNode>(Op.getNode());

  MVT MemTy = MemN->getMemoryVT().getSimpleVT();
  if (!isHvxPairTy(MemTy))
    return Op;

  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT SingleTy = typeSplit(MemTy).first;
  SDValue Chain = MemN->getChain();
  SDValue Base0 = MemN->getBasePtr();
  SDValue Base1 = DAG.getMemBasePlusOffset(Base0, TypeSize::Fixed(HwLen), dl);
  unsigned MemOpc = MemN->getOpcode();

  MachineMemOperand *MOp0 = nullptr, *MOp1 = nullptr;
  if (MachineMemOperand *MMO = MemN->getMemOperand()) {
    MachineFunction &MF = DAG.getMachineFunction();
    uint64_t MemSize = (MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE)
                           ? (uint64_t)MemoryLocation::UnknownSize
                           : HwLen;
    MOp0 = MF.getMachineMemOperand(MMO, 0, MemSize);
    MOp1 = MF.getMachineMemOperand(MMO, HwLen, MemSize);
  }

  if (MemOpc == ISD::LOAD) {
    assert(cast<LoadSDNode>(Op)->isUnindexed());
    SDValue Load0 = DAG.getLoad(SingleTy, dl, Chain, Base0, MOp0);
    SDValue Load1 = DAG.getLoad(SingleTy, dl, Chain, Base1, MOp1);
    return DAG.getMergeValues(
        { DAG.getNode(ISD::CONCAT_VECTORS, dl, MemTy, Load0, Load1),
          DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      Load0.getValue(1), Load1.getValue(1)) }, dl);
  }
  if (MemOpc == ISD::STORE) {
    assert(cast<StoreSDNode>(Op)->isUnindexed());
    VectorPair Vals = opSplit(cast<StoreSDNode>(Op)->getValue(), dl, DAG);
    SDValue Store0 = DAG.getStore(Chain, dl, Vals.first, Base0, MOp0);
    SDValue Store1 = DAG.getStore(Chain, dl, Vals.second, Base1, MOp1);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store0, Store1);
  }

  assert(MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE);

  auto MaskN = cast<MaskedLoadStoreSDNode>(Op);
  assert(MaskN->isUnindexed());
  VectorPair Masks = opSplit(MaskN->getMask(), dl, DAG);
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  if (MemOpc == ISD::MLOAD) {
    VectorPair Thru =
        opSplit(cast<MaskedLoadSDNode>(Op)->getPassThru(), dl, DAG);
    SDValue MLoad0 =
        DAG.getMaskedLoad(SingleTy, dl, Chain, Base0, Offset, Masks.first,
                          Thru.first, SingleTy, MOp0, ISD::UNINDEXED,
                          ISD::NON_EXTLOAD, false);
    SDValue MLoad1 =
        DAG.getMaskedLoad(SingleTy, dl, Chain, Base1, Offset, Masks.second,
                          Thru.second, SingleTy, MOp1, ISD::UNINDEXED,
                          ISD::NON_EXTLOAD, false);
    return DAG.getMergeValues(
        { DAG.getNode(ISD::CONCAT_VECTORS, dl, MemTy, MLoad0, MLoad1),
          DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      MLoad0.getValue(1), MLoad1.getValue(1)) }, dl);
  }
  if (MemOpc == ISD::MSTORE) {
    VectorPair Vals = opSplit(cast<MaskedStoreSDNode>(Op)->getValue(), dl, DAG);
    SDValue MStore0 = DAG.getMaskedStore(Chain, dl, Vals.first, Base0, Offset,
                                         Masks.first, SingleTy, MOp0,
                                         ISD::UNINDEXED, false, false);
    SDValue MStore1 = DAG.getMaskedStore(Chain, dl, Vals.second, Base1, Offset,
                                         Masks.second, SingleTy, MOp1,
                                         ISD::UNINDEXED, false, false);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MStore0, MStore1);
  }

  std::string Name = "Unexpected operation: " + Op->getOperationName(&DAG);
  llvm_unreachable(Name.c_str());
}

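// Widen a short (sub-HVX) vector load to a full HVX masked load.  For
// example (illustrative), on a 128-byte HVX target a load of v16i8 can be
// performed as a masked v128i8 load whose mask, built with
// V6_pred_scalar2 (i.e. a "vsetq" of the original store size), enables only
// the first 16 byte lanes; the loaded value is then reinterpreted with the
// original element type.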
SDValue
HexagonTargetLowering::WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  auto *LoadN = cast<LoadSDNode>(Op.getNode());
  assert(LoadN->isUnindexed() && "Not widening indexed loads yet");
  assert(LoadN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening loads of i1 yet");

  SDValue Chain = LoadN->getChain();
  SDValue Base = LoadN->getBasePtr();
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  MVT ResTy = ty(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  unsigned ResLen = ResTy.getStoreSize();
  assert(ResLen < HwLen && "vsetq(v1) prerequisite");

  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                          {DAG.getConstant(ResLen, dl, MVT::i32)}, DAG);

  MVT LoadTy = MVT::getVectorVT(MVT::i8, HwLen);
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MemOp = MF.getMachineMemOperand(LoadN->getMemOperand(), 0, HwLen);

  SDValue Load = DAG.getMaskedLoad(LoadTy, dl, Chain, Base, Offset, Mask,
                                   DAG.getUNDEF(LoadTy), LoadTy, MemOp,
                                   ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
  SDValue Value = opCastElem(Load, ResTy.getVectorElementType(), DAG);
  return DAG.getMergeValues({Value, Chain}, dl);
}

SDValue
HexagonTargetLowering::WidenHvxStore(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  auto *StoreN = cast<StoreSDNode>(Op.getNode());
  assert(StoreN->isUnindexed() && "Not widening indexed stores yet");
  assert(StoreN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening stores of i1 yet");

  SDValue Chain = StoreN->getChain();
  SDValue Base = StoreN->getBasePtr();
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  SDValue Value = opCastElem(StoreN->getValue(), MVT::i8, DAG);
  MVT ValueTy = ty(Value);
  unsigned ValueLen = ValueTy.getVectorNumElements();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(isPowerOf2_32(ValueLen));

  for (unsigned Len = ValueLen; Len < HwLen; ) {
    Value = opJoin({DAG.getUNDEF(ty(Value)), Value}, dl, DAG);
    Len = ty(Value).getVectorNumElements(); // This is Len *= 2
  }
  assert(ty(Value).getVectorNumElements() == HwLen); // Paranoia

  assert(ValueLen < HwLen && "vsetq(v1) prerequisite");
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                          {DAG.getConstant(ValueLen, dl, MVT::i32)}, DAG);
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MemOp = MF.getMachineMemOperand(StoreN->getMemOperand(), 0, HwLen);
  return DAG.getMaskedStore(Chain, dl, Value, Base, Offset, Mask, ty(Value),
                            MemOp, ISD::UNINDEXED, false, false);
}

SDValue
HexagonTargetLowering::WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  MVT ElemTy = ty(Op0).getVectorElementType();
  unsigned HwLen = Subtarget.getVectorLength();

  unsigned WideOpLen = (8 * HwLen) / ElemTy.getSizeInBits();
  assert(WideOpLen * ElemTy.getSizeInBits() == 8 * HwLen);
  MVT WideOpTy = MVT::getVectorVT(ElemTy, WideOpLen);
  if (!Subtarget.isHVXVectorType(WideOpTy, true))
    return SDValue();

  SDValue WideOp0 = appendUndef(Op0, WideOpTy, DAG);
  SDValue WideOp1 = appendUndef(Op1, WideOpTy, DAG);
  EVT ResTy =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), WideOpTy);
  SDValue SetCC = DAG.getNode(ISD::SETCC, dl, ResTy,
                              {WideOp0, WideOp1, Op.getOperand(2)});

  EVT RetTy = getTypeToTransformTo(*DAG.getContext(), ty(Op));
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RetTy,
                     {SetCC, getZero(dl, MVT::i32, DAG)});
}

SDValue
HexagonTargetLowering::WidenHvxExtend(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned HwWidth = 8*Subtarget.getVectorLength();

  SDValue Op0 = Op.getOperand(0);
  MVT ResTy = ty(Op);
  MVT OpTy = ty(Op0);
  if (!Subtarget.isHVXElementType(OpTy) || !Subtarget.isHVXElementType(ResTy))
    return SDValue();

  // .-res, op->  ScalarVec      Illegal  HVX
  // Scalar       ok             -        -
  // Illegal      widen(insert)  widen    -
  // HVX          -              widen    ok
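  // Example (illustrative): on a 128-byte HVX target, zext v32i8 -> v32i16
  // has both the operand (32 bytes) and the result (64 bytes) in the
  // "Illegal" category, so both are widened: the operand is padded with
  // undefs to v128i8 and a VUNPACKU node of type v64i16 is produced, with
  // only the lanes corresponding to the original 32 elements carrying
  // meaningful results.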

  auto getFactor = [HwWidth](MVT Ty) {
    unsigned Width = Ty.getSizeInBits();
    return HwWidth > Width ? HwWidth / Width : 1;
  };

  auto getWideTy = [getFactor](MVT Ty) {
    unsigned WideLen = Ty.getVectorNumElements() * getFactor(Ty);
    return MVT::getVectorVT(Ty.getVectorElementType(), WideLen);
  };

  unsigned Opcode = Op.getOpcode() == ISD::SIGN_EXTEND ? HexagonISD::VUNPACK
                                                       : HexagonISD::VUNPACKU;
  SDValue WideOp = appendUndef(Op0, getWideTy(OpTy), DAG);
  SDValue WideRes = DAG.getNode(Opcode, dl, getWideTy(ResTy), WideOp);
  return WideRes;
}

SDValue
HexagonTargetLowering::WidenHvxTruncate(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned HwWidth = 8*Subtarget.getVectorLength();

  SDValue Op0 = Op.getOperand(0);
  MVT ResTy = ty(Op);
  MVT OpTy = ty(Op0);
  if (!Subtarget.isHVXElementType(OpTy) || !Subtarget.isHVXElementType(ResTy))
    return SDValue();

  // .-res, op->  ScalarVec  Illegal         HVX
  // Scalar       ok         extract(widen)  -
  // Illegal      -          widen           widen
  // HVX          -          -               ok
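  // Example (illustrative): on a 128-byte HVX target, trunc v64i16 -> v64i8
  // falls into the "HVX operand, Illegal result" cell: the operand is
  // already an HVX vector, so a VPACKL node with the widened result type
  // v128i8 is created directly from it (the case handled just below).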

  auto getFactor = [HwWidth](MVT Ty) {
    unsigned Width = Ty.getSizeInBits();
    assert(HwWidth % Width == 0);
    return HwWidth / Width;
  };

  auto getWideTy = [getFactor](MVT Ty) {
    unsigned WideLen = Ty.getVectorNumElements() * getFactor(Ty);
    return MVT::getVectorVT(Ty.getVectorElementType(), WideLen);
  };

  if (Subtarget.isHVXVectorType(OpTy))
    return DAG.getNode(HexagonISD::VPACKL, dl, getWideTy(ResTy), Op0);

  assert(!isTypeLegal(OpTy) && "HVX-widening a truncate of scalar?");

  SDValue WideOp = appendUndef(Op0, getWideTy(OpTy), DAG);
  SDValue WideRes = DAG.getNode(HexagonISD::VPACKL, dl, getWideTy(ResTy),
                                WideOp);
  // If the original result wasn't legal and was supposed to be widened,
  // we're done.
  if (shouldWidenToHvx(ResTy, DAG))
    return WideRes;

  // The original result type wasn't meant to be widened to HVX, so
  // leave it as it is. Standard legalization should be able to deal
  // with it (since now it's a result of a target-independent ISD
  // node).
  assert(ResTy.isVector());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResTy,
                     {WideRes, getZero(dl, MVT::i32, DAG)});
}

SDValue
HexagonTargetLowering::LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  bool IsPairOp = isHvxPairTy(ty(Op)) ||
                  llvm::any_of(Op.getNode()->ops(), [this] (SDValue V) {
                    return isHvxPairTy(ty(V));
                  });

  if (IsPairOp) {
    switch (Opc) {
      default:
        break;
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::MLOAD:
      case ISD::MSTORE:
        return SplitHvxMemOp(Op, DAG);
      case ISD::CTPOP:
      case ISD::CTLZ:
      case ISD::CTTZ:
      case ISD::MUL:
      case ISD::MULHS:
      case ISD::MULHU:
      case ISD::AND:
      case ISD::OR:
      case ISD::XOR:
      case ISD::SRA:
      case ISD::SHL:
      case ISD::SRL:
      case ISD::SMIN:
      case ISD::SMAX:
      case ISD::UMIN:
      case ISD::UMAX:
      case ISD::SETCC:
      case ISD::VSELECT:
      case ISD::SIGN_EXTEND:
      case ISD::ZERO_EXTEND:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SPLAT_VECTOR:
        return SplitHvxPairOp(Op, DAG);
    }
  }

  switch (Opc) {
    default:
      break;
    case ISD::BUILD_VECTOR:            return LowerHvxBuildVector(Op, DAG);
    case ISD::CONCAT_VECTORS:          return LowerHvxConcatVectors(Op, DAG);
    case ISD::INSERT_SUBVECTOR:        return LowerHvxInsertSubvector(Op, DAG);
    case ISD::INSERT_VECTOR_ELT:       return LowerHvxInsertElement(Op, DAG);
    case ISD::EXTRACT_SUBVECTOR:       return LowerHvxExtractSubvector(Op, DAG);
    case ISD::EXTRACT_VECTOR_ELT:      return LowerHvxExtractElement(Op, DAG);
    case ISD::BITCAST:                 return LowerHvxBitcast(Op, DAG);
    case ISD::ANY_EXTEND:              return LowerHvxAnyExt(Op, DAG);
    case ISD::SIGN_EXTEND:             return LowerHvxSignExt(Op, DAG);
    case ISD::ZERO_EXTEND:             return LowerHvxZeroExt(Op, DAG);
    case ISD::CTTZ:                    return LowerHvxCttz(Op, DAG);
    case ISD::SELECT:                  return LowerHvxSelect(Op, DAG);
    case ISD::SRA:
    case ISD::SHL:
    case ISD::SRL:                     return LowerHvxShift(Op, DAG);
    case ISD::MULHS:
    case ISD::MULHU:                   return LowerHvxMulh(Op, DAG);
    case ISD::ANY_EXTEND_VECTOR_INREG: return LowerHvxExtend(Op, DAG);
    case ISD::SETCC:
    case ISD::INTRINSIC_VOID:          return Op;
    case ISD::INTRINSIC_WO_CHAIN:      return LowerHvxIntrinsic(Op, DAG);
    case ISD::MLOAD:
    case ISD::MSTORE:                  return LowerHvxMaskedOp(Op, DAG);
    // Unaligned loads will be handled by the default lowering.
    case ISD::LOAD:                    return SDValue();
  }
#ifndef NDEBUG
  Op.dumpr(&DAG);
#endif
  llvm_unreachable("Unhandled HVX operation");
}

void
HexagonTargetLowering::LowerHvxOperationWrapper(SDNode *N,
      SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  unsigned Opc = N->getOpcode();
  SDValue Op(N, 0);

  switch (Opc) {
    case ISD::ANY_EXTEND:
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
      if (shouldWidenToHvx(ty(Op.getOperand(0)), DAG)) {
        if (SDValue T = WidenHvxExtend(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::SETCC:
      if (shouldWidenToHvx(ty(Op.getOperand(0)), DAG)) {
        if (SDValue T = WidenHvxSetCC(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::TRUNCATE:
      if (shouldWidenToHvx(ty(Op.getOperand(0)), DAG)) {
        if (SDValue T = WidenHvxTruncate(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::STORE: {
      if (shouldWidenToHvx(ty(cast<StoreSDNode>(N)->getValue()), DAG)) {
        SDValue Store = WidenHvxStore(Op, DAG);
        Results.push_back(Store);
      }
      break;
    }
    case ISD::MLOAD:
      if (isHvxPairTy(ty(Op))) {
        SDValue S = SplitHvxMemOp(Op, DAG);
        assert(S->getOpcode() == ISD::MERGE_VALUES);
        Results.push_back(S.getOperand(0));
        Results.push_back(S.getOperand(1));
      }
      break;
    case ISD::MSTORE:
      if (isHvxPairTy(ty(Op->getOperand(1)))) {    // Stored value
        SDValue S = SplitHvxMemOp(Op, DAG);
        Results.push_back(S);
      }
      break;
    default:
      break;
  }
}

void
HexagonTargetLowering::ReplaceHvxNodeResults(SDNode *N,
      SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  unsigned Opc = N->getOpcode();
  SDValue Op(N, 0);
  switch (Opc) {
    case ISD::ANY_EXTEND:
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
      if (shouldWidenToHvx(ty(Op), DAG)) {
        if (SDValue T = WidenHvxExtend(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::SETCC:
      if (shouldWidenToHvx(ty(Op), DAG)) {
        if (SDValue T = WidenHvxSetCC(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::TRUNCATE:
      if (shouldWidenToHvx(ty(Op), DAG)) {
        if (SDValue T = WidenHvxTruncate(Op, DAG))
          Results.push_back(T);
      }
      break;
    case ISD::LOAD: {
      if (shouldWidenToHvx(ty(Op), DAG)) {
        SDValue Load = WidenHvxLoad(Op, DAG);
        assert(Load->getOpcode() == ISD::MERGE_VALUES);
        Results.push_back(Load.getOperand(0));
        Results.push_back(Load.getOperand(1));
      }
      break;
    }
    case ISD::BITCAST:
      if (isHvxBoolTy(ty(N->getOperand(0)))) {
        SDValue Op(N, 0);
        SDValue C = LowerHvxBitcast(Op, DAG);
        Results.push_back(C);
      }
      break;
    default:
      break;
  }
}

SDValue
HexagonTargetLowering::PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
      const {
  const SDLoc &dl(N);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op(N, 0);
  unsigned Opc = Op.getOpcode();
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SmallVector<SDValue, 4> Ops(N->ops().begin(), N->ops().end());

  switch (Opc) {
    case ISD::VSELECT: {
      // (vselect (xor x, qtrue), v0, v1) -> (vselect x, v1, v0)
      SDValue Cond = Ops[0];
      if (Cond->getOpcode() == ISD::XOR) {
        SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
        if (C1->getOpcode() == HexagonISD::QTRUE)
          return DAG.getNode(ISD::VSELECT, dl, ty(Op), C0, Ops[2], Ops[1]);
      }
      break;
    }
    case HexagonISD::V2Q:
      if (Ops[0].getOpcode() == ISD::SPLAT_VECTOR) {
        if (const auto *C = dyn_cast<ConstantSDNode>(Ops[0].getOperand(0)))
          return C->isZero() ? DAG.getNode(HexagonISD::QFALSE, dl, ty(Op))
                             : DAG.getNode(HexagonISD::QTRUE, dl, ty(Op));
      }
      break;
    case HexagonISD::Q2V:
      if (Ops[0].getOpcode() == HexagonISD::QTRUE)
        return DAG.getNode(ISD::SPLAT_VECTOR, dl, ty(Op),
                           DAG.getConstant(-1, dl, MVT::i32));
      if (Ops[0].getOpcode() == HexagonISD::QFALSE)
        return getZero(dl, ty(Op), DAG);
      break;
    case HexagonISD::VINSERTW0:
      if (isUndef(Ops[1]))
        return Ops[0];
      break;
    case HexagonISD::VROR: {
      if (Ops[0].getOpcode() == HexagonISD::VROR) {
        SDValue Vec = Ops[0].getOperand(0);
        SDValue Rot0 = Ops[1], Rot1 = Ops[0].getOperand(1);
        SDValue Rot = DAG.getNode(ISD::ADD, dl, ty(Rot0), {Rot0, Rot1});
        return DAG.getNode(HexagonISD::VROR, dl, ty(Op), {Vec, Rot});
      }
      break;
    }
  }

  return SDValue();
}

bool
HexagonTargetLowering::shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const {
  auto Action = getPreferredHvxVectorAction(Ty);
  if (Action == TargetLoweringBase::TypeWidenVector) {
    EVT WideTy = getTypeToTransformTo(*DAG.getContext(), Ty);
    assert(WideTy.isSimple());
    return Subtarget.isHVXVectorType(WideTy.getSimpleVT(), true);
  }
  return false;
}

bool
HexagonTargetLowering::isHvxOperation(SDNode *N, SelectionDAG &DAG) const {
  if (!Subtarget.useHVXOps())
    return false;
  // If the type of any result or of any operand is an HVX vector type,
  // this is an HVX operation.
  auto IsHvxTy = [this](EVT Ty) {
    return Ty.isSimple() && Subtarget.isHVXVectorType(Ty.getSimpleVT(), true);
  };
  auto IsHvxOp = [this](SDValue Op) {
    return Op.getValueType().isSimple() &&
           Subtarget.isHVXVectorType(ty(Op), true);
  };
  if (llvm::any_of(N->values(), IsHvxTy) || llvm::any_of(N->ops(), IsHvxOp))
    return true;

  // Check if this could be an HVX operation after type widening.
  auto IsWidenedToHvx = [this, &DAG](SDValue Op) {
    if (!Op.getValueType().isSimple())
      return false;
    MVT ValTy = ty(Op);
    return ValTy.isVector() && shouldWidenToHvx(ValTy, DAG);
  };

  for (int i = 0, e = N->getNumValues(); i != e; ++i) {
    if (IsWidenedToHvx(SDValue(N, i)))
      return true;
  }
  return llvm::any_of(N->ops(), IsWidenedToHvx);
}