1 //===-- HexagonISelLoweringHVX.cpp --- Lowering HVX operations ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "HexagonISelLowering.h" 10 #include "HexagonRegisterInfo.h" 11 #include "HexagonSubtarget.h" 12 #include "llvm/ADT/SetVector.h" 13 #include "llvm/ADT/SmallVector.h" 14 #include "llvm/Analysis/MemoryLocation.h" 15 #include "llvm/CodeGen/MachineBasicBlock.h" 16 #include "llvm/CodeGen/MachineFunction.h" 17 #include "llvm/CodeGen/MachineInstr.h" 18 #include "llvm/CodeGen/MachineOperand.h" 19 #include "llvm/CodeGen/MachineRegisterInfo.h" 20 #include "llvm/CodeGen/TargetInstrInfo.h" 21 #include "llvm/IR/IntrinsicsHexagon.h" 22 #include "llvm/Support/CommandLine.h" 23 24 #include <algorithm> 25 #include <string> 26 #include <utility> 27 28 using namespace llvm; 29 30 static cl::opt<unsigned> HvxWidenThreshold("hexagon-hvx-widen", 31 cl::Hidden, cl::init(16), 32 cl::desc("Lower threshold (in bytes) for widening to HVX vectors")); 33 34 static const MVT LegalV64[] = { MVT::v64i8, MVT::v32i16, MVT::v16i32 }; 35 static const MVT LegalW64[] = { MVT::v128i8, MVT::v64i16, MVT::v32i32 }; 36 static const MVT LegalV128[] = { MVT::v128i8, MVT::v64i16, MVT::v32i32 }; 37 static const MVT LegalW128[] = { MVT::v256i8, MVT::v128i16, MVT::v64i32 }; 38 39 static std::tuple<unsigned, unsigned, unsigned> getIEEEProperties(MVT Ty) { 40 // For a float scalar type, return (exp-bits, exp-bias, fraction-bits) 41 MVT ElemTy = Ty.getScalarType(); 42 switch (ElemTy.SimpleTy) { 43 case MVT::f16: 44 return std::make_tuple(5, 15, 10); 45 case MVT::f32: 46 return std::make_tuple(8, 127, 23); 47 case MVT::f64: 48 return std::make_tuple(11, 1023, 52); 49 default: 50 break; 51 } 52 llvm_unreachable(("Unexpected type: 
" + EVT(ElemTy).getEVTString()).c_str()); 53 } 54 55 void 56 HexagonTargetLowering::initializeHVXLowering() { 57 if (Subtarget.useHVX64BOps()) { 58 addRegisterClass(MVT::v64i8, &Hexagon::HvxVRRegClass); 59 addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass); 60 addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass); 61 addRegisterClass(MVT::v128i8, &Hexagon::HvxWRRegClass); 62 addRegisterClass(MVT::v64i16, &Hexagon::HvxWRRegClass); 63 addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass); 64 // These "short" boolean vector types should be legal because 65 // they will appear as results of vector compares. If they were 66 // not legal, type legalization would try to make them legal 67 // and that would require using operations that do not use or 68 // produce such types. That, in turn, would imply using custom 69 // nodes, which would be unoptimizable by the DAG combiner. 70 // The idea is to rely on target-independent operations as much 71 // as possible. 72 addRegisterClass(MVT::v16i1, &Hexagon::HvxQRRegClass); 73 addRegisterClass(MVT::v32i1, &Hexagon::HvxQRRegClass); 74 addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass); 75 } else if (Subtarget.useHVX128BOps()) { 76 addRegisterClass(MVT::v128i8, &Hexagon::HvxVRRegClass); 77 addRegisterClass(MVT::v64i16, &Hexagon::HvxVRRegClass); 78 addRegisterClass(MVT::v32i32, &Hexagon::HvxVRRegClass); 79 addRegisterClass(MVT::v256i8, &Hexagon::HvxWRRegClass); 80 addRegisterClass(MVT::v128i16, &Hexagon::HvxWRRegClass); 81 addRegisterClass(MVT::v64i32, &Hexagon::HvxWRRegClass); 82 addRegisterClass(MVT::v32i1, &Hexagon::HvxQRRegClass); 83 addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass); 84 addRegisterClass(MVT::v128i1, &Hexagon::HvxQRRegClass); 85 if (Subtarget.useHVXV68Ops() && Subtarget.useHVXFloatingPoint()) { 86 addRegisterClass(MVT::v32f32, &Hexagon::HvxVRRegClass); 87 addRegisterClass(MVT::v64f16, &Hexagon::HvxVRRegClass); 88 addRegisterClass(MVT::v64f32, &Hexagon::HvxWRRegClass); 89 
addRegisterClass(MVT::v128f16, &Hexagon::HvxWRRegClass); 90 } 91 } 92 93 // Set up operation actions. 94 95 bool Use64b = Subtarget.useHVX64BOps(); 96 ArrayRef<MVT> LegalV = Use64b ? LegalV64 : LegalV128; 97 ArrayRef<MVT> LegalW = Use64b ? LegalW64 : LegalW128; 98 MVT ByteV = Use64b ? MVT::v64i8 : MVT::v128i8; 99 MVT WordV = Use64b ? MVT::v16i32 : MVT::v32i32; 100 MVT ByteW = Use64b ? MVT::v128i8 : MVT::v256i8; 101 102 auto setPromoteTo = [this] (unsigned Opc, MVT FromTy, MVT ToTy) { 103 setOperationAction(Opc, FromTy, Promote); 104 AddPromotedToType(Opc, FromTy, ToTy); 105 }; 106 107 // Handle bitcasts of vector predicates to scalars (e.g. v32i1 to i32). 108 // Note: v16i1 -> i16 is handled in type legalization instead of op 109 // legalization. 110 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 111 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 112 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 113 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom); 114 setOperationAction(ISD::BITCAST, MVT::v128i1, Custom); 115 setOperationAction(ISD::BITCAST, MVT::i128, Custom); 116 setOperationAction(ISD::VECTOR_SHUFFLE, ByteV, Legal); 117 setOperationAction(ISD::VECTOR_SHUFFLE, ByteW, Legal); 118 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 119 120 if (Subtarget.useHVX128BOps() && Subtarget.useHVXV68Ops() && 121 Subtarget.useHVXFloatingPoint()) { 122 123 static const MVT FloatV[] = { MVT::v64f16, MVT::v32f32 }; 124 static const MVT FloatW[] = { MVT::v128f16, MVT::v64f32 }; 125 126 for (MVT T : FloatV) { 127 setOperationAction(ISD::FADD, T, Legal); 128 setOperationAction(ISD::FSUB, T, Legal); 129 setOperationAction(ISD::FMUL, T, Legal); 130 setOperationAction(ISD::FMINNUM, T, Legal); 131 setOperationAction(ISD::FMAXNUM, T, Legal); 132 133 setOperationAction(ISD::INSERT_SUBVECTOR, T, Custom); 134 setOperationAction(ISD::EXTRACT_SUBVECTOR, T, Custom); 135 136 setOperationAction(ISD::SPLAT_VECTOR, T, Legal); 137 
setOperationAction(ISD::SPLAT_VECTOR, T, Legal); 138 139 setOperationAction(ISD::MLOAD, T, Custom); 140 setOperationAction(ISD::MSTORE, T, Custom); 141 // Custom-lower BUILD_VECTOR. The standard (target-independent) 142 // handling of it would convert it to a load, which is not always 143 // the optimal choice. 144 setOperationAction(ISD::BUILD_VECTOR, T, Custom); 145 } 146 147 148 // BUILD_VECTOR with f16 operands cannot be promoted without 149 // promoting the result, so lower the node to vsplat or constant pool 150 setOperationAction(ISD::BUILD_VECTOR, MVT::f16, Custom); 151 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::f16, Custom); 152 setOperationAction(ISD::SPLAT_VECTOR, MVT::f16, Custom); 153 154 // Vector shuffle is always promoted to ByteV and a bitcast to f16 is 155 // generated. 156 setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v128f16, ByteW); 157 setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v64f16, ByteV); 158 setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v64f32, ByteW); 159 setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v32f32, ByteV); 160 161 for (MVT P : FloatW) { 162 setOperationAction(ISD::LOAD, P, Custom); 163 setOperationAction(ISD::STORE, P, Custom); 164 setOperationAction(ISD::FADD, P, Custom); 165 setOperationAction(ISD::FSUB, P, Custom); 166 setOperationAction(ISD::FMUL, P, Custom); 167 setOperationAction(ISD::FMINNUM, P, Custom); 168 setOperationAction(ISD::FMAXNUM, P, Custom); 169 setOperationAction(ISD::SETCC, P, Custom); 170 setOperationAction(ISD::VSELECT, P, Custom); 171 172 // Custom-lower BUILD_VECTOR. The standard (target-independent) 173 // handling of it would convert it to a load, which is not always 174 // the optimal choice. 175 setOperationAction(ISD::BUILD_VECTOR, P, Custom); 176 // Make concat-vectors custom to handle concats of more than 2 vectors. 
177 setOperationAction(ISD::CONCAT_VECTORS, P, Custom); 178 179 setOperationAction(ISD::MLOAD, P, Custom); 180 setOperationAction(ISD::MSTORE, P, Custom); 181 } 182 183 if (Subtarget.useHVXQFloatOps()) { 184 setOperationAction(ISD::FP_EXTEND, MVT::v64f32, Custom); 185 setOperationAction(ISD::FP_ROUND, MVT::v64f16, Legal); 186 } else if (Subtarget.useHVXIEEEFPOps()) { 187 setOperationAction(ISD::FP_EXTEND, MVT::v64f32, Legal); 188 setOperationAction(ISD::FP_ROUND, MVT::v64f16, Legal); 189 } 190 } 191 192 for (MVT T : LegalV) { 193 setIndexedLoadAction(ISD::POST_INC, T, Legal); 194 setIndexedStoreAction(ISD::POST_INC, T, Legal); 195 196 setOperationAction(ISD::ABS, T, Legal); 197 setOperationAction(ISD::AND, T, Legal); 198 setOperationAction(ISD::OR, T, Legal); 199 setOperationAction(ISD::XOR, T, Legal); 200 setOperationAction(ISD::ADD, T, Legal); 201 setOperationAction(ISD::SUB, T, Legal); 202 setOperationAction(ISD::MUL, T, Legal); 203 setOperationAction(ISD::CTPOP, T, Legal); 204 setOperationAction(ISD::CTLZ, T, Legal); 205 setOperationAction(ISD::SELECT, T, Legal); 206 setOperationAction(ISD::SPLAT_VECTOR, T, Legal); 207 if (T != ByteV) { 208 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal); 209 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal); 210 setOperationAction(ISD::BSWAP, T, Legal); 211 } 212 213 setOperationAction(ISD::SMIN, T, Legal); 214 setOperationAction(ISD::SMAX, T, Legal); 215 if (T.getScalarType() != MVT::i32) { 216 setOperationAction(ISD::UMIN, T, Legal); 217 setOperationAction(ISD::UMAX, T, Legal); 218 } 219 220 setOperationAction(ISD::CTTZ, T, Custom); 221 setOperationAction(ISD::LOAD, T, Custom); 222 setOperationAction(ISD::MLOAD, T, Custom); 223 setOperationAction(ISD::MSTORE, T, Custom); 224 if (T.getScalarType() != MVT::i32) { 225 setOperationAction(ISD::MULHS, T, Legal); 226 setOperationAction(ISD::MULHU, T, Legal); 227 } 228 229 setOperationAction(ISD::BUILD_VECTOR, T, Custom); 230 // Make concat-vectors custom to 
handle concats of more than 2 vectors. 231 setOperationAction(ISD::CONCAT_VECTORS, T, Custom); 232 setOperationAction(ISD::INSERT_SUBVECTOR, T, Custom); 233 setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom); 234 setOperationAction(ISD::EXTRACT_SUBVECTOR, T, Custom); 235 setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom); 236 setOperationAction(ISD::ANY_EXTEND, T, Custom); 237 setOperationAction(ISD::SIGN_EXTEND, T, Custom); 238 setOperationAction(ISD::ZERO_EXTEND, T, Custom); 239 setOperationAction(ISD::FSHL, T, Custom); 240 setOperationAction(ISD::FSHR, T, Custom); 241 if (T != ByteV) { 242 setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom); 243 // HVX only has shifts of words and halfwords. 244 setOperationAction(ISD::SRA, T, Custom); 245 setOperationAction(ISD::SHL, T, Custom); 246 setOperationAction(ISD::SRL, T, Custom); 247 248 // Promote all shuffles to operate on vectors of bytes. 249 setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteV); 250 } 251 252 if (Subtarget.useHVXFloatingPoint()) { 253 // Same action for both QFloat and IEEE. 254 setOperationAction(ISD::SINT_TO_FP, T, Custom); 255 setOperationAction(ISD::UINT_TO_FP, T, Custom); 256 setOperationAction(ISD::FP_TO_SINT, T, Custom); 257 setOperationAction(ISD::FP_TO_UINT, T, Custom); 258 } 259 260 setCondCodeAction(ISD::SETNE, T, Expand); 261 setCondCodeAction(ISD::SETLE, T, Expand); 262 setCondCodeAction(ISD::SETGE, T, Expand); 263 setCondCodeAction(ISD::SETLT, T, Expand); 264 setCondCodeAction(ISD::SETULE, T, Expand); 265 setCondCodeAction(ISD::SETUGE, T, Expand); 266 setCondCodeAction(ISD::SETULT, T, Expand); 267 } 268 269 for (MVT T : LegalW) { 270 // Custom-lower BUILD_VECTOR for vector pairs. The standard (target- 271 // independent) handling of it would convert it to a load, which is 272 // not always the optimal choice. 273 setOperationAction(ISD::BUILD_VECTOR, T, Custom); 274 // Make concat-vectors custom to handle concats of more than 2 vectors. 
275 setOperationAction(ISD::CONCAT_VECTORS, T, Custom); 276 277 // Custom-lower these operations for pairs. Expand them into a concat 278 // of the corresponding operations on individual vectors. 279 setOperationAction(ISD::ANY_EXTEND, T, Custom); 280 setOperationAction(ISD::SIGN_EXTEND, T, Custom); 281 setOperationAction(ISD::ZERO_EXTEND, T, Custom); 282 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Custom); 283 setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom); 284 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal); 285 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal); 286 setOperationAction(ISD::SPLAT_VECTOR, T, Custom); 287 288 setOperationAction(ISD::LOAD, T, Custom); 289 setOperationAction(ISD::STORE, T, Custom); 290 setOperationAction(ISD::MLOAD, T, Custom); 291 setOperationAction(ISD::MSTORE, T, Custom); 292 setOperationAction(ISD::ABS, T, Custom); 293 setOperationAction(ISD::CTLZ, T, Custom); 294 setOperationAction(ISD::CTTZ, T, Custom); 295 setOperationAction(ISD::CTPOP, T, Custom); 296 297 setOperationAction(ISD::ADD, T, Legal); 298 setOperationAction(ISD::SUB, T, Legal); 299 setOperationAction(ISD::MUL, T, Custom); 300 setOperationAction(ISD::MULHS, T, Custom); 301 setOperationAction(ISD::MULHU, T, Custom); 302 setOperationAction(ISD::AND, T, Custom); 303 setOperationAction(ISD::OR, T, Custom); 304 setOperationAction(ISD::XOR, T, Custom); 305 setOperationAction(ISD::SETCC, T, Custom); 306 setOperationAction(ISD::VSELECT, T, Custom); 307 if (T != ByteW) { 308 setOperationAction(ISD::SRA, T, Custom); 309 setOperationAction(ISD::SHL, T, Custom); 310 setOperationAction(ISD::SRL, T, Custom); 311 312 // Promote all shuffles to operate on vectors of bytes. 
313 setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteW); 314 } 315 setOperationAction(ISD::FSHL, T, Custom); 316 setOperationAction(ISD::FSHR, T, Custom); 317 318 setOperationAction(ISD::SMIN, T, Custom); 319 setOperationAction(ISD::SMAX, T, Custom); 320 if (T.getScalarType() != MVT::i32) { 321 setOperationAction(ISD::UMIN, T, Custom); 322 setOperationAction(ISD::UMAX, T, Custom); 323 } 324 325 if (Subtarget.useHVXFloatingPoint()) { 326 // Same action for both QFloat and IEEE. 327 setOperationAction(ISD::SINT_TO_FP, T, Custom); 328 setOperationAction(ISD::UINT_TO_FP, T, Custom); 329 setOperationAction(ISD::FP_TO_SINT, T, Custom); 330 setOperationAction(ISD::FP_TO_UINT, T, Custom); 331 } 332 } 333 334 // Legalize all of these to HexagonISD::[SU]MUL_LOHI. 335 setOperationAction(ISD::MULHS, WordV, Custom); // -> _LOHI 336 setOperationAction(ISD::MULHU, WordV, Custom); // -> _LOHI 337 setOperationAction(ISD::SMUL_LOHI, WordV, Custom); 338 setOperationAction(ISD::UMUL_LOHI, WordV, Custom); 339 340 setCondCodeAction(ISD::SETNE, MVT::v64f16, Expand); 341 setCondCodeAction(ISD::SETLE, MVT::v64f16, Expand); 342 setCondCodeAction(ISD::SETGE, MVT::v64f16, Expand); 343 setCondCodeAction(ISD::SETLT, MVT::v64f16, Expand); 344 setCondCodeAction(ISD::SETONE, MVT::v64f16, Expand); 345 setCondCodeAction(ISD::SETOLE, MVT::v64f16, Expand); 346 setCondCodeAction(ISD::SETOGE, MVT::v64f16, Expand); 347 setCondCodeAction(ISD::SETOLT, MVT::v64f16, Expand); 348 setCondCodeAction(ISD::SETUNE, MVT::v64f16, Expand); 349 setCondCodeAction(ISD::SETULE, MVT::v64f16, Expand); 350 setCondCodeAction(ISD::SETUGE, MVT::v64f16, Expand); 351 setCondCodeAction(ISD::SETULT, MVT::v64f16, Expand); 352 353 setCondCodeAction(ISD::SETNE, MVT::v32f32, Expand); 354 setCondCodeAction(ISD::SETLE, MVT::v32f32, Expand); 355 setCondCodeAction(ISD::SETGE, MVT::v32f32, Expand); 356 setCondCodeAction(ISD::SETLT, MVT::v32f32, Expand); 357 setCondCodeAction(ISD::SETONE, MVT::v32f32, Expand); 358 setCondCodeAction(ISD::SETOLE, 
MVT::v32f32, Expand); 359 setCondCodeAction(ISD::SETOGE, MVT::v32f32, Expand); 360 setCondCodeAction(ISD::SETOLT, MVT::v32f32, Expand); 361 setCondCodeAction(ISD::SETUNE, MVT::v32f32, Expand); 362 setCondCodeAction(ISD::SETULE, MVT::v32f32, Expand); 363 setCondCodeAction(ISD::SETUGE, MVT::v32f32, Expand); 364 setCondCodeAction(ISD::SETULT, MVT::v32f32, Expand); 365 366 // Boolean vectors. 367 368 for (MVT T : LegalW) { 369 // Boolean types for vector pairs will overlap with the boolean 370 // types for single vectors, e.g. 371 // v64i8 -> v64i1 (single) 372 // v64i16 -> v64i1 (pair) 373 // Set these actions first, and allow the single actions to overwrite 374 // any duplicates. 375 MVT BoolW = MVT::getVectorVT(MVT::i1, T.getVectorNumElements()); 376 setOperationAction(ISD::SETCC, BoolW, Custom); 377 setOperationAction(ISD::AND, BoolW, Custom); 378 setOperationAction(ISD::OR, BoolW, Custom); 379 setOperationAction(ISD::XOR, BoolW, Custom); 380 // Masked load/store takes a mask that may need splitting. 
381 setOperationAction(ISD::MLOAD, BoolW, Custom); 382 setOperationAction(ISD::MSTORE, BoolW, Custom); 383 } 384 385 for (MVT T : LegalV) { 386 MVT BoolV = MVT::getVectorVT(MVT::i1, T.getVectorNumElements()); 387 setOperationAction(ISD::BUILD_VECTOR, BoolV, Custom); 388 setOperationAction(ISD::CONCAT_VECTORS, BoolV, Custom); 389 setOperationAction(ISD::INSERT_SUBVECTOR, BoolV, Custom); 390 setOperationAction(ISD::INSERT_VECTOR_ELT, BoolV, Custom); 391 setOperationAction(ISD::EXTRACT_SUBVECTOR, BoolV, Custom); 392 setOperationAction(ISD::EXTRACT_VECTOR_ELT, BoolV, Custom); 393 setOperationAction(ISD::SELECT, BoolV, Custom); 394 setOperationAction(ISD::AND, BoolV, Legal); 395 setOperationAction(ISD::OR, BoolV, Legal); 396 setOperationAction(ISD::XOR, BoolV, Legal); 397 } 398 399 if (Use64b) { 400 for (MVT T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32}) 401 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); 402 } else { 403 for (MVT T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32}) 404 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); 405 } 406 407 // Handle store widening for short vectors. 
408 unsigned HwLen = Subtarget.getVectorLength(); 409 for (MVT ElemTy : Subtarget.getHVXElementTypes()) { 410 if (ElemTy == MVT::i1) 411 continue; 412 int ElemWidth = ElemTy.getFixedSizeInBits(); 413 int MaxElems = (8*HwLen) / ElemWidth; 414 for (int N = 2; N < MaxElems; N *= 2) { 415 MVT VecTy = MVT::getVectorVT(ElemTy, N); 416 auto Action = getPreferredVectorAction(VecTy); 417 if (Action == TargetLoweringBase::TypeWidenVector) { 418 setOperationAction(ISD::LOAD, VecTy, Custom); 419 setOperationAction(ISD::STORE, VecTy, Custom); 420 setOperationAction(ISD::SETCC, VecTy, Custom); 421 setOperationAction(ISD::TRUNCATE, VecTy, Custom); 422 setOperationAction(ISD::ANY_EXTEND, VecTy, Custom); 423 setOperationAction(ISD::SIGN_EXTEND, VecTy, Custom); 424 setOperationAction(ISD::ZERO_EXTEND, VecTy, Custom); 425 if (Subtarget.useHVXFloatingPoint()) { 426 setOperationAction(ISD::FP_TO_SINT, VecTy, Custom); 427 setOperationAction(ISD::FP_TO_UINT, VecTy, Custom); 428 setOperationAction(ISD::SINT_TO_FP, VecTy, Custom); 429 setOperationAction(ISD::UINT_TO_FP, VecTy, Custom); 430 } 431 432 MVT BoolTy = MVT::getVectorVT(MVT::i1, N); 433 if (!isTypeLegal(BoolTy)) 434 setOperationAction(ISD::SETCC, BoolTy, Custom); 435 } 436 } 437 } 438 439 setTargetDAGCombine({ISD::CONCAT_VECTORS, ISD::TRUNCATE, ISD::VSELECT}); 440 } 441 442 unsigned 443 HexagonTargetLowering::getPreferredHvxVectorAction(MVT VecTy) const { 444 MVT ElemTy = VecTy.getVectorElementType(); 445 unsigned VecLen = VecTy.getVectorNumElements(); 446 unsigned HwLen = Subtarget.getVectorLength(); 447 448 // Split vectors of i1 that exceed byte vector length. 449 if (ElemTy == MVT::i1 && VecLen > HwLen) 450 return TargetLoweringBase::TypeSplitVector; 451 452 ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes(); 453 // For shorter vectors of i1, widen them if any of the corresponding 454 // vectors of integers needs to be widened. 
455 if (ElemTy == MVT::i1) { 456 for (MVT T : Tys) { 457 assert(T != MVT::i1); 458 auto A = getPreferredHvxVectorAction(MVT::getVectorVT(T, VecLen)); 459 if (A != ~0u) 460 return A; 461 } 462 return ~0u; 463 } 464 465 // If the size of VecTy is at least half of the vector length, 466 // widen the vector. Note: the threshold was not selected in 467 // any scientific way. 468 if (llvm::is_contained(Tys, ElemTy)) { 469 unsigned VecWidth = VecTy.getSizeInBits(); 470 unsigned HwWidth = 8*HwLen; 471 if (VecWidth > 2*HwWidth) 472 return TargetLoweringBase::TypeSplitVector; 473 474 bool HaveThreshold = HvxWidenThreshold.getNumOccurrences() > 0; 475 if (HaveThreshold && 8*HvxWidenThreshold <= VecWidth) 476 return TargetLoweringBase::TypeWidenVector; 477 if (VecWidth >= HwWidth/2 && VecWidth < HwWidth) 478 return TargetLoweringBase::TypeWidenVector; 479 } 480 481 // Defer to default. 482 return ~0u; 483 } 484 485 unsigned 486 HexagonTargetLowering::getCustomHvxOperationAction(SDNode &Op) const { 487 unsigned Opc = Op.getOpcode(); 488 switch (Opc) { 489 case HexagonISD::SMUL_LOHI: 490 case HexagonISD::UMUL_LOHI: 491 case HexagonISD::USMUL_LOHI: 492 return TargetLoweringBase::Custom; 493 } 494 return TargetLoweringBase::Legal; 495 } 496 497 SDValue 498 HexagonTargetLowering::getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops, 499 const SDLoc &dl, SelectionDAG &DAG) const { 500 SmallVector<SDValue,4> IntOps; 501 IntOps.push_back(DAG.getConstant(IntId, dl, MVT::i32)); 502 append_range(IntOps, Ops); 503 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ResTy, IntOps); 504 } 505 506 MVT 507 HexagonTargetLowering::typeJoin(const TypePair &Tys) const { 508 assert(Tys.first.getVectorElementType() == Tys.second.getVectorElementType()); 509 510 MVT ElemTy = Tys.first.getVectorElementType(); 511 return MVT::getVectorVT(ElemTy, Tys.first.getVectorNumElements() + 512 Tys.second.getVectorNumElements()); 513 } 514 515 HexagonTargetLowering::TypePair 516 
HexagonTargetLowering::typeSplit(MVT VecTy) const { 517 assert(VecTy.isVector()); 518 unsigned NumElem = VecTy.getVectorNumElements(); 519 assert((NumElem % 2) == 0 && "Expecting even-sized vector type"); 520 MVT HalfTy = MVT::getVectorVT(VecTy.getVectorElementType(), NumElem/2); 521 return { HalfTy, HalfTy }; 522 } 523 524 MVT 525 HexagonTargetLowering::typeExtElem(MVT VecTy, unsigned Factor) const { 526 MVT ElemTy = VecTy.getVectorElementType(); 527 MVT NewElemTy = MVT::getIntegerVT(ElemTy.getSizeInBits() * Factor); 528 return MVT::getVectorVT(NewElemTy, VecTy.getVectorNumElements()); 529 } 530 531 MVT 532 HexagonTargetLowering::typeTruncElem(MVT VecTy, unsigned Factor) const { 533 MVT ElemTy = VecTy.getVectorElementType(); 534 MVT NewElemTy = MVT::getIntegerVT(ElemTy.getSizeInBits() / Factor); 535 return MVT::getVectorVT(NewElemTy, VecTy.getVectorNumElements()); 536 } 537 538 SDValue 539 HexagonTargetLowering::opCastElem(SDValue Vec, MVT ElemTy, 540 SelectionDAG &DAG) const { 541 if (ty(Vec).getVectorElementType() == ElemTy) 542 return Vec; 543 MVT CastTy = tyVector(Vec.getValueType().getSimpleVT(), ElemTy); 544 return DAG.getBitcast(CastTy, Vec); 545 } 546 547 SDValue 548 HexagonTargetLowering::opJoin(const VectorPair &Ops, const SDLoc &dl, 549 SelectionDAG &DAG) const { 550 return DAG.getNode(ISD::CONCAT_VECTORS, dl, typeJoin(ty(Ops)), 551 Ops.first, Ops.second); 552 } 553 554 HexagonTargetLowering::VectorPair 555 HexagonTargetLowering::opSplit(SDValue Vec, const SDLoc &dl, 556 SelectionDAG &DAG) const { 557 TypePair Tys = typeSplit(ty(Vec)); 558 if (Vec.getOpcode() == HexagonISD::QCAT) 559 return VectorPair(Vec.getOperand(0), Vec.getOperand(1)); 560 return DAG.SplitVector(Vec, dl, Tys.first, Tys.second); 561 } 562 563 bool 564 HexagonTargetLowering::isHvxSingleTy(MVT Ty) const { 565 return Subtarget.isHVXVectorType(Ty) && 566 Ty.getSizeInBits() == 8 * Subtarget.getVectorLength(); 567 } 568 569 bool 570 HexagonTargetLowering::isHvxPairTy(MVT Ty) const { 571 
return Subtarget.isHVXVectorType(Ty) && 572 Ty.getSizeInBits() == 16 * Subtarget.getVectorLength(); 573 } 574 575 bool 576 HexagonTargetLowering::isHvxBoolTy(MVT Ty) const { 577 return Subtarget.isHVXVectorType(Ty, true) && 578 Ty.getVectorElementType() == MVT::i1; 579 } 580 581 bool HexagonTargetLowering::allowsHvxMemoryAccess( 582 MVT VecTy, MachineMemOperand::Flags Flags, unsigned *Fast) const { 583 // Bool vectors are excluded by default, but make it explicit to 584 // emphasize that bool vectors cannot be loaded or stored. 585 // Also, disallow double vector stores (to prevent unnecessary 586 // store widening in DAG combiner). 587 if (VecTy.getSizeInBits() > 8*Subtarget.getVectorLength()) 588 return false; 589 if (!Subtarget.isHVXVectorType(VecTy, /*IncludeBool=*/false)) 590 return false; 591 if (Fast) 592 *Fast = 1; 593 return true; 594 } 595 596 bool HexagonTargetLowering::allowsHvxMisalignedMemoryAccesses( 597 MVT VecTy, MachineMemOperand::Flags Flags, unsigned *Fast) const { 598 if (!Subtarget.isHVXVectorType(VecTy)) 599 return false; 600 // XXX Should this be false? vmemu are a bit slower than vmem. 
601 if (Fast) 602 *Fast = 1; 603 return true; 604 } 605 606 void HexagonTargetLowering::AdjustHvxInstrPostInstrSelection( 607 MachineInstr &MI, SDNode *Node) const { 608 unsigned Opc = MI.getOpcode(); 609 const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); 610 MachineBasicBlock &MB = *MI.getParent(); 611 MachineFunction &MF = *MB.getParent(); 612 MachineRegisterInfo &MRI = MF.getRegInfo(); 613 DebugLoc DL = MI.getDebugLoc(); 614 auto At = MI.getIterator(); 615 616 switch (Opc) { 617 case Hexagon::PS_vsplatib: 618 if (Subtarget.useHVXV62Ops()) { 619 // SplatV = A2_tfrsi #imm 620 // OutV = V6_lvsplatb SplatV 621 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 622 BuildMI(MB, At, DL, TII.get(Hexagon::A2_tfrsi), SplatV) 623 .add(MI.getOperand(1)); 624 Register OutV = MI.getOperand(0).getReg(); 625 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatb), OutV) 626 .addReg(SplatV); 627 } else { 628 // SplatV = A2_tfrsi #imm:#imm:#imm:#imm 629 // OutV = V6_lvsplatw SplatV 630 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 631 const MachineOperand &InpOp = MI.getOperand(1); 632 assert(InpOp.isImm()); 633 uint32_t V = InpOp.getImm() & 0xFF; 634 BuildMI(MB, At, DL, TII.get(Hexagon::A2_tfrsi), SplatV) 635 .addImm(V << 24 | V << 16 | V << 8 | V); 636 Register OutV = MI.getOperand(0).getReg(); 637 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatw), OutV).addReg(SplatV); 638 } 639 MB.erase(At); 640 break; 641 case Hexagon::PS_vsplatrb: 642 if (Subtarget.useHVXV62Ops()) { 643 // OutV = V6_lvsplatb Inp 644 Register OutV = MI.getOperand(0).getReg(); 645 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatb), OutV) 646 .add(MI.getOperand(1)); 647 } else { 648 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 649 const MachineOperand &InpOp = MI.getOperand(1); 650 BuildMI(MB, At, DL, TII.get(Hexagon::S2_vsplatrb), SplatV) 651 .addReg(InpOp.getReg(), 0, InpOp.getSubReg()); 652 Register OutV = MI.getOperand(0).getReg(); 653 
BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatw), OutV) 654 .addReg(SplatV); 655 } 656 MB.erase(At); 657 break; 658 case Hexagon::PS_vsplatih: 659 if (Subtarget.useHVXV62Ops()) { 660 // SplatV = A2_tfrsi #imm 661 // OutV = V6_lvsplath SplatV 662 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 663 BuildMI(MB, At, DL, TII.get(Hexagon::A2_tfrsi), SplatV) 664 .add(MI.getOperand(1)); 665 Register OutV = MI.getOperand(0).getReg(); 666 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplath), OutV) 667 .addReg(SplatV); 668 } else { 669 // SplatV = A2_tfrsi #imm:#imm 670 // OutV = V6_lvsplatw SplatV 671 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 672 const MachineOperand &InpOp = MI.getOperand(1); 673 assert(InpOp.isImm()); 674 uint32_t V = InpOp.getImm() & 0xFFFF; 675 BuildMI(MB, At, DL, TII.get(Hexagon::A2_tfrsi), SplatV) 676 .addImm(V << 16 | V); 677 Register OutV = MI.getOperand(0).getReg(); 678 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatw), OutV).addReg(SplatV); 679 } 680 MB.erase(At); 681 break; 682 case Hexagon::PS_vsplatrh: 683 if (Subtarget.useHVXV62Ops()) { 684 // OutV = V6_lvsplath Inp 685 Register OutV = MI.getOperand(0).getReg(); 686 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplath), OutV) 687 .add(MI.getOperand(1)); 688 } else { 689 // SplatV = A2_combine_ll Inp, Inp 690 // OutV = V6_lvsplatw SplatV 691 Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 692 const MachineOperand &InpOp = MI.getOperand(1); 693 BuildMI(MB, At, DL, TII.get(Hexagon::A2_combine_ll), SplatV) 694 .addReg(InpOp.getReg(), 0, InpOp.getSubReg()) 695 .addReg(InpOp.getReg(), 0, InpOp.getSubReg()); 696 Register OutV = MI.getOperand(0).getReg(); 697 BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatw), OutV).addReg(SplatV); 698 } 699 MB.erase(At); 700 break; 701 case Hexagon::PS_vsplatiw: 702 case Hexagon::PS_vsplatrw: 703 if (Opc == Hexagon::PS_vsplatiw) { 704 // SplatV = A2_tfrsi #imm 705 Register SplatV = 
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 706 BuildMI(MB, At, DL, TII.get(Hexagon::A2_tfrsi), SplatV) 707 .add(MI.getOperand(1)); 708 MI.getOperand(1).ChangeToRegister(SplatV, false); 709 } 710 // OutV = V6_lvsplatw SplatV/Inp 711 MI.setDesc(TII.get(Hexagon::V6_lvsplatw)); 712 break; 713 } 714 } 715 716 SDValue 717 HexagonTargetLowering::convertToByteIndex(SDValue ElemIdx, MVT ElemTy, 718 SelectionDAG &DAG) const { 719 if (ElemIdx.getValueType().getSimpleVT() != MVT::i32) 720 ElemIdx = DAG.getBitcast(MVT::i32, ElemIdx); 721 722 unsigned ElemWidth = ElemTy.getSizeInBits(); 723 if (ElemWidth == 8) 724 return ElemIdx; 725 726 unsigned L = Log2_32(ElemWidth/8); 727 const SDLoc &dl(ElemIdx); 728 return DAG.getNode(ISD::SHL, dl, MVT::i32, 729 {ElemIdx, DAG.getConstant(L, dl, MVT::i32)}); 730 } 731 732 SDValue 733 HexagonTargetLowering::getIndexInWord32(SDValue Idx, MVT ElemTy, 734 SelectionDAG &DAG) const { 735 unsigned ElemWidth = ElemTy.getSizeInBits(); 736 assert(ElemWidth >= 8 && ElemWidth <= 32); 737 if (ElemWidth == 32) 738 return Idx; 739 740 if (ty(Idx) != MVT::i32) 741 Idx = DAG.getBitcast(MVT::i32, Idx); 742 const SDLoc &dl(Idx); 743 SDValue Mask = DAG.getConstant(32/ElemWidth - 1, dl, MVT::i32); 744 SDValue SubIdx = DAG.getNode(ISD::AND, dl, MVT::i32, {Idx, Mask}); 745 return SubIdx; 746 } 747 748 SDValue 749 HexagonTargetLowering::getByteShuffle(const SDLoc &dl, SDValue Op0, 750 SDValue Op1, ArrayRef<int> Mask, 751 SelectionDAG &DAG) const { 752 MVT OpTy = ty(Op0); 753 assert(OpTy == ty(Op1)); 754 755 MVT ElemTy = OpTy.getVectorElementType(); 756 if (ElemTy == MVT::i8) 757 return DAG.getVectorShuffle(OpTy, dl, Op0, Op1, Mask); 758 assert(ElemTy.getSizeInBits() >= 8); 759 760 MVT ResTy = tyVector(OpTy, MVT::i8); 761 unsigned ElemSize = ElemTy.getSizeInBits() / 8; 762 763 SmallVector<int,128> ByteMask; 764 for (int M : Mask) { 765 if (M < 0) { 766 for (unsigned I = 0; I != ElemSize; ++I) 767 ByteMask.push_back(-1); 768 } else { 769 int NewM = 
M*ElemSize;
      for (unsigned I = 0; I != ElemSize; ++I)
        ByteMask.push_back(NewM+I);
    }
  }
  assert(ResTy.getVectorNumElements() == ByteMask.size());
  return DAG.getVectorShuffle(ResTy, dl, opCastElem(Op0, MVT::i8, DAG),
                              opCastElem(Op1, MVT::i8, DAG), ByteMask);
}

// Build a single HVX vector register of type VecTy (HwLen bytes in total)
// from the scalar operands in Values, one element per operand. Several
// strategies are attempted in turn: returning undef/zero directly, a splat
// of a repeated word, a constant-pool load when all elements are constants,
// a shuffle when all elements are extracts from one source vector, and
// finally word-by-word insertion into two halves that are OR'ed together.
SDValue
HexagonTargetLowering::buildHvxVectorReg(ArrayRef<SDValue> Values,
                                         const SDLoc &dl, MVT VecTy,
                                         SelectionDAG &DAG) const {
  unsigned VecLen = Values.size();
  MachineFunction &MF = DAG.getMachineFunction();
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();
  unsigned HwLen = Subtarget.getVectorLength();

  unsigned ElemSize = ElemWidth / 8;
  assert(ElemSize*VecLen == HwLen);
  SmallVector<SDValue,32> Words;

  // The insertion primitives below operate on 32-bit words, so sub-word
  // (i8/i16) elements are first packed into words via buildVector32.
  if (VecTy.getVectorElementType() != MVT::i32 &&
      !(Subtarget.useHVXFloatingPoint() &&
        VecTy.getVectorElementType() == MVT::f32)) {
    assert((ElemSize == 1 || ElemSize == 2) && "Invalid element size");
    unsigned OpsPerWord = (ElemSize == 1) ? 4 : 2;
    MVT PartVT = MVT::getVectorVT(VecTy.getVectorElementType(), OpsPerWord);
    for (unsigned i = 0; i != VecLen; i += OpsPerWord) {
      SDValue W = buildVector32(Values.slice(i, OpsPerWord), dl, PartVT, DAG);
      Words.push_back(DAG.getBitcast(MVT::i32, W));
    }
  } else {
    for (SDValue V : Values)
      Words.push_back(DAG.getBitcast(MVT::i32, V));
  }
  // Returns true when all non-undef values are identical; SplatV is set to
  // that common value (or to Values[0] if every value is undef).
  auto isSplat = [] (ArrayRef<SDValue> Values, SDValue &SplatV) {
    unsigned NumValues = Values.size();
    assert(NumValues > 0);
    bool IsUndef = true;
    for (unsigned i = 0; i != NumValues; ++i) {
      if (Values[i].isUndef())
        continue;
      IsUndef = false;
      if (!SplatV.getNode())
        SplatV = Values[i];
      else if (SplatV != Values[i])
        return false;
    }
    if (IsUndef)
      SplatV = Values[0];
    return true;
  };

  unsigned NumWords = Words.size();
  SDValue SplatV;
  bool IsSplat = isSplat(Words, SplatV);
  if (IsSplat && isUndef(SplatV))
    return DAG.getUNDEF(VecTy);
  if (IsSplat) {
    assert(SplatV.getNode());
    auto *IdxN = dyn_cast<ConstantSDNode>(SplatV.getNode());
    if (IdxN && IdxN->isZero())
      return getZero(dl, VecTy, DAG);
    MVT WordTy = MVT::getVectorVT(MVT::i32, HwLen/4);
    SDValue S = DAG.getNode(ISD::SPLAT_VECTOR, dl, WordTy, SplatV);
    return DAG.getBitcast(VecTy, S);
  }

  // Delay recognizing constant vectors until here, so that we can generate
  // a vsplat.
  SmallVector<ConstantInt*, 128> Consts(VecLen);
  bool AllConst = getBuildVectorConstInts(Values, VecTy, DAG, Consts);
  if (AllConst) {
    // All elements are compile-time constants: emit them into the constant
    // pool and load the whole vector in one go.
    ArrayRef<Constant*> Tmp((Constant**)Consts.begin(),
                            (Constant**)Consts.end());
    Constant *CV = ConstantVector::get(Tmp);
    Align Alignment(HwLen);
    SDValue CP =
        LowerConstantPool(DAG.getConstantPool(CV, VecTy, Alignment), DAG);
    return DAG.getLoad(VecTy, dl, DAG.getEntryNode(), CP,
                       MachinePointerInfo::getConstantPool(MF), Alignment);
  }

  // A special case is a situation where the vector is built entirely from
  // elements extracted from another vector. This could be done via a shuffle
  // more efficiently, but typically, the size of the source vector will not
  // match the size of the vector being built (which precludes the use of a
  // shuffle directly).
  // This only handles a single source vector, and the vector being built
  // should be of a sub-vector type of the source vector type.
  auto IsBuildFromExtracts = [this,&Values] (SDValue &SrcVec,
                                             SmallVectorImpl<int> &SrcIdx) {
    SDValue Vec;
    for (SDValue V : Values) {
      if (isUndef(V)) {
        SrcIdx.push_back(-1);
        continue;
      }
      if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
        return false;
      // All extracts should come from the same vector.
      SDValue T = V.getOperand(0);
      if (Vec.getNode() != nullptr && T.getNode() != Vec.getNode())
        return false;
      Vec = T;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
      if (C == nullptr)
        return false;
      int I = C->getSExtValue();
      assert(I >= 0 && "Negative element index");
      SrcIdx.push_back(I);
    }
    SrcVec = Vec;
    return true;
  };

  SmallVector<int,128> ExtIdx;
  SDValue ExtVec;
  if (IsBuildFromExtracts(ExtVec, ExtIdx)) {
    MVT ExtTy = ty(ExtVec);
    unsigned ExtLen = ExtTy.getVectorNumElements();
    if (ExtLen == VecLen || ExtLen == 2*VecLen) {
      // Construct a new shuffle mask that will produce a vector with the same
      // number of elements as the input vector, and such that the vector we
      // want will be the initial subvector of it.
      SmallVector<int,128> Mask;
      BitVector Used(ExtLen);

      for (int M : ExtIdx) {
        Mask.push_back(M);
        if (M >= 0)
          Used.set(M);
      }
      // Fill the rest of the mask with the unused elements of ExtVec in hopes
      // that it will result in a permutation of ExtVec's elements. It's still
      // fine if it doesn't (e.g. if undefs are present, or elements are
      // repeated), but permutations can always be done efficiently via vdelta
      // and vrdelta.
      for (unsigned I = 0; I != ExtLen; ++I) {
        if (Mask.size() == ExtLen)
          break;
        if (!Used.test(I))
          Mask.push_back(I);
      }

      SDValue S = DAG.getVectorShuffle(ExtTy, dl, ExtVec,
                                       DAG.getUNDEF(ExtTy), Mask);
      return ExtLen == VecLen ? S : LoHalf(S, DAG);
    }
  }

  // Find most common element to initialize vector with. This is to avoid
  // unnecessary vinsert/valign for cases where the same value is present
  // many times. Creates a histogram of the vector's elements to find the
  // most common element n.
  assert(4*Words.size() == Subtarget.getVectorLength());
  // Histogram: VecHist[i] counts how many of Words[i..NumWords) equal
  // Words[i]; n ends up as the index of the most frequently seen word.
  int VecHist[32];
  int n = 0;
  for (unsigned i = 0; i != NumWords; ++i) {
    VecHist[i] = 0;
    if (Words[i].isUndef())
      continue;
    for (unsigned j = i; j != NumWords; ++j)
      if (Words[i] == Words[j])
        VecHist[i]++;

    if (VecHist[i] > VecHist[n])
      n = i;
  }

  // If some word repeats, pre-fill both halves with a splat of it, so that
  // positions holding that word need no explicit insertion in the loop below.
  SDValue HalfV = getZero(dl, VecTy, DAG);
  if (VecHist[n] > 1) {
    SDValue SplatV = DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Words[n]);
    HalfV = DAG.getNode(HexagonISD::VALIGN, dl, VecTy,
                       {HalfV, SplatV, DAG.getConstant(HwLen/2, dl, MVT::i32)});
  }
  SDValue HalfV0 = HalfV;
  SDValue HalfV1 = HalfV;

  // Construct two halves in parallel, then or them together. Rn and Rm count
  // number of rotations needed before the next element. One last rotation is
  // performed post-loop to position the last element.
  int Rn = 0, Rm = 0;
  SDValue Sn, Sm;
  SDValue N = HalfV0;
  SDValue M = HalfV1;
  for (unsigned i = 0; i != NumWords/2; ++i) {
    // Rotate by element count since last insertion.
    if (Words[i] != Words[n] || VecHist[n] <= 1) {
      Sn = DAG.getConstant(Rn, dl, MVT::i32);
      HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, Sn});
      N = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
                      {HalfV0, Words[i]});
      Rn = 0;
    }
    if (Words[i+NumWords/2] != Words[n] || VecHist[n] <= 1) {
      Sm = DAG.getConstant(Rm, dl, MVT::i32);
      HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, Sm});
      M = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
                      {HalfV1, Words[i+NumWords/2]});
      Rm = 0;
    }
    Rn += 4;
    Rm += 4;
  }
  // Perform last rotation.
  Sn = DAG.getConstant(Rn+HwLen/2, dl, MVT::i32);
  Sm = DAG.getConstant(Rm, dl, MVT::i32);
  HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, Sn});
  HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, Sm});

  SDValue T0 = DAG.getBitcast(tyVector(VecTy, MVT::i32), HalfV0);
  SDValue T1 = DAG.getBitcast(tyVector(VecTy, MVT::i32), HalfV1);

  SDValue DstV = DAG.getNode(ISD::OR, dl, ty(T0), {T0, T1});

  SDValue OutV =
      DAG.getBitcast(tyVector(ty(DstV), VecTy.getVectorElementType()), DstV);
  return OutV;
}

// Create a byte vector (HwLen bytes) in which the predicate PredV is laid
// out at the front, using BitBytes bytes per source i1 element. When
// ZeroFill is set, the bytes past that prefix are zeroed; otherwise their
// contents are unspecified.
SDValue
HexagonTargetLowering::createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
      unsigned BitBytes, bool ZeroFill, SelectionDAG &DAG) const {
  MVT PredTy = ty(PredV);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);

  if (Subtarget.isHVXVectorType(PredTy, true)) {
    // Move the vector predicate SubV to a vector register, and scale it
    // down to match the representation (bytes per type element) that VecV
    // uses. The scaling down will pick every 2nd or 4th (every Scale-th
    // in general) element and put them at the front of the resulting
    // vector. This subvector will then be inserted into the Q2V of VecV.
    // To avoid having an operation that generates an illegal type (short
    // vector), generate a full size vector.
    //
    SDValue T = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, PredV);
    SmallVector<int,128> Mask(HwLen);
    // Scale = BitBytes(PredV) / Given BitBytes.
    unsigned Scale = HwLen / (PredTy.getVectorNumElements() * BitBytes);
    unsigned BlockLen = PredTy.getVectorNumElements() * BitBytes;

    for (unsigned i = 0; i != HwLen; ++i) {
      unsigned Num = i % Scale;
      unsigned Off = i / Scale;
      Mask[BlockLen*Num + Off] = i;
    }
    SDValue S = DAG.getVectorShuffle(ByteTy, dl, T, DAG.getUNDEF(ByteTy), Mask);
    if (!ZeroFill)
      return S;
    // Fill the bytes beyond BlockLen with 0s.
    // V6_pred_scalar2 cannot fill the entire predicate, so it only works
    // when BlockLen < HwLen.
    assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
    MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
    SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                         {DAG.getConstant(BlockLen, dl, MVT::i32)}, DAG);
    SDValue M = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, Q);
    return DAG.getNode(ISD::AND, dl, ByteTy, S, M);
  }

  // Make sure that this is a valid scalar predicate.
  assert(PredTy == MVT::v2i1 || PredTy == MVT::v4i1 || PredTy == MVT::v8i1);

  unsigned Bytes = 8 / PredTy.getVectorNumElements();
  SmallVector<SDValue,4> Words[2];
  unsigned IdxW = 0;

  SDValue W0 = isUndef(PredV)
                  ? DAG.getUNDEF(MVT::i64)
                  : DAG.getNode(HexagonISD::P2D, dl, MVT::i64, PredV);
  Words[IdxW].push_back(HiHalf(W0, DAG));
  Words[IdxW].push_back(LoHalf(W0, DAG));

  // Keep doubling the byte count per predicate bit until it reaches
  // BitBytes, ping-ponging between the two scratch word lists.
  while (Bytes < BitBytes) {
    IdxW ^= 1;
    Words[IdxW].clear();

    if (Bytes < 4) {
      for (const SDValue &W : Words[IdxW ^ 1]) {
        SDValue T = expandPredicate(W, dl, DAG);
        Words[IdxW].push_back(HiHalf(T, DAG));
        Words[IdxW].push_back(LoHalf(T, DAG));
      }
    } else {
      for (const SDValue &W : Words[IdxW ^ 1]) {
        Words[IdxW].push_back(W);
        Words[IdxW].push_back(W);
      }
    }
    Bytes *= 2;
  }

  assert(Bytes == BitBytes);

  // Prepend each word: rotating by HwLen-4 before VINSERTW0 makes room at
  // word 0 for the next insertion.
  SDValue Vec = ZeroFill ? getZero(dl, ByteTy, DAG) : DAG.getUNDEF(ByteTy);
  SDValue S4 = DAG.getConstant(HwLen-4, dl, MVT::i32);
  for (const SDValue &W : Words[IdxW]) {
    Vec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, Vec, S4);
    Vec = DAG.getNode(HexagonISD::VINSERTW0, dl, ByteTy, Vec, W);
  }

  return Vec;
}

SDValue
HexagonTargetLowering::buildHvxVectorPred(ArrayRef<SDValue> Values,
                                          const SDLoc &dl, MVT VecTy,
                                          SelectionDAG &DAG) const {
  // Construct a vector V of bytes, such that a comparison V >u 0 would
  // produce the required vector predicate.
  unsigned VecLen = Values.size();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(VecLen <= HwLen || VecLen == 8*HwLen);
  SmallVector<SDValue,128> Bytes;
  bool AllT = true, AllF = true;

  auto IsTrue = [] (SDValue V) {
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
      return !N->isZero();
    return false;
  };
  auto IsFalse = [] (SDValue V) {
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
      return N->isZero();
    return false;
  };

  if (VecLen <= HwLen) {
    // In the hardware, each bit of a vector predicate corresponds to a byte
    // of a vector register. Calculate how many bytes does a bit of VecTy
    // correspond to.
    assert(HwLen % VecLen == 0);
    unsigned BitBytes = HwLen / VecLen;
    for (SDValue V : Values) {
      AllT &= IsTrue(V);
      AllF &= IsFalse(V);

      SDValue Ext = !V.isUndef() ? DAG.getZExtOrTrunc(V, dl, MVT::i8)
                                 : DAG.getUNDEF(MVT::i8);
      for (unsigned B = 0; B != BitBytes; ++B)
        Bytes.push_back(Ext);
    }
  } else {
    // There are as many i1 values, as there are bits in a vector register.
    // Divide the values into groups of 8 and check that each group consists
    // of the same value (ignoring undefs).
1120 for (unsigned I = 0; I != VecLen; I += 8) { 1121 unsigned B = 0; 1122 // Find the first non-undef value in this group. 1123 for (; B != 8; ++B) { 1124 if (!Values[I+B].isUndef()) 1125 break; 1126 } 1127 SDValue F = Values[I+B]; 1128 AllT &= IsTrue(F); 1129 AllF &= IsFalse(F); 1130 1131 SDValue Ext = (B < 8) ? DAG.getZExtOrTrunc(F, dl, MVT::i8) 1132 : DAG.getUNDEF(MVT::i8); 1133 Bytes.push_back(Ext); 1134 // Verify that the rest of values in the group are the same as the 1135 // first. 1136 for (; B != 8; ++B) 1137 assert(Values[I+B].isUndef() || Values[I+B] == F); 1138 } 1139 } 1140 1141 if (AllT) 1142 return DAG.getNode(HexagonISD::QTRUE, dl, VecTy); 1143 if (AllF) 1144 return DAG.getNode(HexagonISD::QFALSE, dl, VecTy); 1145 1146 MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen); 1147 SDValue ByteVec = buildHvxVectorReg(Bytes, dl, ByteTy, DAG); 1148 return DAG.getNode(HexagonISD::V2Q, dl, VecTy, ByteVec); 1149 } 1150 1151 SDValue 1152 HexagonTargetLowering::extractHvxElementReg(SDValue VecV, SDValue IdxV, 1153 const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const { 1154 MVT ElemTy = ty(VecV).getVectorElementType(); 1155 1156 unsigned ElemWidth = ElemTy.getSizeInBits(); 1157 assert(ElemWidth >= 8 && ElemWidth <= 32); 1158 (void)ElemWidth; 1159 1160 SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG); 1161 SDValue ExWord = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32, 1162 {VecV, ByteIdx}); 1163 if (ElemTy == MVT::i32) 1164 return ExWord; 1165 1166 // Have an extracted word, need to extract the smaller element out of it. 1167 // 1. Extract the bits of (the original) IdxV that correspond to the index 1168 // of the desired element in the 32-bit word. 1169 SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG); 1170 // 2. Extract the element from the word. 
1171 SDValue ExVec = DAG.getBitcast(tyVector(ty(ExWord), ElemTy), ExWord); 1172 return extractVector(ExVec, SubIdx, dl, ElemTy, MVT::i32, DAG); 1173 } 1174 1175 SDValue 1176 HexagonTargetLowering::extractHvxElementPred(SDValue VecV, SDValue IdxV, 1177 const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const { 1178 // Implement other return types if necessary. 1179 assert(ResTy == MVT::i1); 1180 1181 unsigned HwLen = Subtarget.getVectorLength(); 1182 MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen); 1183 SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV); 1184 1185 unsigned Scale = HwLen / ty(VecV).getVectorNumElements(); 1186 SDValue ScV = DAG.getConstant(Scale, dl, MVT::i32); 1187 IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, ScV); 1188 1189 SDValue ExtB = extractHvxElementReg(ByteVec, IdxV, dl, MVT::i32, DAG); 1190 SDValue Zero = DAG.getTargetConstant(0, dl, MVT::i32); 1191 return getInstr(Hexagon::C2_cmpgtui, dl, MVT::i1, {ExtB, Zero}, DAG); 1192 } 1193 1194 SDValue 1195 HexagonTargetLowering::insertHvxElementReg(SDValue VecV, SDValue IdxV, 1196 SDValue ValV, const SDLoc &dl, SelectionDAG &DAG) const { 1197 MVT ElemTy = ty(VecV).getVectorElementType(); 1198 1199 unsigned ElemWidth = ElemTy.getSizeInBits(); 1200 assert(ElemWidth >= 8 && ElemWidth <= 32); 1201 (void)ElemWidth; 1202 1203 auto InsertWord = [&DAG,&dl,this] (SDValue VecV, SDValue ValV, 1204 SDValue ByteIdxV) { 1205 MVT VecTy = ty(VecV); 1206 unsigned HwLen = Subtarget.getVectorLength(); 1207 SDValue MaskV = DAG.getNode(ISD::AND, dl, MVT::i32, 1208 {ByteIdxV, DAG.getConstant(-4, dl, MVT::i32)}); 1209 SDValue RotV = DAG.getNode(HexagonISD::VROR, dl, VecTy, {VecV, MaskV}); 1210 SDValue InsV = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy, {RotV, ValV}); 1211 SDValue SubV = DAG.getNode(ISD::SUB, dl, MVT::i32, 1212 {DAG.getConstant(HwLen, dl, MVT::i32), MaskV}); 1213 SDValue TorV = DAG.getNode(HexagonISD::VROR, dl, VecTy, {InsV, SubV}); 1214 return TorV; 1215 }; 1216 1217 SDValue ByteIdx = 
convertToByteIndex(IdxV, ElemTy, DAG); 1218 if (ElemTy == MVT::i32) 1219 return InsertWord(VecV, ValV, ByteIdx); 1220 1221 // If this is not inserting a 32-bit word, convert it into such a thing. 1222 // 1. Extract the existing word from the target vector. 1223 SDValue WordIdx = DAG.getNode(ISD::SRL, dl, MVT::i32, 1224 {ByteIdx, DAG.getConstant(2, dl, MVT::i32)}); 1225 SDValue Ext = extractHvxElementReg(opCastElem(VecV, MVT::i32, DAG), WordIdx, 1226 dl, MVT::i32, DAG); 1227 1228 // 2. Treating the extracted word as a 32-bit vector, insert the given 1229 // value into it. 1230 SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG); 1231 MVT SubVecTy = tyVector(ty(Ext), ElemTy); 1232 SDValue Ins = insertVector(DAG.getBitcast(SubVecTy, Ext), 1233 ValV, SubIdx, dl, ElemTy, DAG); 1234 1235 // 3. Insert the 32-bit word back into the original vector. 1236 return InsertWord(VecV, Ins, ByteIdx); 1237 } 1238 1239 SDValue 1240 HexagonTargetLowering::insertHvxElementPred(SDValue VecV, SDValue IdxV, 1241 SDValue ValV, const SDLoc &dl, SelectionDAG &DAG) const { 1242 unsigned HwLen = Subtarget.getVectorLength(); 1243 MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen); 1244 SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV); 1245 1246 unsigned Scale = HwLen / ty(VecV).getVectorNumElements(); 1247 SDValue ScV = DAG.getConstant(Scale, dl, MVT::i32); 1248 IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, ScV); 1249 ValV = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, ValV); 1250 1251 SDValue InsV = insertHvxElementReg(ByteVec, IdxV, ValV, dl, DAG); 1252 return DAG.getNode(HexagonISD::V2Q, dl, ty(VecV), InsV); 1253 } 1254 1255 SDValue 1256 HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV, 1257 SDValue IdxV, const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const { 1258 MVT VecTy = ty(VecV); 1259 unsigned HwLen = Subtarget.getVectorLength(); 1260 unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue(); 1261 MVT ElemTy = 
VecTy.getVectorElementType(); 1262 unsigned ElemWidth = ElemTy.getSizeInBits(); 1263 1264 // If the source vector is a vector pair, get the single vector containing 1265 // the subvector of interest. The subvector will never overlap two single 1266 // vectors. 1267 if (isHvxPairTy(VecTy)) { 1268 if (Idx * ElemWidth >= 8*HwLen) 1269 Idx -= VecTy.getVectorNumElements() / 2; 1270 1271 VecV = OrigOp; 1272 if (typeSplit(VecTy).first == ResTy) 1273 return VecV; 1274 } 1275 1276 // The only meaningful subvectors of a single HVX vector are those that 1277 // fit in a scalar register. 1278 assert(ResTy.getSizeInBits() == 32 || ResTy.getSizeInBits() == 64); 1279 1280 MVT WordTy = tyVector(VecTy, MVT::i32); 1281 SDValue WordVec = DAG.getBitcast(WordTy, VecV); 1282 unsigned WordIdx = (Idx*ElemWidth) / 32; 1283 1284 SDValue W0Idx = DAG.getConstant(WordIdx, dl, MVT::i32); 1285 SDValue W0 = extractHvxElementReg(WordVec, W0Idx, dl, MVT::i32, DAG); 1286 if (ResTy.getSizeInBits() == 32) 1287 return DAG.getBitcast(ResTy, W0); 1288 1289 SDValue W1Idx = DAG.getConstant(WordIdx+1, dl, MVT::i32); 1290 SDValue W1 = extractHvxElementReg(WordVec, W1Idx, dl, MVT::i32, DAG); 1291 SDValue WW = getCombine(W1, W0, dl, MVT::i64, DAG); 1292 return DAG.getBitcast(ResTy, WW); 1293 } 1294 1295 SDValue 1296 HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, 1297 const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const { 1298 MVT VecTy = ty(VecV); 1299 unsigned HwLen = Subtarget.getVectorLength(); 1300 MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen); 1301 SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV); 1302 // IdxV is required to be a constant. 
  unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();

  unsigned ResLen = ResTy.getVectorNumElements();
  unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
  unsigned Offset = Idx * BitBytes;
  SDValue Undef = DAG.getUNDEF(ByteTy);
  SmallVector<int,128> Mask;

  if (Subtarget.isHVXVectorType(ResTy, true)) {
    // Converting between two vector predicates. Since the result is shorter
    // than the source, it will correspond to a vector predicate with the
    // relevant bits replicated. The replication count is the ratio of the
    // source and target vector lengths.
    unsigned Rep = VecTy.getVectorNumElements() / ResLen;
    assert(isPowerOf2_32(Rep) && HwLen % Rep == 0);
    for (unsigned i = 0; i != HwLen/Rep; ++i) {
      for (unsigned j = 0; j != Rep; ++j)
        Mask.push_back(i + Offset);
    }
    SDValue ShuffV = DAG.getVectorShuffle(ByteTy, dl, ByteVec, Undef, Mask);
    return DAG.getNode(HexagonISD::V2Q, dl, ResTy, ShuffV);
  }

  // Converting between a vector predicate and a scalar predicate. In the
  // vector predicate, a group of BitBytes bits will correspond to a single
  // i1 element of the source vector type. Those bits will all have the same
  // value. The same will be true for ByteVec, where each byte corresponds
  // to a bit in the vector predicate.
  // The algorithm is to traverse the ByteVec, going over the i1 values from
  // the source vector, and generate the corresponding representation in an
  // 8-byte vector. To avoid repeated extracts from ByteVec, shuffle the
  // elements so that the interesting 8 bytes will be in the low end of the
  // vector.
  unsigned Rep = 8 / ResLen;
  // Make sure the output fill the entire vector register, so repeat the
  // 8-byte groups as many times as necessary.
  for (unsigned r = 0; r != HwLen/ResLen; ++r) {
    // This will generate the indexes of the 8 interesting bytes.
    for (unsigned i = 0; i != ResLen; ++i) {
      for (unsigned j = 0; j != Rep; ++j)
        Mask.push_back(Offset + i*BitBytes);
    }
  }

  SDValue Zero = getZero(dl, MVT::i32, DAG);
  SDValue ShuffV = DAG.getVectorShuffle(ByteTy, dl, ByteVec, Undef, Mask);
  // Combine the two low words from ShuffV into a v8i8, and byte-compare
  // them against 0.
  SDValue W0 = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32, {ShuffV, Zero});
  SDValue W1 = DAG.getNode(HexagonISD::VEXTRACTW, dl, MVT::i32,
                           {ShuffV, DAG.getConstant(4, dl, MVT::i32)});
  SDValue Vec64 = getCombine(W1, W0, dl, MVT::v8i8, DAG);
  return getInstr(Hexagon::A4_vcmpbgtui, dl, ResTy,
                  {Vec64, DAG.getTargetConstant(0, dl, MVT::i32)}, DAG);
}

// Insert SubV into VecV at element index IdxV. SubV is either a single HVX
// vector being inserted into a pair, or a 32-/64-bit subvector being
// inserted into a single vector.
SDValue
HexagonTargetLowering::insertHvxSubvectorReg(SDValue VecV, SDValue SubV,
      SDValue IdxV, const SDLoc &dl, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ElemTy = VecTy.getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();

  bool IsPair = isHvxPairTy(VecTy);
  MVT SingleTy = MVT::getVectorVT(ElemTy, (8*HwLen)/ElemWidth);
  // The two single vectors that VecV consists of, if it's a pair.
  SDValue V0, V1;
  SDValue SingleV = VecV;
  SDValue PickHi;

  if (IsPair) {
    V0 = LoHalf(VecV, DAG);
    V1 = HiHalf(VecV, DAG);

    // PickHi is true when the insertion point lies in the high half.
    // NOTE(review): SETUGT classifies IdxV == HalfV as the low half --
    // confirm an index equal to HalfV can only reach the constant
    // single-vector path (handled via subregisters below).
    SDValue HalfV = DAG.getConstant(SingleTy.getVectorNumElements(),
                                    dl, MVT::i32);
    PickHi = DAG.getSetCC(dl, MVT::i1, IdxV, HalfV, ISD::SETUGT);
    if (isHvxSingleTy(SubTy)) {
      if (const auto *CN = dyn_cast<const ConstantSDNode>(IdxV.getNode())) {
        unsigned Idx = CN->getZExtValue();
        assert(Idx == 0 || Idx == VecTy.getVectorNumElements()/2);
        unsigned SubIdx = (Idx == 0) ? Hexagon::vsub_lo : Hexagon::vsub_hi;
        return DAG.getTargetInsertSubreg(SubIdx, dl, VecTy, VecV, SubV);
      }
      // If IdxV is not a constant, generate the two variants: with the
      // SubV as the high and as the low subregister, and select the right
      // pair based on the IdxV.
      SDValue InLo = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {SubV, V1});
      SDValue InHi = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {V0, SubV});
      return DAG.getNode(ISD::SELECT, dl, VecTy, PickHi, InHi, InLo);
    }
    // The subvector being inserted must be entirely contained in one of
    // the vectors V0 or V1. Set SingleV to the correct one, and update
    // IdxV to be the index relative to the beginning of that vector.
    SDValue S = DAG.getNode(ISD::SUB, dl, MVT::i32, IdxV, HalfV);
    IdxV = DAG.getNode(ISD::SELECT, dl, MVT::i32, PickHi, S, IdxV);
    SingleV = DAG.getNode(ISD::SELECT, dl, SingleTy, PickHi, V1, V0);
  }

  // The only meaningful subvectors of a single HVX vector are those that
  // fit in a scalar register.
  assert(SubTy.getSizeInBits() == 32 || SubTy.getSizeInBits() == 64);
  // Convert IdxV to be index in bytes.
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
    IdxV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                       DAG.getConstant(ElemWidth/8, dl, MVT::i32));
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV, IdxV);
  }
  // When inserting a single word, the rotation back to the original position
  // would be by HwLen-Idx, but if two words are inserted, it will need to be
  // by (HwLen-4)-Idx.
  unsigned RolBase = HwLen;
  if (SubTy.getSizeInBits() == 32) {
    SDValue V = DAG.getBitcast(MVT::i32, SubV);
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, SingleV, V);
  } else {
    // 64-bit subvector: insert the low word, rotate by 4 bytes, and insert
    // the high word.
    SDValue V = DAG.getBitcast(MVT::i64, SubV);
    SDValue R0 = LoHalf(V, DAG);
    SDValue R1 = HiHalf(V, DAG);
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, SingleV, R0);
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV,
                          DAG.getConstant(4, dl, MVT::i32));
    SingleV = DAG.getNode(HexagonISD::VINSERTW0, dl, SingleTy, SingleV, R1);
    RolBase = HwLen-4;
  }
  // If the vector wasn't ror'ed, don't ror it back.
  if (RolBase != 4 || !IdxN || !IdxN->isZero()) {
    SDValue RolV = DAG.getNode(ISD::SUB, dl, MVT::i32,
                               DAG.getConstant(RolBase, dl, MVT::i32), IdxV);
    SingleV = DAG.getNode(HexagonISD::VROR, dl, SingleTy, SingleV, RolV);
  }

  if (IsPair) {
    SDValue InLo = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {SingleV, V1});
    SDValue InHi = DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, {V0, SingleV});
    return DAG.getNode(ISD::SELECT, dl, VecTy, PickHi, InHi, InLo);
  }
  return SingleV;
}

SDValue
HexagonTargetLowering::insertHvxSubvectorPred(SDValue VecV, SDValue SubV,
      SDValue IdxV, const SDLoc &dl, SelectionDAG &DAG) const {
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  assert(Subtarget.isHVXVectorType(VecTy, true));
  // VecV is an HVX vector predicate. SubV may be either an HVX vector
  // predicate as well, or it can be a scalar predicate.

  unsigned VecLen = VecTy.getVectorNumElements();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(HwLen % VecLen == 0 && "Unexpected vector type");

  unsigned Scale = VecLen / SubTy.getVectorNumElements();
  unsigned BitBytes = HwLen / VecLen;
  // BlockLen is the number of bytes (in byte-vector form) occupied by the
  // inserted subvector: SubTy's element count times BitBytes.
  unsigned BlockLen = HwLen / Scale;

  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
  SDValue ByteSub = createHvxPrefixPred(SubV, dl, BitBytes, false, DAG);
  SDValue ByteIdx;

  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
    ByteIdx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
                          DAG.getConstant(BitBytes, dl, MVT::i32));
    ByteVec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, ByteVec, ByteIdx);
  }

  // ByteVec is the target vector VecV rotated in such a way that the
  // subvector should be inserted at index 0. Generate a predicate mask
  // and use vmux to do the insertion.
  assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                       {DAG.getConstant(BlockLen, dl, MVT::i32)}, DAG);
  ByteVec = getInstr(Hexagon::V6_vmux, dl, ByteTy, {Q, ByteSub, ByteVec}, DAG);
  // Rotate ByteVec back, and convert to a vector predicate.
  if (!IdxN || !IdxN->isZero()) {
    SDValue HwLenV = DAG.getConstant(HwLen, dl, MVT::i32);
    SDValue ByteXdi = DAG.getNode(ISD::SUB, dl, MVT::i32, HwLenV, ByteIdx);
    ByteVec = DAG.getNode(HexagonISD::VROR, dl, ByteTy, ByteVec, ByteXdi);
  }
  return DAG.getNode(HexagonISD::V2Q, dl, VecTy, ByteVec);
}

SDValue
HexagonTargetLowering::extendHvxVectorPred(SDValue VecV, const SDLoc &dl,
      MVT ResTy, bool ZeroExt, SelectionDAG &DAG) const {
  // Sign- and any-extending of a vector predicate to a vector register is
  // equivalent to Q2V. For zero-extensions, generate a vmux between 0 and
  // a vector of 1s (where the 1s are of type matching the vector type).
  assert(Subtarget.isHVXVectorType(ResTy));
  if (!ZeroExt)
    return DAG.getNode(HexagonISD::Q2V, dl, ResTy, VecV);

  assert(ty(VecV).getVectorNumElements() == ResTy.getVectorNumElements());
  SDValue True = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue False = getZero(dl, ResTy, DAG);
  return DAG.getSelect(dl, ResTy, VecV, True, False);
}

SDValue
HexagonTargetLowering::compressHvxPred(SDValue VecQ, const SDLoc &dl,
      MVT ResTy, SelectionDAG &DAG) const {
  // Given a predicate register VecQ, transfer bits VecQ[0..HwLen-1]
  // (i.e. the entire predicate register) to bits [0..HwLen-1] of a
  // vector register. The remaining bits of the vector register are
  // unspecified.

  MachineFunction &MF = DAG.getMachineFunction();
  unsigned HwLen = Subtarget.getVectorLength();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  MVT PredTy = ty(VecQ);
  unsigned PredLen = PredTy.getVectorNumElements();
  assert(HwLen % PredLen == 0);
  MVT VecTy = MVT::getVectorVT(MVT::getIntegerVT(8*HwLen/PredLen), PredLen);

  Type *Int8Ty = Type::getInt8Ty(*DAG.getContext());
  SmallVector<Constant*, 128> Tmp;
  // Create an array of bytes (hex): 01,02,04,08,10,20,40,80, 01,02,04,08,...
  // These are bytes with the LSB rotated left with respect to their index.
  for (unsigned i = 0; i != HwLen/8; ++i) {
    for (unsigned j = 0; j != 8; ++j)
      Tmp.push_back(ConstantInt::get(Int8Ty, 1ull << j));
  }
  Constant *CV = ConstantVector::get(Tmp);
  Align Alignment(HwLen);
  SDValue CP =
      LowerConstantPool(DAG.getConstantPool(CV, ByteTy, Alignment), DAG);
  SDValue Bytes =
      DAG.getLoad(ByteTy, dl, DAG.getEntryNode(), CP,
                  MachinePointerInfo::getConstantPool(MF), Alignment);

  // Select the bytes that correspond to true bits in the vector predicate.
  SDValue Sel = DAG.getSelect(dl, VecTy, VecQ, DAG.getBitcast(VecTy, Bytes),
                              getZero(dl, VecTy, DAG));
  // Calculate the OR of all bytes in each group of 8. That will compress
  // all the individual bits into a single byte.
  // First, OR groups of 4, via vrmpy with 0x01010101.
  SDValue All1 =
      DAG.getSplatBuildVector(MVT::v4i8, dl, DAG.getConstant(1, dl, MVT::i32));
  SDValue Vrmpy = getInstr(Hexagon::V6_vrmpyub, dl, ByteTy, {Sel, All1}, DAG);
  // Then rotate the accumulated vector by 4 bytes, and do the final OR.
  SDValue Rot = getInstr(Hexagon::V6_valignbi, dl, ByteTy,
                         {Vrmpy, Vrmpy, DAG.getTargetConstant(4, dl, MVT::i32)}, DAG);
  SDValue Vor = DAG.getNode(ISD::OR, dl, ByteTy, {Vrmpy, Rot});

  // Pick every 8th byte and coalesce them at the beginning of the output.
  // For symmetry, coalesce every 1+8th byte after that, then every 2+8th
  // byte and so on.
  SmallVector<int,128> Mask;
  for (unsigned i = 0; i != HwLen; ++i)
    Mask.push_back((8*i) % HwLen + i/(HwLen/8));
  SDValue Collect =
      DAG.getVectorShuffle(ByteTy, dl, Vor, DAG.getUNDEF(ByteTy), Mask);
  return DAG.getBitcast(ResTy, Collect);
}

SDValue
HexagonTargetLowering::resizeToWidth(SDValue VecV, MVT ResTy, bool Signed,
      const SDLoc &dl, SelectionDAG &DAG) const {
  // Take a vector and resize the element type to match the given type.
  MVT InpTy = ty(VecV);
  if (InpTy == ResTy)
    return VecV;

  unsigned InpWidth = InpTy.getSizeInBits();
  unsigned ResWidth = ResTy.getSizeInBits();

  if (InpTy.isFloatingPoint()) {
    return InpWidth < ResWidth ? DAG.getNode(ISD::FP_EXTEND, dl, ResTy, VecV)
                               : DAG.getNode(ISD::FP_ROUND, dl, ResTy, VecV,
                                             getZero(dl, MVT::i32, DAG));
  }

  assert(InpTy.isInteger());

  if (InpWidth < ResWidth) {
    unsigned ExtOpc = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, ResTy, VecV);
  } else {
    // Integer narrowing uses the Hexagon saturating-narrow nodes
    // (SSAT/USAT) rather than plain truncation.
    unsigned NarOpc = Signed ? HexagonISD::SSAT : HexagonISD::USAT;
    return DAG.getNode(NarOpc, dl, ResTy, VecV, DAG.getValueType(ResTy));
  }
}

// Extract the SubIdx-th subvector of type SubTy from Vec, expressed as a
// generic EXTRACT_SUBVECTOR at the corresponding element offset.
SDValue
HexagonTargetLowering::extractSubvector(SDValue Vec, MVT SubTy, unsigned SubIdx,
      SelectionDAG &DAG) const {
  assert(ty(Vec).getSizeInBits() % SubTy.getSizeInBits() == 0);

  const SDLoc &dl(Vec);
  unsigned ElemIdx = SubIdx * SubTy.getVectorNumElements();
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubTy,
                     {Vec, DAG.getConstant(ElemIdx, dl, MVT::i32)});
}

// Lower BUILD_VECTOR for HVX types: split vector pairs, handle boolean and
// f16 element types specially, otherwise build a single vector register.
SDValue
HexagonTargetLowering::LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG)
      const {
  const SDLoc &dl(Op);
  MVT VecTy = ty(Op);

  unsigned Size = Op.getNumOperands();
  SmallVector<SDValue,128> Ops;
  for (unsigned i = 0; i != Size; ++i)
    Ops.push_back(Op.getOperand(i));

  // First, split the BUILD_VECTOR for vector pairs. We could generate
  // some pairs directly (via splat), but splats should be generated
  // by the combiner prior to getting here.
1619 if (VecTy.getSizeInBits() == 16*Subtarget.getVectorLength()) { 1620 ArrayRef<SDValue> A(Ops); 1621 MVT SingleTy = typeSplit(VecTy).first; 1622 SDValue V0 = buildHvxVectorReg(A.take_front(Size/2), dl, SingleTy, DAG); 1623 SDValue V1 = buildHvxVectorReg(A.drop_front(Size/2), dl, SingleTy, DAG); 1624 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VecTy, V0, V1); 1625 } 1626 1627 if (VecTy.getVectorElementType() == MVT::i1) 1628 return buildHvxVectorPred(Ops, dl, VecTy, DAG); 1629 1630 // In case of MVT::f16 BUILD_VECTOR, since MVT::f16 is 1631 // not a legal type, just bitcast the node to use i16 1632 // types and bitcast the result back to f16 1633 if (VecTy.getVectorElementType() == MVT::f16) { 1634 SmallVector<SDValue,64> NewOps; 1635 for (unsigned i = 0; i != Size; i++) 1636 NewOps.push_back(DAG.getBitcast(MVT::i16, Ops[i])); 1637 1638 SDValue T0 = DAG.getNode(ISD::BUILD_VECTOR, dl, 1639 tyVector(VecTy, MVT::i16), NewOps); 1640 return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0); 1641 } 1642 1643 return buildHvxVectorReg(Ops, dl, VecTy, DAG); 1644 } 1645 1646 SDValue 1647 HexagonTargetLowering::LowerHvxSplatVector(SDValue Op, SelectionDAG &DAG) 1648 const { 1649 const SDLoc &dl(Op); 1650 MVT VecTy = ty(Op); 1651 MVT ArgTy = ty(Op.getOperand(0)); 1652 1653 if (ArgTy == MVT::f16) { 1654 MVT SplatTy = MVT::getVectorVT(MVT::i16, VecTy.getVectorNumElements()); 1655 SDValue ToInt16 = DAG.getBitcast(MVT::i16, Op.getOperand(0)); 1656 SDValue ToInt32 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, ToInt16); 1657 SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, dl, SplatTy, ToInt32); 1658 return DAG.getBitcast(VecTy, Splat); 1659 } 1660 1661 return SDValue(); 1662 } 1663 1664 SDValue 1665 HexagonTargetLowering::LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) 1666 const { 1667 // Vector concatenation of two integer (non-bool) vectors does not need 1668 // special lowering. Custom-lower concats of bool vectors and expand 1669 // concats of more than 2 vectors. 
  MVT VecTy = ty(Op);
  const SDLoc &dl(Op);
  unsigned NumOp = Op.getNumOperands();
  if (VecTy.getVectorElementType() != MVT::i1) {
    // Two-operand non-bool concat is already fine as-is.
    if (NumOp == 2)
      return Op;
    // Expand the other cases into a build-vector.
    SmallVector<SDValue,8> Elems;
    for (SDValue V : Op.getNode()->ops())
      DAG.ExtractVectorElements(V, Elems);
    // A vector of i16 will be broken up into a build_vector of i16's.
    // This is a problem, since at the time of operation legalization,
    // all operations are expected to be type-legalized, and i16 is not
    // a legal type. If any of the extracted elements is not of a valid
    // type, sign-extend it to a valid one.
    for (unsigned i = 0, e = Elems.size(); i != e; ++i) {
      SDValue V = Elems[i];
      MVT Ty = ty(V);
      if (!isTypeLegal(Ty)) {
        MVT NTy = typeLegalize(Ty, DAG);
        if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
          // Re-extract at the legal width and mark the original width via
          // SIGN_EXTEND_INREG so the value is properly sign-extended.
          Elems[i] = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NTy,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NTy,
                                     V.getOperand(0), V.getOperand(1)),
                         DAG.getValueType(Ty));
          continue;
        }
        // A few less complicated cases.
        switch (V.getOpcode()) {
          case ISD::Constant:
            Elems[i] = DAG.getSExtOrTrunc(V, dl, NTy);
            break;
          case ISD::UNDEF:
            Elems[i] = DAG.getUNDEF(NTy);
            break;
          case ISD::TRUNCATE:
            // Undo the truncate: the pre-truncate value is already wide.
            Elems[i] = V.getOperand(0);
            break;
          default:
            llvm_unreachable("Unexpected vector element");
        }
      }
    }
    return DAG.getBuildVector(VecTy, dl, Elems);
  }

  // Bool-vector concatenation below.
  assert(VecTy.getVectorElementType() == MVT::i1);
  unsigned HwLen = Subtarget.getVectorLength();
  assert(isPowerOf2_32(NumOp) && HwLen % NumOp == 0);

  SDValue Op0 = Op.getOperand(0);

  // If the operands are HVX types (i.e. not scalar predicates), then
  // defer the concatenation, and create QCAT instead.
  if (Subtarget.isHVXVectorType(ty(Op0), true)) {
    if (NumOp == 2)
      return DAG.getNode(HexagonISD::QCAT, dl, VecTy, Op0, Op.getOperand(1));

    ArrayRef<SDUse> U(Op.getNode()->ops());
    SmallVector<SDValue,4> SV(U.begin(), U.end());
    ArrayRef<SDValue> Ops(SV);

    // More than two operands: concat each half recursively, then QCAT the
    // two halves.
    MVT HalfTy = typeSplit(VecTy).first;
    SDValue V0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfTy,
                             Ops.take_front(NumOp/2));
    SDValue V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfTy,
                             Ops.take_back(NumOp/2));
    return DAG.getNode(HexagonISD::QCAT, dl, VecTy, V0, V1);
  }

  // Count how many bytes (in a vector register) each bit in VecTy
  // corresponds to.
  unsigned BitBytes = HwLen / VecTy.getVectorNumElements();

  // Expand each (scalar-predicate) operand into a byte-vector prefix.
  SmallVector<SDValue,8> Prefixes;
  for (SDValue V : Op.getNode()->op_values()) {
    SDValue P = createHvxPrefixPred(V, dl, BitBytes, true, DAG);
    Prefixes.push_back(P);
  }

  // OR the prefixes together, rotating by one input's byte width between
  // each, walking the operands last-to-first so the first operand ends up
  // in the lowest position.
  unsigned InpLen = ty(Op.getOperand(0)).getVectorNumElements();
  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  SDValue S = DAG.getConstant(InpLen*BitBytes, dl, MVT::i32);
  SDValue Res = getZero(dl, ByteTy, DAG);
  for (unsigned i = 0, e = Prefixes.size(); i != e; ++i) {
    Res = DAG.getNode(HexagonISD::VROR, dl, ByteTy, Res, S);
    Res = DAG.getNode(ISD::OR, dl, ByteTy, Res, Prefixes[e-i-1]);
  }
  // Convert the byte vector back into a predicate.
  return DAG.getNode(HexagonISD::V2Q, dl, VecTy, Res);
}

SDValue
HexagonTargetLowering::LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG)
      const {
  // Change the type of the extracted element to i32.
  SDValue VecV = Op.getOperand(0);
  MVT ElemTy = ty(VecV).getVectorElementType();
  const SDLoc &dl(Op);
  SDValue IdxV = Op.getOperand(1);
  // Predicate (i1) vectors and regular vectors take different paths.
  if (ElemTy == MVT::i1)
    return extractHvxElementPred(VecV, IdxV, dl, ty(Op), DAG);

  return extractHvxElementReg(VecV, IdxV, dl, ty(Op), DAG);
}

// Custom-lower INSERT_VECTOR_ELT: dispatch on element type (predicate,
// f16-via-i16-bitcast, or regular register insert).
SDValue
HexagonTargetLowering::LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG)
      const {
  const SDLoc &dl(Op);
  MVT VecTy = ty(Op);
  SDValue VecV = Op.getOperand(0);
  SDValue ValV = Op.getOperand(1);
  SDValue IdxV = Op.getOperand(2);
  MVT ElemTy = ty(VecV).getVectorElementType();
  if (ElemTy == MVT::i1)
    return insertHvxElementPred(VecV, IdxV, ValV, dl, DAG);

  if (ElemTy == MVT::f16) {
    // f16 is not legal: do the insert on the i16-typed bitcast and cast back.
    SDValue T0 = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
        tyVector(VecTy, MVT::i16),
        DAG.getBitcast(tyVector(VecTy, MVT::i16), VecV),
        DAG.getBitcast(MVT::i16, ValV), IdxV);
    return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0);
  }

  return insertHvxElementReg(VecV, IdxV, ValV, dl, DAG);
}

// Custom-lower EXTRACT_SUBVECTOR; the index must be a constant multiple of
// the result's element count (asserted below).
SDValue
HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
      const {
  SDValue SrcV = Op.getOperand(0);
  MVT SrcTy = ty(SrcV);
  MVT DstTy = ty(Op);
  SDValue IdxV = Op.getOperand(1);
  unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
  assert(Idx % DstTy.getVectorNumElements() == 0);
  (void)Idx;  // Only used in the assertion.
  const SDLoc &dl(Op);

  MVT ElemTy = SrcTy.getVectorElementType();
  if (ElemTy == MVT::i1)
    return extractHvxSubvectorPred(SrcV, IdxV, dl, DstTy, DAG);

  return extractHvxSubvectorReg(Op, SrcV, IdxV, dl, DstTy, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG)
      const {
  // Idx does not need to be a constant.
  SDValue VecV = Op.getOperand(0);
  SDValue ValV = Op.getOperand(1);
  SDValue IdxV = Op.getOperand(2);

  const SDLoc &dl(Op);
  MVT VecTy = ty(VecV);
  MVT ElemTy = VecTy.getVectorElementType();
  // Dispatch on predicate vs. regular vector.
  if (ElemTy == MVT::i1)
    return insertHvxSubvectorPred(VecV, ValV, IdxV, dl, DAG);

  return insertHvxSubvectorReg(VecV, ValV, IdxV, dl, DAG);
}

SDValue
HexagonTargetLowering::LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const {
  // Lower any-extends of boolean vectors to sign-extends, since they
  // translate directly to Q2V. Zero-extending could also be done equally
  // fast, but Q2V is used/recognized in more places.
  // For all other vectors, use zero-extend.
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return LowerHvxSignExt(Op, DAG);
  return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Op), ResTy, InpV);
}

// Sign-extension is only custom-lowered for bool (i1) vectors; everything
// else is returned unchanged (legal).
SDValue
HexagonTargetLowering::LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), false, DAG);
  return Op;
}

// Zero-extension: same structure as sign-extension, with ZeroExt=true.
SDValue
HexagonTargetLowering::LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  MVT ElemTy = ty(InpV).getVectorElementType();
  if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
    return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), true, DAG);
  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const {
  // Lower vector CTTZ into a computation using CTLZ (Hacker's Delight):
  // cttz(x) = bitwidth(x) - ctlz(~x & (x-1))
  const SDLoc &dl(Op);
  MVT ResTy = ty(Op);
  SDValue InpV = Op.getOperand(0);
  assert(ResTy == ty(InpV));

  // Calculate the vectors of 1 and bitwidth(x).
  MVT ElemTy = ty(InpV).getVectorElementType();
  unsigned ElemWidth = ElemTy.getSizeInBits();

  // Splats of 1, element-bitwidth, and -1 (all-ones, for the NOT below).
  SDValue Vec1 = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue VecW = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                             DAG.getConstant(ElemWidth, dl, MVT::i32));
  SDValue VecN1 = DAG.getNode(ISD::SPLAT_VECTOR, dl, ResTy,
                              DAG.getConstant(-1, dl, MVT::i32));

  // Do not use DAG.getNOT, because that would create BUILD_VECTOR with
  // a BITCAST. Here we can skip the BITCAST (so we don't have to handle
  // it separately in custom combine or selection).
  // A = ~x & (x-1): all-ones below the lowest set bit of x, zero elsewhere.
  SDValue A = DAG.getNode(ISD::AND, dl, ResTy,
                          {DAG.getNode(ISD::XOR, dl, ResTy, {InpV, VecN1}),
                           DAG.getNode(ISD::SUB, dl, ResTy, {InpV, Vec1})});
  return DAG.getNode(ISD::SUB, dl, ResTy,
                     {VecW, DAG.getNode(ISD::CTLZ, dl, ResTy, A)});
}

// Lower MULHU/MULHS (high half of a product) of i32 vectors by producing a
// full LOHI product and keeping the high result.
SDValue
HexagonTargetLowering::LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  MVT ResTy = ty(Op);
  assert(ResTy.getVectorElementType() == MVT::i32);

  SDValue Vs = Op.getOperand(0);
  SDValue Vt = Op.getOperand(1);

  // LOHI nodes produce two results (low and high halves).
  SDVTList ResTys = DAG.getVTList(ResTy, ResTy);
  unsigned Opc = Op.getOpcode();

  // On HVX v62+ producing the full product is cheap, so legalize MULH to LOHI.
  // getValue(1) selects the HI half of the LOHI pair.
  if (Opc == ISD::MULHU)
    return DAG.getNode(HexagonISD::UMUL_LOHI, dl, ResTys, {Vs, Vt}).getValue(1);
  if (Opc == ISD::MULHS)
    return DAG.getNode(HexagonISD::SMUL_LOHI, dl, ResTys, {Vs, Vt}).getValue(1);

#ifndef NDEBUG
  Op.dump(&DAG);
#endif
  llvm_unreachable("Unexpected mulh operation");
}

// Lower the Hexagon [SU]MUL_LOHI nodes (full-width multiply producing a
// {Lo, Hi} pair), choosing the cheapest expansion for the uses present.
SDValue
HexagonTargetLowering::LowerHvxMulLoHi(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned Opc = Op.getOpcode();
  SDValue Vu = Op.getOperand(0);
  SDValue Vv = Op.getOperand(1);

  // If the HI part is not used, convert it to a regular MUL.
  if (auto HiVal = Op.getValue(1); HiVal.use_empty()) {
    // Need to preserve the types and the number of values.
    SDValue Hi = DAG.getUNDEF(ty(HiVal));
    SDValue Lo = DAG.getNode(ISD::MUL, dl, ty(Op), {Vu, Vv});
    return DAG.getMergeValues({Lo, Hi}, dl);
  }

  // USMUL_LOHI is unsigned*signed; SMUL_LOHI is signed*signed.
  bool SignedVu = Opc == HexagonISD::SMUL_LOHI;
  bool SignedVv = Opc == HexagonISD::SMUL_LOHI || Opc == HexagonISD::USMUL_LOHI;

  // Legal on HVX v62+, but lower it here because patterns can't handle multi-
  // valued nodes.
  if (Subtarget.useHVXV62Ops())
    return emitHvxMulLoHiV62(Vu, SignedVu, Vv, SignedVv, dl, DAG);

  if (Opc == HexagonISD::SMUL_LOHI) {
    // Direct MULHS expansion is cheaper than doing the whole SMUL_LOHI,
    // for other signedness LOHI is cheaper.
    // If only the HI half is used, emit just the MULHS expansion.
    if (auto LoVal = Op.getValue(0); LoVal.use_empty()) {
      SDValue Hi = emitHvxMulHsV60(Vu, Vv, dl, DAG);
      SDValue Lo = DAG.getUNDEF(ty(LoVal));
      return DAG.getMergeValues({Lo, Hi}, dl);
    }
  }

  return emitHvxMulLoHiV60(Vu, SignedVu, Vv, SignedVv, dl, DAG);
}

// Custom-lower BITCAST between HVX predicate vectors and scalar integers
// (in both directions); all other bitcasts pass through unchanged.
SDValue
HexagonTargetLowering::LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const {
  SDValue Val = Op.getOperand(0);
  MVT ResTy = ty(Op);
  MVT ValTy = ty(Val);
  const SDLoc &dl(Op);

  if (isHvxBoolTy(ValTy) && ResTy.isScalarInteger()) {
    // Predicate -> scalar integer: compress the predicate into words, then
    // extract and combine as many 32-bit words as the result needs.
    unsigned HwLen = Subtarget.getVectorLength();
    MVT WordTy = MVT::getVectorVT(MVT::i32, HwLen/4);
    SDValue VQ = compressHvxPred(Val, dl, WordTy, DAG);
    unsigned BitWidth = ResTy.getSizeInBits();

    if (BitWidth < 64) {
      // A single word suffices; truncate if the result is narrower than 32.
      SDValue W0 = extractHvxElementReg(VQ, DAG.getConstant(0, dl, MVT::i32),
                                        dl, MVT::i32, DAG);
      if (BitWidth == 32)
        return W0;
      assert(BitWidth < 32u);
      return DAG.getZExtOrTrunc(W0, dl, ResTy);
    }

    // The result is >= 64 bits. The only options are 64 or 128.
    assert(BitWidth == 64 || BitWidth == 128);
    SmallVector<SDValue,4> Words;
    for (unsigned i = 0; i != BitWidth/32; ++i) {
      SDValue W = extractHvxElementReg(
          VQ, DAG.getConstant(i, dl, MVT::i32), dl, MVT::i32, DAG);
      Words.push_back(W);
    }
    // Pair up the 32-bit words into 64-bit values (low word first).
    SmallVector<SDValue,2> Combines;
    assert(Words.size() % 2 == 0);
    for (unsigned i = 0, e = Words.size(); i < e; i += 2) {
      SDValue C = getCombine(Words[i+1], Words[i], dl, MVT::i64, DAG);
      Combines.push_back(C);
    }

    if (BitWidth == 64)
      return Combines[0];

    return DAG.getNode(ISD::BUILD_PAIR, dl, ResTy, Combines);
  }
  if (isHvxBoolTy(ResTy) && ValTy.isScalarInteger()) {
    // Handle bitcast from i128 -> v128i1 and i64 -> v64i1.
    unsigned BitWidth = ValTy.getSizeInBits();
    unsigned HwLen = Subtarget.getVectorLength();
    // One predicate bit per input bit requires the widths to match.
    assert(BitWidth == HwLen);

    MVT ValAsVecTy = MVT::getVectorVT(MVT::i8, BitWidth / 8);
    SDValue ValAsVec = DAG.getBitcast(ValAsVecTy, Val);
    // Splat each byte of Val 8 times.
    // Bytes = [(b0)x8, (b1)x8, ...., (b15)x8]
    // where b0, b1,..., b15 are least to most significant bytes of I.
    SmallVector<SDValue, 128> Bytes;
    // Tmp: 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80, 0x01,0x02,0x04,0x08,...
    // These are bytes with the LSB rotated left with respect to their index.
    SmallVector<SDValue, 128> Tmp;
    for (unsigned I = 0; I != HwLen / 8; ++I) {
      SDValue Idx = DAG.getConstant(I, dl, MVT::i32);
      SDValue Byte =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, ValAsVec, Idx);
      for (unsigned J = 0; J != 8; ++J) {
        Bytes.push_back(Byte);
        Tmp.push_back(DAG.getConstant(1ull << J, dl, MVT::i8));
      }
    }

    MVT ConstantVecTy = MVT::getVectorVT(MVT::i8, HwLen);
    SDValue ConstantVec = DAG.getBuildVector(ConstantVecTy, dl, Tmp);
    SDValue I2V = buildHvxVectorReg(Bytes, dl, ConstantVecTy, DAG);

    // Each Byte in the I2V will be set iff corresponding bit is set in Val.
    I2V = DAG.getNode(ISD::AND, dl, ConstantVecTy, {I2V, ConstantVec});
    // Convert the nonzero-byte vector into a predicate register.
    return DAG.getNode(HexagonISD::V2Q, dl, ResTy, I2V);
  }

  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const {
  // Sign- and zero-extends are legal.
  // Any-extend in-reg: the undefined high bits may as well be zero, so
  // lower to the zero-extending form.
  assert(Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG);
  return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(Op), ty(Op),
                     Op.getOperand(0));
}

// Custom-lower SELECT of predicate (i1) vectors: convert the operands to
// byte vectors (Q2V), select, then convert back (V2Q). Non-bool selects
// are left unchanged.
SDValue
HexagonTargetLowering::LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const {
  MVT ResTy = ty(Op);
  if (ResTy.getVectorElementType() != MVT::i1)
    return Op;

  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  unsigned VecLen = ResTy.getVectorNumElements();
  assert(HwLen % VecLen == 0);
  // Bytes-per-predicate-bit determines the integer element width used for
  // the intermediate vector select.
  unsigned ElemSize = HwLen / VecLen;

  MVT VecTy = MVT::getVectorVT(MVT::getIntegerVT(ElemSize * 8), VecLen);
  SDValue S =
      DAG.getNode(ISD::SELECT, dl, VecTy, Op.getOperand(0),
                  DAG.getNode(HexagonISD::Q2V, dl, VecTy, Op.getOperand(1)),
                  DAG.getNode(HexagonISD::Q2V, dl, VecTy, Op.getOperand(2)));
  return DAG.getNode(HexagonISD::V2Q, dl, ResTy, S);
}

// Shifts by a splatted scalar amount get a dedicated lowering; otherwise
// the node is left as-is.
SDValue
HexagonTargetLowering::LowerHvxShift(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue S = getVectorShiftByInt(Op, DAG))
    return S;
  return Op;
}

SDValue
HexagonTargetLowering::LowerHvxFunnelShift(SDValue Op,
                                           SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  assert(Opc == ISD::FSHL || Opc == ISD::FSHR);

  // Make sure the shift amount is within the range of the bitwidth
  // of the element type.
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);
  SDValue S = Op.getOperand(2);

  MVT InpTy = ty(A);
  MVT ElemTy = InpTy.getVectorElementType();

  const SDLoc &dl(Op);
  unsigned ElemWidth = ElemTy.getSizeInBits();
  bool IsLeft = Opc == ISD::FSHL;

  // The expansion into regular shifts produces worse code for i8 and for
  // right shift of i32 on v65+.
  bool UseShifts = ElemTy != MVT::i8;
  if (Subtarget.useHVXV65Ops() && ElemTy == MVT::i32)
    UseShifts = false;

  if (SDValue SplatV = getSplatValue(S, DAG); SplatV && UseShifts) {
    // If this is a funnel shift by a scalar, lower it into regular shifts.
    // ModS = shift amount modulo the element width; NegS = its complement.
    SDValue Mask = DAG.getConstant(ElemWidth - 1, dl, MVT::i32);
    SDValue ModS =
        DAG.getNode(ISD::AND, dl, MVT::i32,
                    {DAG.getZExtOrTrunc(SplatV, dl, MVT::i32), Mask});
    SDValue NegS =
        DAG.getNode(ISD::SUB, dl, MVT::i32,
                    {DAG.getConstant(ElemWidth, dl, MVT::i32), ModS});
    SDValue IsZero =
        DAG.getSetCC(dl, MVT::i1, ModS, getZero(dl, MVT::i32, DAG), ISD::SETEQ);
    // FSHL A, B => A << | B >>n
    // FSHR A, B => A <<n | B >>
    SDValue Part1 =
        DAG.getNode(HexagonISD::VASL, dl, InpTy, {A, IsLeft ? ModS : NegS});
    SDValue Part2 =
        DAG.getNode(HexagonISD::VLSR, dl, InpTy, {B, IsLeft ? NegS : ModS});
    SDValue Or = DAG.getNode(ISD::OR, dl, InpTy, {Part1, Part2});
    // If the shift amount was 0, pick A or B, depending on the direction.
    // The opposite shift will also be by 0, so the "Or" will be incorrect.
    return DAG.getNode(ISD::SELECT, dl, InpTy, {IsZero, (IsLeft ? A : B), Or});
  }

  // General (per-element) case: mask the shift amounts and use the Hexagon
  // modulo funnel-shift nodes.
  SDValue Mask = DAG.getSplatBuildVector(
      InpTy, dl, DAG.getConstant(ElemWidth - 1, dl, ElemTy));

  unsigned MOpc = Opc == ISD::FSHL ?
      HexagonISD::MFSHL : HexagonISD::MFSHR;
  return DAG.getNode(MOpc, dl, ty(Op),
                     {A, B, DAG.getNode(ISD::AND, dl, InpTy, {S, Mask})});
}

// Custom-lower a few HVX intrinsics: predicate typecasts become TYPECAST
// nodes, and the vmpy*_parts intrinsics map onto the [SU]MUL_LOHI nodes
// (with their two results swapped to match the intrinsic's order).
SDValue
HexagonTargetLowering::LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SmallVector<SDValue> Ops(Op->ops().begin(), Op->ops().end());

  // The intrinsics return {hi, lo}; the LOHI nodes produce {lo, hi}.
  auto Swap = [&](SDValue P) {
    return DAG.getMergeValues({P.getValue(1), P.getValue(0)}, dl);
  };

  switch (IntNo) {
    case Intrinsic::hexagon_V6_pred_typecast:
    case Intrinsic::hexagon_V6_pred_typecast_128B: {
      MVT ResTy = ty(Op), InpTy = ty(Ops[1]);
      if (isHvxBoolTy(ResTy) && isHvxBoolTy(InpTy)) {
        if (ResTy == InpTy)
          return Ops[1];
        return DAG.getNode(HexagonISD::TYPECAST, dl, ResTy, Ops[1]);
      }
      break;
    }
    case Intrinsic::hexagon_V6_vmpyss_parts:
    case Intrinsic::hexagon_V6_vmpyss_parts_128B:
      return Swap(DAG.getNode(HexagonISD::SMUL_LOHI, dl, Op->getVTList(),
                              {Ops[1], Ops[2]}));
    case Intrinsic::hexagon_V6_vmpyuu_parts:
    case Intrinsic::hexagon_V6_vmpyuu_parts_128B:
      return Swap(DAG.getNode(HexagonISD::UMUL_LOHI, dl, Op->getVTList(),
                              {Ops[1], Ops[2]}));
    case Intrinsic::hexagon_V6_vmpyus_parts:
    case Intrinsic::hexagon_V6_vmpyus_parts_128B: {
      return Swap(DAG.getNode(HexagonISD::USMUL_LOHI, dl, Op->getVTList(),
                              {Ops[1], Ops[2]}));
    }
  } // switch

  return Op;
}

// Custom-lower masked loads and stores (MLOAD/MSTORE) for HVX.
SDValue
HexagonTargetLowering::LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MaskN = cast<MaskedLoadStoreSDNode>(Op.getNode());
  SDValue Mask = MaskN->getMask();
  SDValue Chain = MaskN->getChain();
  SDValue Base = MaskN->getBasePtr();
  auto *MemOp =
      MF.getMachineMemOperand(MaskN->getMemOperand(), 0, HwLen);

  unsigned Opc = Op->getOpcode();
  assert(Opc == ISD::MLOAD || Opc == ISD::MSTORE);

  if (Opc == ISD::MLOAD) {
    // Masked load: do a full load and blend with the pass-through value.
    MVT ValTy = ty(Op);
    SDValue Load = DAG.getLoad(ValTy, dl, Chain, Base, MemOp);
    SDValue Thru = cast<MaskedLoadSDNode>(MaskN)->getPassThru();
    if (isUndef(Thru))
      return Load;
    SDValue VSel = DAG.getNode(ISD::VSELECT, dl, ValTy, Mask, Load, Thru);
    // Return both the blended value and the load's chain.
    return DAG.getMergeValues({VSel, Load.getValue(1)}, dl);
  }

  // MSTORE
  // HVX only has aligned masked stores.

  // TODO: Fold negations of the mask into the store.
  unsigned StoreOpc = Hexagon::V6_vS32b_qpred_ai;
  SDValue Value = cast<MaskedStoreSDNode>(MaskN)->getValue();
  SDValue Offset0 = DAG.getTargetConstant(0, dl, ty(Base));

  if (MaskN->getAlign().value() % HwLen == 0) {
    // Fully aligned: a single predicated store.
    SDValue Store = getInstr(StoreOpc, dl, MVT::Other,
                             {Mask, Base, Offset0, Value, Chain}, DAG);
    DAG.setNodeMemRefs(cast<MachineSDNode>(Store.getNode()), {MemOp});
    return Store;
  }

  // Unaligned case.
  auto StoreAlign = [&](SDValue V, SDValue A) {
    SDValue Z = getZero(dl, ty(V), DAG);
    // TODO: use funnel shifts?
    // vlalign(Vu,Vv,Rt) rotates the pair Vu:Vv left by Rt and takes the
    // upper half.
    // Split V at the misalignment boundary: LoV covers the first (partial)
    // vector, HiV the second.
    SDValue LoV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {V, Z, A}, DAG);
    SDValue HiV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {Z, V, A}, DAG);
    return std::make_pair(LoV, HiV);
  };

  MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  // Align the mask the same way as the value: expand it to bytes, shift,
  // and convert both halves back to predicates.
  SDValue MaskV = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, Mask);
  VectorPair Tmp = StoreAlign(MaskV, Base);
  VectorPair MaskU = {DAG.getNode(HexagonISD::V2Q, dl, BoolTy, Tmp.first),
                      DAG.getNode(HexagonISD::V2Q, dl, BoolTy, Tmp.second)};
  VectorPair ValueU = StoreAlign(Value, Base);

  // Two predicated stores: at Base+0 and Base+HwLen.
  SDValue Offset1 = DAG.getTargetConstant(HwLen, dl, MVT::i32);
  SDValue StoreLo =
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.first, Base, Offset0, ValueU.first, Chain}, DAG);
  SDValue StoreHi =
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.second, Base, Offset1, ValueU.second, Chain}, DAG);
  DAG.setNodeMemRefs(cast<MachineSDNode>(StoreLo.getNode()), {MemOp});
  DAG.setNodeMemRefs(cast<MachineSDNode>(StoreHi.getNode()), {MemOp});
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, {StoreLo, StoreHi});
}

SDValue HexagonTargetLowering::LowerHvxFpExtend(SDValue Op,
                                                SelectionDAG &DAG) const {
  // This conversion only applies to QFloat. IEEE extension from f16 to f32
  // is legal (done via a pattern).
  assert(Subtarget.useHVXQFloatOps());

  assert(Op->getOpcode() == ISD::FP_EXTEND);

  MVT VecTy = ty(Op);
  MVT ArgTy = ty(Op.getOperand(0));
  const SDLoc &dl(Op);
  assert(VecTy == MVT::v64f32 && ArgTy == MVT::v64f16);

  SDValue F16Vec = Op.getOperand(0);

  // Extend by multiplying by 1.0 (in f16): vmpy_qf32_hf widens each f16
  // element to a qf32 product.
  APFloat FloatVal = APFloat(1.0f);
  bool Ignored;
  FloatVal.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  SDValue Fp16Ones = DAG.getConstantFP(FloatVal, dl, ArgTy);
  SDValue VmpyVec =
      getInstr(Hexagon::V6_vmpy_qf32_hf, dl, VecTy, {F16Vec, Fp16Ones}, DAG);

  // Convert each qf32 half to IEEE sf.
  MVT HalfTy = typeSplit(VecTy).first;
  VectorPair Pair = opSplit(VmpyVec, dl, DAG);
  SDValue LoVec =
      getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.first}, DAG);
  SDValue HiVec =
      getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.second}, DAG);

  // Re-interleave the halves into element order (vshuffvdd with -4).
  SDValue ShuffVec =
      getInstr(Hexagon::V6_vshuffvdd, dl, VecTy,
               {HiVec, LoVec, DAG.getConstant(-4, dl, MVT::i32)}, DAG);

  return ShuffVec;
}

SDValue
HexagonTargetLowering::LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const {
  // Catch invalid conversion ops (just in case).
  assert(Op.getOpcode() == ISD::FP_TO_SINT ||
         Op.getOpcode() == ISD::FP_TO_UINT);

  MVT ResTy = ty(Op);
  MVT FpTy = ty(Op.getOperand(0)).getVectorElementType();
  MVT IntTy = ResTy.getVectorElementType();

  if (Subtarget.useHVXIEEEFPOps()) {
    // There are only conversions from f16.
    if (FpTy == MVT::f16) {
      // Other int types aren't legal in HVX, so we shouldn't see them here.
      assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
      // Conversions to i8 and i16 are legal.
      if (IntTy == MVT::i8 || IntTy == MVT::i16)
        return Op;
    }
  }

  // Mismatched element widths: equalize first, then expand.
  if (IntTy.getSizeInBits() != FpTy.getSizeInBits())
    return EqualizeFpIntConversion(Op, DAG);

  return ExpandHvxFpToInt(Op, DAG);
}

// Custom-lower [SU]INT_TO_FP: mirror image of LowerHvxFpToInt above.
SDValue
HexagonTargetLowering::LowerHvxIntToFp(SDValue Op, SelectionDAG &DAG) const {
  // Catch invalid conversion ops (just in case).
  assert(Op.getOpcode() == ISD::SINT_TO_FP ||
         Op.getOpcode() == ISD::UINT_TO_FP);

  MVT ResTy = ty(Op);
  MVT IntTy = ty(Op.getOperand(0)).getVectorElementType();
  MVT FpTy = ResTy.getVectorElementType();

  if (Subtarget.useHVXIEEEFPOps()) {
    // There are only conversions to f16.
    if (FpTy == MVT::f16) {
      // Other int types aren't legal in HVX, so we shouldn't see them here.
      assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
      // i8, i16 -> f16 is legal.
      if (IntTy == MVT::i8 || IntTy == MVT::i16)
        return Op;
    }
  }

  // Mismatched element widths: equalize first, then expand.
  if (IntTy.getSizeInBits() != FpTy.getSizeInBits())
    return EqualizeFpIntConversion(Op, DAG);

  return ExpandHvxIntToFp(Op, DAG);
}

HexagonTargetLowering::TypePair
HexagonTargetLowering::typeExtendToWider(MVT Ty0, MVT Ty1) const {
  // Compare the widths of elements of the two types, and extend the narrower
  // type to match the width of the wider type. For vector types, apply this
  // to the element type.
  // Both must be scalars, or both vectors.
  assert(Ty0.isVector() == Ty1.isVector());

  MVT ElemTy0 = Ty0.getScalarType();
  MVT ElemTy1 = Ty1.getScalarType();

  unsigned Width0 = ElemTy0.getSizeInBits();
  unsigned Width1 = ElemTy1.getSizeInBits();
  unsigned MaxWidth = std::max(Width0, Width1);

  // Rebuild a scalar type of the given width, preserving int vs. FP.
  auto getScalarWithWidth = [](MVT ScalarTy, unsigned Width) {
    if (ScalarTy.isInteger())
      return MVT::getIntegerVT(Width);
    assert(ScalarTy.isFloatingPoint());
    return MVT::getFloatingPointVT(Width);
  };

  MVT WideETy0 = getScalarWithWidth(ElemTy0, MaxWidth);
  MVT WideETy1 = getScalarWithWidth(ElemTy1, MaxWidth);

  if (!Ty0.isVector()) {
    // Both types are scalars.
    return {WideETy0, WideETy1};
  }

  // Vector types.
  unsigned NumElem = Ty0.getVectorNumElements();
  assert(NumElem == Ty1.getVectorNumElements());

  return {MVT::getVectorVT(WideETy0, NumElem),
          MVT::getVectorVT(WideETy1, NumElem)};
}

HexagonTargetLowering::TypePair
HexagonTargetLowering::typeWidenToWider(MVT Ty0, MVT Ty1) const {
  // Compare the numbers of elements of two vector types, and widen the
  // narrower one to match the number of elements in the wider one.
  assert(Ty0.isVector() && Ty1.isVector());

  unsigned Len0 = Ty0.getVectorNumElements();
  unsigned Len1 = Ty1.getVectorNumElements();
  if (Len0 == Len1)
    return {Ty0, Ty1};

  // Keep each type's element type, but give both the larger element count.
  unsigned MaxLen = std::max(Len0, Len1);
  return {MVT::getVectorVT(Ty0.getVectorElementType(), MaxLen),
          MVT::getVectorVT(Ty1.getVectorElementType(), MaxLen)};
}

// Return the type the legalizer would transform Ty into.
MVT
HexagonTargetLowering::typeLegalize(MVT Ty, SelectionDAG &DAG) const {
  EVT LegalTy = getTypeToTransformTo(*DAG.getContext(), Ty);
  assert(LegalTy.isSimple());
  return LegalTy.getSimpleVT();
}

// Widen Ty (by element count) to occupy a full HW vector register.
MVT
HexagonTargetLowering::typeWidenToHvx(MVT Ty) const {
  unsigned HwWidth = 8 * Subtarget.getVectorLength();
  assert(Ty.getSizeInBits() <= HwWidth);
  if (Ty.getSizeInBits() == HwWidth)
    return Ty;

  MVT ElemTy = Ty.getScalarType();
  return MVT::getVectorVT(ElemTy, HwWidth / ElemTy.getSizeInBits());
}

HexagonTargetLowering::VectorPair
HexagonTargetLowering::emitHvxAddWithOverflow(SDValue A, SDValue B,
      const SDLoc &dl, bool Signed, SelectionDAG &DAG) const {
  // Compute A+B, return {A+B, O}, where O = vector predicate indicating
  // whether an overflow has occurred.
  MVT ResTy = ty(A);
  assert(ResTy == ty(B));
  MVT PredTy = MVT::getVectorVT(MVT::i1, ResTy.getVectorNumElements());

  if (!Signed) {
    // V62+ has V6_vaddcarry, but it requires input predicate, so it doesn't
    // save any instructions.
    // Unsigned overflow iff the sum wrapped around below an addend.
    SDValue Add = DAG.getNode(ISD::ADD, dl, ResTy, {A, B});
    SDValue Ovf = DAG.getSetCC(dl, PredTy, Add, A, ISD::SETULT);
    return {Add, Ovf};
  }

  // Signed overflow has happened, if:
  // (A, B have the same sign) and (A+B has a different sign from either)
  // i.e.
  // (~A xor B) & ((A+B) xor B), then check the sign bit
  SDValue Add = DAG.getNode(ISD::ADD, dl, ResTy, {A, B});
  // NotA = ~A (xor with all-ones).
  SDValue NotA =
      DAG.getNode(ISD::XOR, dl, ResTy, {A, DAG.getConstant(-1, dl, ResTy)});
  SDValue Xor0 = DAG.getNode(ISD::XOR, dl, ResTy, {NotA, B});
  SDValue Xor1 = DAG.getNode(ISD::XOR, dl, ResTy, {Add, B});
  SDValue And = DAG.getNode(ISD::AND, dl, ResTy, {Xor0, Xor1});
  // Sign bit set (value < 0) means overflow occurred.
  SDValue MSB =
      DAG.getSetCC(dl, PredTy, And, getZero(dl, ResTy, DAG), ISD::SETLT);
  return {Add, MSB};
}

HexagonTargetLowering::VectorPair
HexagonTargetLowering::emitHvxShiftRightRnd(SDValue Val, unsigned Amt,
      bool Signed, SelectionDAG &DAG) const {
  // Shift Val right by Amt bits, round the result to the nearest integer,
  // tie-break by rounding halves to even integer.

  const SDLoc &dl(Val);
  MVT ValTy = ty(Val);

  // This should also work for signed integers.
  //
  //   uint tmp0 = inp + ((1 << (Amt-1)) - 1);
  //   bool ovf = (inp > tmp0);
  //   uint rup = inp & (1 << (Amt+1));
  //
  //   uint tmp1 = inp >> (Amt-1);    // tmp1 == tmp2 iff
  //   uint tmp2 = tmp0 >> (Amt-1);   // the Amt-1 lower bits were all 0
  //   uint tmp3 = tmp2 + rup;
  //   uint frac = (tmp1 != tmp2) ? tmp2 >> 1 : tmp3 >> 1;
  unsigned ElemWidth = ValTy.getVectorElementType().getSizeInBits();
  MVT ElemTy = MVT::getIntegerVT(ElemWidth);
  // Do the arithmetic on the integer view of Val.
  MVT IntTy = tyVector(ValTy, ElemTy);
  MVT PredTy = MVT::getVectorVT(MVT::i1, IntTy.getVectorNumElements());
  unsigned ShRight = Signed ?
      ISD::SRA : ISD::SRL;

  SDValue Inp = DAG.getBitcast(IntTy, Val);
  // LowBits = (1 << (Amt-1)) - 1, the rounding bias.
  SDValue LowBits = DAG.getConstant((1ull << (Amt - 1)) - 1, dl, IntTy);

  // Rup = 1 if the "round-up" bit (1 << Amt) of Inp is set, else 0.
  SDValue AmtP1 = DAG.getConstant(1ull << Amt, dl, IntTy);
  SDValue And = DAG.getNode(ISD::AND, dl, IntTy, {Inp, AmtP1});
  SDValue Zero = getZero(dl, IntTy, DAG);
  SDValue Bit = DAG.getSetCC(dl, PredTy, And, Zero, ISD::SETNE);
  SDValue Rup = DAG.getZExtOrTrunc(Bit, dl, IntTy);
  // Tmp0 = Inp + bias, with overflow tracked separately.
  auto [Tmp0, Ovf] = emitHvxAddWithOverflow(Inp, LowBits, dl, Signed, DAG);

  SDValue AmtM1 = DAG.getConstant(Amt - 1, dl, IntTy);
  SDValue Tmp1 = DAG.getNode(ShRight, dl, IntTy, Inp, AmtM1);
  SDValue Tmp2 = DAG.getNode(ShRight, dl, IntTy, Tmp0, AmtM1);
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, IntTy, Tmp2, Rup);

  // Select between Tmp2>>1 and Tmp3>>1 per the scheme in the comment above.
  SDValue Eq = DAG.getSetCC(dl, PredTy, Tmp1, Tmp2, ISD::SETEQ);
  SDValue One = DAG.getConstant(1, dl, IntTy);
  SDValue Tmp4 = DAG.getNode(ShRight, dl, IntTy, {Tmp2, One});
  SDValue Tmp5 = DAG.getNode(ShRight, dl, IntTy, {Tmp3, One});
  SDValue Mux = DAG.getNode(ISD::VSELECT, dl, IntTy, {Eq, Tmp5, Tmp4});
  return {Mux, Ovf};
}

// Expand MULHS (signed high-half multiply) of i32 vectors for HVX v60,
// using 16x16->32 halfword multiplies.
SDValue
HexagonTargetLowering::emitHvxMulHsV60(SDValue A, SDValue B, const SDLoc &dl,
                                       SelectionDAG &DAG) const {
  MVT VecTy = ty(A);
  MVT PairTy = typeJoin({VecTy, VecTy});
  assert(VecTy.getVectorElementType() == MVT::i32);

  SDValue S16 = DAG.getConstant(16, dl, MVT::i32);

  // mulhs(A,B) =
  //   = [(Hi(A)*2^16 + Lo(A)) *s (Hi(B)*2^16 + Lo(B))] >> 32
  //   = [Hi(A)*2^16 *s Hi(B)*2^16 + Hi(A) *su Lo(B)*2^16
  //      + Lo(A) *us (Hi(B)*2^16 + Lo(B))] >> 32
  //   = [Hi(A) *s Hi(B)*2^32 + Hi(A) *su Lo(B)*2^16 + Lo(A) *us B] >> 32
  // The low half of Lo(A)*Lo(B) will be discarded (it's not added to
  // anything, so it cannot produce any carry over to higher bits),
  // so everything in [] can be shifted by 16 without loss of precision.
  //   = [Hi(A) *s Hi(B)*2^16 + Hi(A)*su Lo(B) + Lo(A)*B >> 16] >> 16
  //   = [Hi(A) *s Hi(B)*2^16 + Hi(A)*su Lo(B) + V6_vmpyewuh(A,B)] >> 16
  // The final additions need to make sure to properly maintain any carry-
  // out bits.
  //
  //        Hi(B) Lo(B)
  //        Hi(A) Lo(A)
  //       --------------
  //        Lo(B)*Lo(A)  | T0 = V6_vmpyewuh(B,A) does this,
  //        Hi(B)*Lo(A)  |      + dropping the low 16 bits
  //        Hi(A)*Lo(B)  | T2
  //  Hi(B)*Hi(A)

  SDValue T0 = getInstr(Hexagon::V6_vmpyewuh, dl, VecTy, {B, A}, DAG);
  // T1 = get Hi(A) into low halves.
  SDValue T1 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {A, S16}, DAG);
  // P0 = interleaved T1.h*B.uh (full precision product)
  SDValue P0 = getInstr(Hexagon::V6_vmpyhus, dl, PairTy, {T1, B}, DAG);
  // T2 = T1.even(h) * B.even(uh), i.e. Hi(A)*Lo(B)
  SDValue T2 = LoHalf(P0, DAG);
  // We need to add T0+T2, recording the carry-out, which will be 1<<16
  // added to the final sum.
  // P1 = interleaved even/odd 32-bit (unsigned) sums of 16-bit halves
  SDValue P1 = getInstr(Hexagon::V6_vadduhw, dl, PairTy, {T0, T2}, DAG);
  // P2 = interleaved even/odd 32-bit (signed) sums of 16-bit halves
  SDValue P2 = getInstr(Hexagon::V6_vaddhw, dl, PairTy, {T0, T2}, DAG);
  // T3 = full-precision(T0+T2) >> 16
  // The low halves are added-unsigned, the high ones are added-signed.
  SDValue T3 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
                        {HiHalf(P2, DAG), LoHalf(P1, DAG), S16}, DAG);
  // T4 = Hi(B) brought into the low halves (mirrors T1 for A).
  SDValue T4 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {B, S16}, DAG);
  // P3 = interleaved Hi(B)*Hi(A) (full precision),
  // which is now Lo(T1)*Lo(T4), so we want to keep the even product.
2527 SDValue P3 = getInstr(Hexagon::V6_vmpyhv, dl, PairTy, {T1, T4}, DAG); 2528 SDValue T5 = LoHalf(P3, DAG); 2529 // Add: 2530 SDValue T6 = DAG.getNode(ISD::ADD, dl, VecTy, {T3, T5}); 2531 return T6; 2532 } 2533 2534 SDValue 2535 HexagonTargetLowering::emitHvxMulLoHiV60(SDValue A, bool SignedA, SDValue B, 2536 bool SignedB, const SDLoc &dl, 2537 SelectionDAG &DAG) const { 2538 MVT VecTy = ty(A); 2539 MVT PairTy = typeJoin({VecTy, VecTy}); 2540 assert(VecTy.getVectorElementType() == MVT::i32); 2541 2542 SDValue S16 = DAG.getConstant(16, dl, MVT::i32); 2543 2544 if (SignedA && !SignedB) { 2545 // Make A:unsigned, B:signed. 2546 std::swap(A, B); 2547 std::swap(SignedA, SignedB); 2548 } 2549 2550 // Do halfword-wise multiplications for unsigned*unsigned product, then 2551 // add corrections for signed and unsigned*signed. 2552 2553 SDValue Lo, Hi; 2554 2555 // P0:lo = (uu) products of low halves of A and B, 2556 // P0:hi = (uu) products of high halves. 2557 SDValue P0 = getInstr(Hexagon::V6_vmpyuhv, dl, PairTy, {A, B}, DAG); 2558 2559 // Swap low/high halves in B 2560 SDValue T0 = getInstr(Hexagon::V6_lvsplatw, dl, VecTy, 2561 {DAG.getConstant(0x02020202, dl, MVT::i32)}, DAG); 2562 SDValue T1 = getInstr(Hexagon::V6_vdelta, dl, VecTy, {B, T0}, DAG); 2563 // P1 = products of even/odd halfwords. 2564 // P1:lo = (uu) products of even(A.uh) * odd(B.uh) 2565 // P1:hi = (uu) products of odd(A.uh) * even(B.uh) 2566 SDValue P1 = getInstr(Hexagon::V6_vmpyuhv, dl, PairTy, {A, T1}, DAG); 2567 2568 // P2:lo = low halves of P1:lo + P1:hi, 2569 // P2:hi = high halves of P1:lo + P1:hi. 
  SDValue P2 = getInstr(Hexagon::V6_vadduhw, dl, PairTy,
                        {HiHalf(P1, DAG), LoHalf(P1, DAG)}, DAG);
  // Still need to add the high halves of P0:lo to P2:lo
  SDValue T2 =
      getInstr(Hexagon::V6_vlsrw, dl, VecTy, {LoHalf(P0, DAG), S16}, DAG);
  SDValue T3 = DAG.getNode(ISD::ADD, dl, VecTy, {LoHalf(P2, DAG), T2});

  // The high halves of T3 will contribute to the HI part of LOHI.
  SDValue T4 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
                        {HiHalf(P2, DAG), T3, S16}, DAG);

  // The low halves of P2 need to be added to high halves of the LO part.
  Lo = getInstr(Hexagon::V6_vaslw_acc, dl, VecTy,
                {LoHalf(P0, DAG), LoHalf(P2, DAG), S16}, DAG);
  Hi = DAG.getNode(ISD::ADD, dl, VecTy, {HiHalf(P0, DAG), T4});

  if (SignedA) {
    assert(SignedB && "Signed A and unsigned B should have been inverted");
    // Correct the unsigned product for signed*signed inputs by subtracting
    // the other operand wherever an operand is negative.
    MVT PredTy = MVT::getVectorVT(MVT::i1, VecTy.getVectorNumElements());
    SDValue Zero = getZero(dl, VecTy, DAG);
    SDValue Q0 = DAG.getSetCC(dl, PredTy, A, Zero, ISD::SETLT);
    SDValue Q1 = DAG.getSetCC(dl, PredTy, B, Zero, ISD::SETLT);
    SDValue X0 = DAG.getNode(ISD::VSELECT, dl, VecTy, {Q0, B, Zero});
    SDValue X1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, X0, A}, DAG);
    Hi = getInstr(Hexagon::V6_vsubw, dl, VecTy, {Hi, X1}, DAG);
  } else if (SignedB) {
    // Same correction as for mulhus:
    // mulhus(A.uw,B.w) = mulhu(A.uw,B.uw) - (A.w if B < 0)
    MVT PredTy = MVT::getVectorVT(MVT::i1, VecTy.getVectorNumElements());
    SDValue Zero = getZero(dl, VecTy, DAG);
    SDValue Q1 = DAG.getSetCC(dl, PredTy, B, Zero, ISD::SETLT);
    Hi = getInstr(Hexagon::V6_vsubwq, dl, VecTy, {Q1, Hi, A}, DAG);
  } else {
    assert(!SignedA && !SignedB);
  }

  return DAG.getMergeValues({Lo, Hi}, dl);
}

// Same as emitHvxMulLoHiV60, but using the 64-bit multiply instructions
// available on HVX v62+ (V6_vmpyewuh_64 / V6_vmpyowh_64_acc).
SDValue
HexagonTargetLowering::emitHvxMulLoHiV62(SDValue A, bool SignedA,
                                         SDValue B, bool SignedB,
                                         const SDLoc &dl,
                                         SelectionDAG &DAG) const {
  MVT VecTy = ty(A);
  MVT PairTy = typeJoin({VecTy, VecTy});
  assert(VecTy.getVectorElementType() == MVT::i32);

  if (SignedA && !SignedB) {
    // Make A:unsigned, B:signed.
    std::swap(A, B);
    std::swap(SignedA, SignedB);
  }

  // Do S*S first, then make corrections for U*S or U*U if needed.
  SDValue P0 = getInstr(Hexagon::V6_vmpyewuh_64, dl, PairTy, {A, B}, DAG);
  SDValue P1 =
      getInstr(Hexagon::V6_vmpyowh_64_acc, dl, PairTy, {P0, A, B}, DAG);
  SDValue Lo = LoHalf(P1, DAG);
  SDValue Hi = HiHalf(P1, DAG);

  if (!SignedB) {
    assert(!SignedA && "Signed A and unsigned B should have been inverted");
    SDValue Zero = getZero(dl, VecTy, DAG);
    MVT PredTy = MVT::getVectorVT(MVT::i1, VecTy.getVectorNumElements());

    // Mulhu(X, Y) = Mulhs(X, Y) + (X, if Y < 0) + (Y, if X < 0).
    // def: Pat<(VecI32 (mulhu HVI32:$A, HVI32:$B)),
    //          (V6_vaddw (HiHalf (Muls64O $A, $B)),
    //                    (V6_vaddwq (V6_vgtw (V6_vd0), $B),
    //                               (V6_vandvqv (V6_vgtw (V6_vd0), $A), $B),
    //                               $A))>;
    SDValue Q0 = DAG.getSetCC(dl, PredTy, A, Zero, ISD::SETLT);
    SDValue Q1 = DAG.getSetCC(dl, PredTy, B, Zero, ISD::SETLT);
    SDValue T0 = getInstr(Hexagon::V6_vandvqv, dl, VecTy, {Q0, B}, DAG);
    SDValue T1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, T0, A}, DAG);
    Hi = getInstr(Hexagon::V6_vaddw, dl, VecTy, {Hi, T1}, DAG);
  } else if (!SignedA) {
    SDValue Zero = getZero(dl, VecTy, DAG);
    MVT PredTy = MVT::getVectorVT(MVT::i1, VecTy.getVectorNumElements());

    // Mulhus(unsigned X, signed Y) = Mulhs(X, Y) + (Y, if X < 0).
    // def: Pat<(VecI32 (HexagonMULHUS HVI32:$A, HVI32:$B)),
    //          (V6_vaddwq (V6_vgtw (V6_vd0), $A),
    //                     (HiHalf (Muls64O $A, $B)),
    //                     $B)>;
    SDValue Q0 = DAG.getSetCC(dl, PredTy, A, Zero, ISD::SETLT);
    Hi = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q0, Hi, B}, DAG);
  }

  return DAG.getMergeValues({Lo, Hi}, dl);
}

SDValue
HexagonTargetLowering::EqualizeFpIntConversion(SDValue Op, SelectionDAG &DAG)
      const {
  // Rewrite conversion between integer and floating-point in such a way that
  // the integer type is extended/narrowed to match the bitwidth of the
  // floating-point type, combined with additional integer-integer extensions
  // or narrowings to match the original input/result types.
  // E.g.  f32 -> i8  ==>  f32 -> i32 -> i8
  //
  // The input/result types are not required to be legal, but if they are
  // legal, this function should not introduce illegal types.

  unsigned Opc = Op.getOpcode();
  assert(Opc == ISD::FP_TO_SINT || Opc == ISD::FP_TO_UINT ||
         Opc == ISD::SINT_TO_FP || Opc == ISD::UINT_TO_FP);

  SDValue Inp = Op.getOperand(0);
  MVT InpTy = ty(Inp);
  MVT ResTy = ty(Op);

  if (InpTy == ResTy)
    return Op;

  const SDLoc &dl(Op);
  bool Signed = Opc == ISD::FP_TO_SINT || Opc == ISD::SINT_TO_FP;

  auto [WInpTy, WResTy] = typeExtendToWider(InpTy, ResTy);
  SDValue WInp = resizeToWidth(Inp, WInpTy, Signed, dl, DAG);
  SDValue Conv = DAG.getNode(Opc, dl, WResTy, WInp);
  SDValue Res = resizeToWidth(Conv, ResTy, Signed, dl, DAG);
  return Res;
}

// Expand a float->int conversion into integer bit manipulation on the
// IEEE representation (no FP hardware required).
SDValue
HexagonTargetLowering::ExpandHvxFpToInt(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  assert(Opc == ISD::FP_TO_SINT || Opc == ISD::FP_TO_UINT);

  const SDLoc &dl(Op);
  SDValue Op0 = Op.getOperand(0);
  MVT InpTy = ty(Op0);
  MVT ResTy = ty(Op);
  assert(InpTy.changeTypeToInteger() == ResTy);

  // int32_t conv_f32_to_i32(uint32_t inp) {
  //   // s | exp8 | frac23
  //
  //   int neg = (int32_t)inp < 0;
  //
  //   // "expm1" is the actual exponent minus 1: instead of "bias", subtract
  //   // "bias+1". When the encoded exp is "all-1" (i.e. inf/nan), this will
  //   // produce a large positive "expm1", which will result in max u/int.
  //   // In all IEEE formats, bias is the largest positive number that can be
  //   // represented in bias-width bits (i.e. 011..1).
  //   int32_t expm1 = (inp << 1) - 0x80000000;
  //   expm1 >>= 24;
  //
  //   // Always insert the "implicit 1". Subnormal numbers will become 0
  //   // regardless.
  //   uint32_t frac = (inp << 8) | 0x80000000;
  //
  //   // "frac" is the fraction part represented as Q1.31. If it was
  //   // interpreted as uint32_t, it would be the fraction part multiplied
  //   // by 2^31.
  //
  //   // Calculate the amount of right shift, since shifting further to the
  //   // left would lose significant bits. Limit it to 32, because we want
  //   // shifts by 32+ to produce 0, whereas V6_vlsrwv treats the shift
  //   // amount as a 6-bit signed value (so 33 is same as -31, i.e. shift
  //   // left by 31). "rsh" can be negative.
  //   int32_t rsh = min(31 - (expm1 + 1), 32);
  //
  //   frac >>= rsh;   // rsh == 32 will produce 0
  //
  //   // Everything up to this point is the same for conversion to signed
  //   // unsigned integer.
  //
  //   if (neg)                  // Only for signed int
  //     frac = -frac;           //
  //   if (rsh <= 0 && neg)      //   bound = neg ? 0x80000000 : 0x7fffffff
  //     frac = 0x80000000;      //   frac = rsh <= 0 ? bound : frac
  //   if (rsh <= 0 && !neg)     //
  //     frac = 0x7fffffff;      //
  //
  //   if (neg)                  // Only for unsigned int
  //     frac = 0;               //
  //   if (rsh < 0 && !neg)      //   frac = rsh < 0 ? 0x7fffffff : frac;
  //     frac = 0x7fffffff;      //   frac = neg ?
  //       0 : frac;             //
  //
  //   return frac;
  // }

  MVT PredTy = MVT::getVectorVT(MVT::i1, ResTy.getVectorElementCount());

  // Instruction-level sketch of the expansion below:
  // Zero = V6_vd0();
  // Neg = V6_vgtw(Zero, Inp);
  // One = V6_lvsplatw(1);
  // M80 = V6_lvsplatw(0x80000000);
  // Exp00 = V6_vaslwv(Inp, One);
  // Exp01 = V6_vsubw(Exp00, M80);
  // ExpM1 = V6_vasrw(Exp01, 24);
  // Frc00 = V6_vaslw(Inp, 8);
  // Frc01 = V6_vor(Frc00, M80);
  // Rsh00 = V6_vsubw(V6_lvsplatw(30), ExpM1);
  // Rsh01 = V6_vminw(Rsh00, V6_lvsplatw(32));
  // Frc02 = V6_vlsrwv(Frc01, Rsh01);

  // if signed int:
  // Bnd = V6_vmux(Neg, M80, V6_lvsplatw(0x7fffffff))
  // Pos = V6_vgtw(Rsh01, Zero);
  // Frc13 = V6_vsubw(Zero, Frc02);
  // Frc14 = V6_vmux(Neg, Frc13, Frc02);
  // Int = V6_vmux(Pos, Frc14, Bnd);
  //
  // if unsigned int:
  // Rsn = V6_vgtw(Zero, Rsh01)
  // Frc23 = V6_vmux(Rsn, V6_lvsplatw(0x7fffffff), Frc02)
  // Int = V6_vmux(Neg, Zero, Frc23)

  auto [ExpWidth, ExpBias, FracWidth] = getIEEEProperties(InpTy);
  unsigned ElemWidth = 1 + ExpWidth + FracWidth;
  assert((1ull << (ExpWidth - 1)) == (1 + ExpBias));

  SDValue Inp = DAG.getBitcast(ResTy, Op0);
  SDValue Zero = getZero(dl, ResTy, DAG);
  SDValue Neg = DAG.getSetCC(dl, PredTy, Inp, Zero, ISD::SETLT);
  SDValue M80 = DAG.getConstant(1ull << (ElemWidth - 1), dl, ResTy);
  SDValue M7F = DAG.getConstant((1ull << (ElemWidth - 1)) - 1, dl, ResTy);
  SDValue One = DAG.getConstant(1, dl, ResTy);
  SDValue Exp00 = DAG.getNode(ISD::SHL, dl, ResTy, {Inp, One});
  SDValue Exp01 = DAG.getNode(ISD::SUB, dl, ResTy, {Exp00, M80});
  SDValue MNE = DAG.getConstant(ElemWidth - ExpWidth, dl, ResTy);
  SDValue ExpM1 = DAG.getNode(ISD::SRA, dl, ResTy, {Exp01, MNE});

  SDValue ExpW = DAG.getConstant(ExpWidth, dl, ResTy);
  SDValue Frc00 = DAG.getNode(ISD::SHL, dl, ResTy, {Inp, ExpW});
  SDValue Frc01 = DAG.getNode(ISD::OR, dl, ResTy, {Frc00, M80});

  SDValue MN2 = DAG.getConstant(ElemWidth - 2, dl, ResTy);
  SDValue Rsh00 = DAG.getNode(ISD::SUB, dl, ResTy, {MN2, ExpM1});
  SDValue MW = DAG.getConstant(ElemWidth, dl, ResTy);
  SDValue Rsh01 = DAG.getNode(ISD::SMIN, dl, ResTy, {Rsh00, MW});
  SDValue Frc02 = DAG.getNode(ISD::SRL, dl, ResTy, {Frc01, Rsh01});

  SDValue Int;

  if (Opc == ISD::FP_TO_SINT) {
    SDValue Bnd = DAG.getNode(ISD::VSELECT, dl, ResTy, {Neg, M80, M7F});
    SDValue Pos = DAG.getSetCC(dl, PredTy, Rsh01, Zero, ISD::SETGT);
    SDValue Frc13 = DAG.getNode(ISD::SUB, dl, ResTy, {Zero, Frc02});
    SDValue Frc14 = DAG.getNode(ISD::VSELECT, dl, ResTy, {Neg, Frc13, Frc02});
    Int = DAG.getNode(ISD::VSELECT, dl, ResTy, {Pos, Frc14, Bnd});
  } else {
    assert(Opc == ISD::FP_TO_UINT);
    SDValue Rsn = DAG.getSetCC(dl, PredTy, Rsh01, Zero, ISD::SETLT);
    SDValue Frc23 = DAG.getNode(ISD::VSELECT, dl, ResTy, Rsn, M7F, Frc02);
    Int = DAG.getNode(ISD::VSELECT, dl, ResTy, Neg, Zero, Frc23);
  }

  return Int;
}

// Expand an int->float conversion into integer bit manipulation building
// the IEEE representation directly (normalize, round, assemble fields).
SDValue
HexagonTargetLowering::ExpandHvxIntToFp(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  assert(Opc == ISD::SINT_TO_FP || Opc == ISD::UINT_TO_FP);

  const SDLoc &dl(Op);
  SDValue Op0 = Op.getOperand(0);
  MVT InpTy = ty(Op0);
  MVT ResTy = ty(Op);
  assert(ResTy.changeTypeToInteger() == InpTy);

  // uint32_t vnoc1_rnd(int32_t w) {
  //   int32_t iszero = w == 0;
  //   int32_t isneg = w < 0;
  //   uint32_t u = __builtin_HEXAGON_A2_abs(w);
  //
  //   uint32_t norm_left = __builtin_HEXAGON_S2_cl0(u) + 1;
  //   uint32_t frac0 = (uint64_t)u << norm_left;
  //
  //   // Rounding:
  //   uint32_t frac1 = frac0 + ((1 << 8) - 1);
  //   uint32_t renorm = (frac0 > frac1);
  //   uint32_t rup = (int)(frac0 << 22) < 0;
  //
  //   uint32_t frac2 = frac0 >> 8;
  //   uint32_t frac3 = frac1 >> 8;
  //   uint32_t frac = (frac2 != frac3) ? frac3 >> 1 : (frac3 + rup) >> 1;
  //
  //   int32_t exp = 32 - norm_left + renorm + 127;
  //   exp <<= 23;
  //
  //   uint32_t sign = 0x80000000 * isneg;
  //   uint32_t f = sign | exp | frac;
  //   return iszero ? 0 : f;
  // }

  MVT PredTy = MVT::getVectorVT(MVT::i1, InpTy.getVectorElementCount());
  bool Signed = Opc == ISD::SINT_TO_FP;

  auto [ExpWidth, ExpBias, FracWidth] = getIEEEProperties(ResTy);
  unsigned ElemWidth = 1 + ExpWidth + FracWidth;

  SDValue Zero = getZero(dl, InpTy, DAG);
  SDValue One = DAG.getConstant(1, dl, InpTy);
  SDValue IsZero = DAG.getSetCC(dl, PredTy, Op0, Zero, ISD::SETEQ);
  SDValue Abs = Signed ? DAG.getNode(ISD::ABS, dl, InpTy, Op0) : Op0;
  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, InpTy, Abs);
  SDValue NLeft = DAG.getNode(ISD::ADD, dl, InpTy, {Clz, One});
  SDValue Frac0 = DAG.getNode(ISD::SHL, dl, InpTy, {Abs, NLeft});

  // Round the fraction and record the overflow (renormalization) bit.
  auto [Frac, Ovf] = emitHvxShiftRightRnd(Frac0, ExpWidth + 1, false, DAG);
  if (Signed) {
    SDValue IsNeg = DAG.getSetCC(dl, PredTy, Op0, Zero, ISD::SETLT);
    SDValue M80 = DAG.getConstant(1ull << (ElemWidth - 1), dl, InpTy);
    SDValue Sign = DAG.getNode(ISD::VSELECT, dl, InpTy, {IsNeg, M80, Zero});
    Frac = DAG.getNode(ISD::OR, dl, InpTy, {Sign, Frac});
  }

  SDValue Rnrm = DAG.getZExtOrTrunc(Ovf, dl, InpTy);
  SDValue Exp0 = DAG.getConstant(ElemWidth + ExpBias, dl, InpTy);
  SDValue Exp1 = DAG.getNode(ISD::ADD, dl, InpTy, {Rnrm, Exp0});
  SDValue Exp2 = DAG.getNode(ISD::SUB, dl, InpTy, {Exp1, NLeft});
  SDValue Exp3 = DAG.getNode(ISD::SHL, dl, InpTy,
                             {Exp2, DAG.getConstant(FracWidth, dl, InpTy)});
  SDValue Flt0 = DAG.getNode(ISD::OR, dl, InpTy, {Frac, Exp3});
  SDValue Flt1 = DAG.getNode(ISD::VSELECT, dl, InpTy, {IsZero, Zero, Flt0});
  SDValue Flt = DAG.getBitcast(ResTy, Flt1);

  return Flt;
}

SDValue
HexagonTargetLowering::CreateTLWrapper(SDValue Op, SelectionDAG &DAG) const { 2899 unsigned Opc = Op.getOpcode(); 2900 unsigned TLOpc; 2901 switch (Opc) { 2902 case ISD::ANY_EXTEND: 2903 case ISD::SIGN_EXTEND: 2904 case ISD::ZERO_EXTEND: 2905 TLOpc = HexagonISD::TL_EXTEND; 2906 break; 2907 case ISD::TRUNCATE: 2908 TLOpc = HexagonISD::TL_TRUNCATE; 2909 break; 2910 #ifndef NDEBUG 2911 Op.dump(&DAG); 2912 #endif 2913 llvm_unreachable("Unepected operator"); 2914 } 2915 2916 const SDLoc &dl(Op); 2917 return DAG.getNode(TLOpc, dl, ty(Op), Op.getOperand(0), 2918 DAG.getUNDEF(MVT::i128), // illegal type 2919 DAG.getConstant(Opc, dl, MVT::i32)); 2920 } 2921 2922 SDValue 2923 HexagonTargetLowering::RemoveTLWrapper(SDValue Op, SelectionDAG &DAG) const { 2924 assert(Op.getOpcode() == HexagonISD::TL_EXTEND || 2925 Op.getOpcode() == HexagonISD::TL_TRUNCATE); 2926 unsigned Opc = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2927 return DAG.getNode(Opc, SDLoc(Op), ty(Op), Op.getOperand(0)); 2928 } 2929 2930 HexagonTargetLowering::VectorPair 2931 HexagonTargetLowering::SplitVectorOp(SDValue Op, SelectionDAG &DAG) const { 2932 assert(!Op.isMachineOpcode()); 2933 SmallVector<SDValue, 2> OpsL, OpsH; 2934 const SDLoc &dl(Op); 2935 2936 auto SplitVTNode = [&DAG, this](const VTSDNode *N) { 2937 MVT Ty = typeSplit(N->getVT().getSimpleVT()).first; 2938 SDValue TV = DAG.getValueType(Ty); 2939 return std::make_pair(TV, TV); 2940 }; 2941 2942 for (SDValue A : Op.getNode()->ops()) { 2943 auto [Lo, Hi] = 2944 ty(A).isVector() ? opSplit(A, dl, DAG) : std::make_pair(A, A); 2945 // Special case for type operand. 
    // VT operands (the saturation/extension target type) must be split to
    // the half-width type as well.
    switch (Op.getOpcode()) {
    case ISD::SIGN_EXTEND_INREG:
    case HexagonISD::SSAT:
    case HexagonISD::USAT:
      if (const auto *N = dyn_cast<const VTSDNode>(A.getNode()))
        std::tie(Lo, Hi) = SplitVTNode(N);
      break;
    }
    OpsL.push_back(Lo);
    OpsH.push_back(Hi);
  }

  MVT ResTy = ty(Op);
  MVT HalfTy = typeSplit(ResTy).first;
  SDValue L = DAG.getNode(Op.getOpcode(), dl, HalfTy, OpsL);
  SDValue H = DAG.getNode(Op.getOpcode(), dl, HalfTy, OpsH);
  return {L, H};
}

// Split a memory operation (load/store/masked load/masked store) on an
// HVX vector pair into two single-vector memory operations.
SDValue
HexagonTargetLowering::SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const {
  auto *MemN = cast<MemSDNode>(Op.getNode());

  MVT MemTy = MemN->getMemoryVT().getSimpleVT();
  if (!isHvxPairTy(MemTy))
    return Op;

  const SDLoc &dl(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  MVT SingleTy = typeSplit(MemTy).first;
  SDValue Chain = MemN->getChain();
  SDValue Base0 = MemN->getBasePtr();
  // Second half is accessed at Base + one vector length.
  SDValue Base1 = DAG.getMemBasePlusOffset(Base0, TypeSize::Fixed(HwLen), dl);
  unsigned MemOpc = MemN->getOpcode();

  MachineMemOperand *MOp0 = nullptr, *MOp1 = nullptr;
  if (MachineMemOperand *MMO = MemN->getMemOperand()) {
    MachineFunction &MF = DAG.getMachineFunction();
    // Masked accesses may touch fewer bytes than the type size.
    uint64_t MemSize = (MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE)
                           ? (uint64_t)MemoryLocation::UnknownSize
                           : HwLen;
    MOp0 = MF.getMachineMemOperand(MMO, 0, MemSize);
    MOp1 = MF.getMachineMemOperand(MMO, HwLen, MemSize);
  }

  if (MemOpc == ISD::LOAD) {
    assert(cast<LoadSDNode>(Op)->isUnindexed());
    SDValue Load0 = DAG.getLoad(SingleTy, dl, Chain, Base0, MOp0);
    SDValue Load1 = DAG.getLoad(SingleTy, dl, Chain, Base1, MOp1);
    return DAG.getMergeValues(
        { DAG.getNode(ISD::CONCAT_VECTORS, dl, MemTy, Load0, Load1),
          DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      Load0.getValue(1), Load1.getValue(1)) }, dl);
  }
  if (MemOpc == ISD::STORE) {
    assert(cast<StoreSDNode>(Op)->isUnindexed());
    VectorPair Vals = opSplit(cast<StoreSDNode>(Op)->getValue(), dl, DAG);
    SDValue Store0 = DAG.getStore(Chain, dl, Vals.first, Base0, MOp0);
    SDValue Store1 = DAG.getStore(Chain, dl, Vals.second, Base1, MOp1);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store0, Store1);
  }

  assert(MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE);

  auto MaskN = cast<MaskedLoadStoreSDNode>(Op);
  assert(MaskN->isUnindexed());
  VectorPair Masks = opSplit(MaskN->getMask(), dl, DAG);
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  if (MemOpc == ISD::MLOAD) {
    VectorPair Thru =
        opSplit(cast<MaskedLoadSDNode>(Op)->getPassThru(), dl, DAG);
    SDValue MLoad0 =
        DAG.getMaskedLoad(SingleTy, dl, Chain, Base0, Offset, Masks.first,
                          Thru.first, SingleTy, MOp0, ISD::UNINDEXED,
                          ISD::NON_EXTLOAD, false);
    SDValue MLoad1 =
        DAG.getMaskedLoad(SingleTy, dl, Chain, Base1, Offset, Masks.second,
                          Thru.second, SingleTy, MOp1, ISD::UNINDEXED,
                          ISD::NON_EXTLOAD, false);
    return DAG.getMergeValues(
        { DAG.getNode(ISD::CONCAT_VECTORS, dl, MemTy, MLoad0, MLoad1),
          DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      MLoad0.getValue(1), MLoad1.getValue(1)) }, dl);
  }
  if (MemOpc == ISD::MSTORE) {
    VectorPair Vals = opSplit(cast<MaskedStoreSDNode>(Op)->getValue(), dl, DAG);
    SDValue MStore0 = DAG.getMaskedStore(Chain, dl, Vals.first, Base0, Offset,
                                         Masks.first, SingleTy, MOp0,
                                         ISD::UNINDEXED, false, false);
    SDValue MStore1 = DAG.getMaskedStore(Chain, dl, Vals.second, Base1, Offset,
                                         Masks.second, SingleTy, MOp1,
                                         ISD::UNINDEXED, false, false);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MStore0, MStore1);
  }

  std::string Name = "Unexpected operation: " + Op->getOperationName(&DAG);
  llvm_unreachable(Name.c_str());
}

// Widen a too-narrow vector load to a full HVX vector using a masked load
// that only touches the original bytes in memory.
SDValue
HexagonTargetLowering::WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  auto *LoadN = cast<LoadSDNode>(Op.getNode());
  assert(LoadN->isUnindexed() && "Not widening indexed loads yet");
  assert(LoadN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening loads of i1 yet");

  SDValue Chain = LoadN->getChain();
  SDValue Base = LoadN->getBasePtr();
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  MVT ResTy = ty(Op);
  unsigned HwLen = Subtarget.getVectorLength();
  unsigned ResLen = ResTy.getStoreSize();
  assert(ResLen < HwLen && "vsetq(v1) prerequisite");

  // Mask selecting the first ResLen bytes of the vector.
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                          {DAG.getConstant(ResLen, dl, MVT::i32)}, DAG);

  MVT LoadTy = MVT::getVectorVT(MVT::i8, HwLen);
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MemOp = MF.getMachineMemOperand(LoadN->getMemOperand(), 0, HwLen);

  SDValue Load = DAG.getMaskedLoad(LoadTy, dl, Chain, Base, Offset, Mask,
                                   DAG.getUNDEF(LoadTy), LoadTy, MemOp,
                                   ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
  SDValue Value = opCastElem(Load, ResTy.getVectorElementType(), DAG);
  return DAG.getMergeValues({Value, Load.getValue(1)}, dl);
}

SDValue
HexagonTargetLowering::WidenHvxStore(SDValue Op,
                                     SelectionDAG &DAG) const {
  // Widen a too-narrow vector store to a full HVX vector using a masked
  // store that only writes the original bytes.
  const SDLoc &dl(Op);
  auto *StoreN = cast<StoreSDNode>(Op.getNode());
  assert(StoreN->isUnindexed() && "Not widening indexed stores yet");
  assert(StoreN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening stores of i1 yet");

  SDValue Chain = StoreN->getChain();
  SDValue Base = StoreN->getBasePtr();
  SDValue Offset = DAG.getUNDEF(MVT::i32);

  SDValue Value = opCastElem(StoreN->getValue(), MVT::i8, DAG);
  MVT ValueTy = ty(Value);
  unsigned ValueLen = ValueTy.getVectorNumElements();
  unsigned HwLen = Subtarget.getVectorLength();
  assert(isPowerOf2_32(ValueLen));

  // Pad the value with undef halves until it fills a full HVX vector.
  for (unsigned Len = ValueLen; Len < HwLen; ) {
    Value = opJoin({Value, DAG.getUNDEF(ty(Value))}, dl, DAG);
    Len = ty(Value).getVectorNumElements(); // This is Len *= 2
  }
  assert(ty(Value).getVectorNumElements() == HwLen); // Paranoia

  assert(ValueLen < HwLen && "vsetq(v1) prerequisite");
  // Mask selecting the first ValueLen bytes of the vector.
  MVT BoolTy = MVT::getVectorVT(MVT::i1, HwLen);
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
                          {DAG.getConstant(ValueLen, dl, MVT::i32)}, DAG);
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MemOp = MF.getMachineMemOperand(StoreN->getMemOperand(), 0, HwLen);
  return DAG.getMaskedStore(Chain, dl, Value, Base, Offset, Mask, ty(Value),
                            MemOp, ISD::UNINDEXED, false, false);
}

// Widen a setcc on too-narrow operands to a full HVX vector compare, then
// extract the subvector corresponding to the original element count.
SDValue
HexagonTargetLowering::WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  MVT ElemTy = ty(Op0).getVectorElementType();
  unsigned HwLen = Subtarget.getVectorLength();

  unsigned WideOpLen = (8 * HwLen) / ElemTy.getSizeInBits();
  assert(WideOpLen * ElemTy.getSizeInBits() == 8 * HwLen);
  MVT WideOpTy = MVT::getVectorVT(ElemTy, WideOpLen);
  if (!Subtarget.isHVXVectorType(WideOpTy, true))
    return SDValue();

  SDValue WideOp0 = appendUndef(Op0, WideOpTy, DAG);
  SDValue WideOp1 = appendUndef(Op1, WideOpTy, DAG);
  EVT ResTy =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), WideOpTy);
  SDValue SetCC = DAG.getNode(ISD::SETCC, dl, ResTy,
                              {WideOp0, WideOp1, Op.getOperand(2)});

  EVT RetTy = typeLegalize(ty(Op), DAG);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RetTy,
                     {SetCC, getZero(dl, MVT::i32, DAG)});
}

// Main dispatch for lowering HVX operations: split pair-typed operations
// first, then route single-vector operations to the specific lowerings.
SDValue
HexagonTargetLowering::LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  bool IsPairOp = isHvxPairTy(ty(Op)) ||
                  llvm::any_of(Op.getNode()->ops(), [this] (SDValue V) {
                    return isHvxPairTy(ty(V));
                  });

  if (IsPairOp) {
    switch (Opc) {
    default:
      break;
    case ISD::LOAD:
    case ISD::STORE:
    case ISD::MLOAD:
    case ISD::MSTORE:
      return SplitHvxMemOp(Op, DAG);
    case ISD::SINT_TO_FP:
    case ISD::UINT_TO_FP:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
      // Only split when input and result are the same width; otherwise the
      // conversion is equalized first (see EqualizeFpIntConversion).
      if (ty(Op).getSizeInBits() == ty(Op.getOperand(0)).getSizeInBits())
        return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
      break;
    case ISD::ABS:
    case ISD::CTPOP:
    case ISD::CTLZ:
    case ISD::CTTZ:
    case ISD::MUL:
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::MULHS:
    case ISD::MULHU:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SRA:
    case ISD::SHL:
    case ISD::SRL:
    case ISD::FSHL:
    case ISD::FSHR:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::SETCC:
    case ISD::VSELECT:
    case ISD::SIGN_EXTEND_INREG:
    case ISD::SPLAT_VECTOR:
      return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
      // In general, sign- and zero-extends can't be split and still
      // be legal. The only exception is extending bool vectors.
      if (ty(Op.getOperand(0)).getVectorElementType() == MVT::i1)
        return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
      break;
    }
  }

  switch (Opc) {
  default:
    break;
  case ISD::BUILD_VECTOR:            return LowerHvxBuildVector(Op, DAG);
  case ISD::SPLAT_VECTOR:            return LowerHvxSplatVector(Op, DAG);
  case ISD::CONCAT_VECTORS:          return LowerHvxConcatVectors(Op, DAG);
  case ISD::INSERT_SUBVECTOR:        return LowerHvxInsertSubvector(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:       return LowerHvxInsertElement(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR:       return LowerHvxExtractSubvector(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:      return LowerHvxExtractElement(Op, DAG);
  case ISD::BITCAST:                 return LowerHvxBitcast(Op, DAG);
  case ISD::ANY_EXTEND:              return LowerHvxAnyExt(Op, DAG);
  case ISD::SIGN_EXTEND:             return LowerHvxSignExt(Op, DAG);
  case ISD::ZERO_EXTEND:             return LowerHvxZeroExt(Op, DAG);
  case ISD::CTTZ:                    return LowerHvxCttz(Op, DAG);
  case ISD::SELECT:                  return LowerHvxSelect(Op, DAG);
  case ISD::SRA:
  case ISD::SHL:
  case ISD::SRL:                     return LowerHvxShift(Op, DAG);
  case ISD::FSHL:
  case ISD::FSHR:                    return LowerHvxFunnelShift(Op, DAG);
  case ISD::MULHS:
  case ISD::MULHU:                   return LowerHvxMulh(Op, DAG);
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:               return LowerHvxMulLoHi(Op, DAG);
  case ISD::ANY_EXTEND_VECTOR_INREG: return LowerHvxExtend(Op, DAG);
  case ISD::SETCC:
  case ISD::INTRINSIC_VOID:          return Op;
  case ISD::INTRINSIC_WO_CHAIN:      return LowerHvxIntrinsic(Op, DAG);
  case ISD::MLOAD:
  case ISD::MSTORE:                  return LowerHvxMaskedOp(Op, DAG);
  // Unaligned loads will be handled by the default lowering.
3232 case ISD::LOAD: return SDValue(); 3233 case ISD::FP_EXTEND: return LowerHvxFpExtend(Op, DAG); 3234 case ISD::FP_TO_SINT: 3235 case ISD::FP_TO_UINT: return LowerHvxFpToInt(Op, DAG); 3236 case ISD::SINT_TO_FP: 3237 case ISD::UINT_TO_FP: return LowerHvxIntToFp(Op, DAG); 3238 3239 // Special nodes: 3240 case HexagonISD::SMUL_LOHI: 3241 case HexagonISD::UMUL_LOHI: 3242 case HexagonISD::USMUL_LOHI: return LowerHvxMulLoHi(Op, DAG); 3243 } 3244 #ifndef NDEBUG 3245 Op.dumpr(&DAG); 3246 #endif 3247 llvm_unreachable("Unhandled HVX operation"); 3248 } 3249 3250 SDValue 3251 HexagonTargetLowering::ExpandHvxResizeIntoSteps(SDValue Op, SelectionDAG &DAG) 3252 const { 3253 // Rewrite the extension/truncation/saturation op into steps where each 3254 // step changes the type widths by a factor of 2. 3255 // E.g. i8 -> i16 remains unchanged, but i8 -> i32 ==> i8 -> i16 -> i32. 3256 // 3257 // Some of the vector types in Op may not be legal. 3258 3259 unsigned Opc = Op.getOpcode(); 3260 switch (Opc) { 3261 case HexagonISD::SSAT: 3262 case HexagonISD::USAT: 3263 case HexagonISD::TL_EXTEND: 3264 case HexagonISD::TL_TRUNCATE: 3265 break; 3266 case ISD::ANY_EXTEND: 3267 case ISD::ZERO_EXTEND: 3268 case ISD::SIGN_EXTEND: 3269 case ISD::TRUNCATE: 3270 llvm_unreachable("ISD:: ops will be auto-folded"); 3271 break; 3272 #ifndef NDEBUG 3273 Op.dump(&DAG); 3274 #endif 3275 llvm_unreachable("Unexpected operation"); 3276 } 3277 3278 SDValue Inp = Op.getOperand(0); 3279 MVT InpTy = ty(Inp); 3280 MVT ResTy = ty(Op); 3281 3282 unsigned InpWidth = InpTy.getVectorElementType().getSizeInBits(); 3283 unsigned ResWidth = ResTy.getVectorElementType().getSizeInBits(); 3284 assert(InpWidth != ResWidth); 3285 3286 if (InpWidth == 2 * ResWidth || ResWidth == 2 * InpWidth) 3287 return Op; 3288 3289 const SDLoc &dl(Op); 3290 unsigned NumElems = InpTy.getVectorNumElements(); 3291 assert(NumElems == ResTy.getVectorNumElements()); 3292 3293 auto repeatOp = [&](unsigned NewWidth, SDValue Arg) { 3294 MVT Ty = 
        MVT::getVectorVT(MVT::getIntegerVT(NewWidth), NumElems);
    switch (Opc) {
    case HexagonISD::SSAT:
    case HexagonISD::USAT:
      return DAG.getNode(Opc, dl, Ty, {Arg, DAG.getValueType(Ty)});
    case HexagonISD::TL_EXTEND:
    case HexagonISD::TL_TRUNCATE:
      return DAG.getNode(Opc, dl, Ty, {Arg, Op.getOperand(1), Op.getOperand(2)});
    default:
      llvm_unreachable("Unexpected opcode");
    }
  };

  // Walk the width up (or down) one doubling (halving) at a time.
  SDValue S = Inp;
  if (InpWidth < ResWidth) {
    assert(ResWidth % InpWidth == 0 && isPowerOf2_32(ResWidth / InpWidth));
    while (InpWidth * 2 <= ResWidth)
      S = repeatOp(InpWidth *= 2, S);
  } else {
    // InpWidth > ResWidth
    assert(InpWidth % ResWidth == 0 && isPowerOf2_32(InpWidth / ResWidth));
    while (InpWidth / 2 >= ResWidth)
      S = repeatOp(InpWidth /= 2, S);
  }
  return S;
}

// Legalize a resize-like node (SSAT/USAT/TL_EXTEND/TL_TRUNCATE) whose
// input or result type is not legal: widen to HVX, split, or unwrap.
SDValue
HexagonTargetLowering::LegalizeHvxResize(SDValue Op, SelectionDAG &DAG) const {
  SDValue Inp0 = Op.getOperand(0);
  MVT InpTy = ty(Inp0);
  MVT ResTy = ty(Op);
  unsigned InpWidth = InpTy.getSizeInBits();
  unsigned ResWidth = ResTy.getSizeInBits();
  unsigned Opc = Op.getOpcode();

  if (shouldWidenToHvx(InpTy, DAG) || shouldWidenToHvx(ResTy, DAG)) {
    // First, make sure that the narrower type is widened to HVX.
    // This may cause the result to be wider than what the legalizer
    // expects, so insert EXTRACT_SUBVECTOR to bring it back to the
    // desired type.
    auto [WInpTy, WResTy] =
        InpWidth < ResWidth ? typeWidenToWider(typeWidenToHvx(InpTy), ResTy)
                            : typeWidenToWider(InpTy, typeWidenToHvx(ResTy));
    SDValue W = appendUndef(Inp0, WInpTy, DAG);
    SDValue S;
    if (Opc == HexagonISD::TL_EXTEND || Opc == HexagonISD::TL_TRUNCATE) {
      S = DAG.getNode(Opc, SDLoc(Op), WResTy, W, Op.getOperand(1),
                      Op.getOperand(2));
    } else {
      S = DAG.getNode(Opc, SDLoc(Op), WResTy, W, DAG.getValueType(WResTy));
    }
    SDValue T = ExpandHvxResizeIntoSteps(S, DAG);
    return extractSubvector(T, typeLegalize(ResTy, DAG), 0, DAG);
  } else if (shouldSplitToHvx(InpWidth < ResWidth ? ResTy : InpTy, DAG)) {
    return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
  } else {
    assert(isTypeLegal(InpTy) && isTypeLegal(ResTy));
    return RemoveTLWrapper(Op, DAG);
  }
  llvm_unreachable("Unexpected situation");
}

// Custom legalization hook for HVX operations (operand legalization).
void
HexagonTargetLowering::LowerHvxOperationWrapper(SDNode *N,
      SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  unsigned Opc = N->getOpcode();
  SDValue Op(N, 0);
  SDValue Inp0;   // Optional first argument.
  if (N->getNumOperands() > 0)
    Inp0 = Op.getOperand(0);

  switch (Opc) {
  case ISD::ANY_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::TRUNCATE:
    if (Subtarget.isHVXElementType(ty(Op)) &&
        Subtarget.isHVXElementType(ty(Inp0))) {
      Results.push_back(CreateTLWrapper(Op, DAG));
    }
    break;
  case ISD::SETCC:
    if (shouldWidenToHvx(ty(Inp0), DAG)) {
      if (SDValue T = WidenHvxSetCC(Op, DAG))
        Results.push_back(T);
    }
    break;
  case ISD::STORE: {
    if (shouldWidenToHvx(ty(cast<StoreSDNode>(N)->getValue()), DAG)) {
      SDValue Store = WidenHvxStore(Op, DAG);
      Results.push_back(Store);
    }
    break;
  }
  case ISD::MLOAD:
    if (isHvxPairTy(ty(Op))) {
      SDValue S = SplitHvxMemOp(Op, DAG);
      assert(S->getOpcode() == ISD::MERGE_VALUES);
      Results.push_back(S.getOperand(0));
      Results.push_back(S.getOperand(1));
    }
    break;
  case ISD::MSTORE:
    if (isHvxPairTy(ty(Op->getOperand(1)))) {    // Stored value
      SDValue S = SplitHvxMemOp(Op, DAG);
      Results.push_back(S);
    }
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (ty(Op).getSizeInBits() != ty(Inp0).getSizeInBits()) {
      SDValue T = EqualizeFpIntConversion(Op, DAG);
      Results.push_back(T);
    }
    break;
  case HexagonISD::SSAT:
  case HexagonISD::USAT:
  case HexagonISD::TL_EXTEND:
  case HexagonISD::TL_TRUNCATE:
    Results.push_back(LegalizeHvxResize(Op, DAG));
    break;
  default:
    break;
  }
}

// Custom legalization hook for HVX operations (result legalization).
void
HexagonTargetLowering::ReplaceHvxNodeResults(SDNode *N,
      SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  unsigned Opc = N->getOpcode();
  SDValue Op(N, 0);
  SDValue Inp0;   // Optional first argument.
3429 if (N->getNumOperands() > 0) 3430 Inp0 = Op.getOperand(0); 3431 3432 switch (Opc) { 3433 case ISD::ANY_EXTEND: 3434 case ISD::SIGN_EXTEND: 3435 case ISD::ZERO_EXTEND: 3436 case ISD::TRUNCATE: 3437 if (Subtarget.isHVXElementType(ty(Op)) && 3438 Subtarget.isHVXElementType(ty(Inp0))) { 3439 Results.push_back(CreateTLWrapper(Op, DAG)); 3440 } 3441 break; 3442 case ISD::SETCC: 3443 if (shouldWidenToHvx(ty(Op), DAG)) { 3444 if (SDValue T = WidenHvxSetCC(Op, DAG)) 3445 Results.push_back(T); 3446 } 3447 break; 3448 case ISD::LOAD: { 3449 if (shouldWidenToHvx(ty(Op), DAG)) { 3450 SDValue Load = WidenHvxLoad(Op, DAG); 3451 assert(Load->getOpcode() == ISD::MERGE_VALUES); 3452 Results.push_back(Load.getOperand(0)); 3453 Results.push_back(Load.getOperand(1)); 3454 } 3455 break; 3456 } 3457 case ISD::BITCAST: 3458 if (isHvxBoolTy(ty(Inp0))) { 3459 SDValue C = LowerHvxBitcast(Op, DAG); 3460 Results.push_back(C); 3461 } 3462 break; 3463 case ISD::FP_TO_SINT: 3464 case ISD::FP_TO_UINT: 3465 if (ty(Op).getSizeInBits() != ty(Inp0).getSizeInBits()) { 3466 SDValue T = EqualizeFpIntConversion(Op, DAG); 3467 Results.push_back(T); 3468 } 3469 break; 3470 case HexagonISD::SSAT: 3471 case HexagonISD::USAT: 3472 case HexagonISD::TL_EXTEND: 3473 case HexagonISD::TL_TRUNCATE: 3474 Results.push_back(LegalizeHvxResize(Op, DAG)); 3475 break; 3476 default: 3477 break; 3478 } 3479 } 3480 3481 SDValue 3482 HexagonTargetLowering::combineTruncateBeforeLegal(SDValue Op, 3483 DAGCombinerInfo &DCI) const { 3484 // Simplify V:v2NiB --(bitcast)--> vNi2B --(truncate)--> vNiB 3485 // to extract-subvector (shuffle V, pick even, pick odd) 3486 3487 assert(Op.getOpcode() == ISD::TRUNCATE); 3488 SelectionDAG &DAG = DCI.DAG; 3489 const SDLoc &dl(Op); 3490 3491 if (Op.getOperand(0).getOpcode() == ISD::BITCAST) 3492 return SDValue(); 3493 SDValue Cast = Op.getOperand(0); 3494 SDValue Src = Cast.getOperand(0); 3495 3496 EVT TruncTy = Op.getValueType(); 3497 EVT CastTy = Cast.getValueType(); 3498 EVT SrcTy = 
Src.getValueType(); 3499 if (SrcTy.isSimple()) 3500 return SDValue(); 3501 if (SrcTy.getVectorElementType() != TruncTy.getVectorElementType()) 3502 return SDValue(); 3503 unsigned SrcLen = SrcTy.getVectorNumElements(); 3504 unsigned CastLen = CastTy.getVectorNumElements(); 3505 if (2 * CastLen != SrcLen) 3506 return SDValue(); 3507 3508 SmallVector<int, 128> Mask(SrcLen); 3509 for (int i = 0; i != static_cast<int>(CastLen); ++i) { 3510 Mask[i] = 2 * i; 3511 Mask[i + CastLen] = 2 * i + 1; 3512 } 3513 SDValue Deal = 3514 DAG.getVectorShuffle(SrcTy, dl, Src, DAG.getUNDEF(SrcTy), Mask); 3515 return opSplit(Deal, dl, DAG).first; 3516 } 3517 3518 SDValue 3519 HexagonTargetLowering::combineConcatVectorsBeforeLegal( 3520 SDValue Op, DAGCombinerInfo &DCI) const { 3521 // Fold 3522 // concat (shuffle x, y, m1), (shuffle x, y, m2) 3523 // into 3524 // shuffle (concat x, y), undef, m3 3525 if (Op.getNumOperands() != 2) 3526 return SDValue(); 3527 3528 SelectionDAG &DAG = DCI.DAG; 3529 const SDLoc &dl(Op); 3530 SDValue V0 = Op.getOperand(0); 3531 SDValue V1 = Op.getOperand(1); 3532 3533 if (V0.getOpcode() != ISD::VECTOR_SHUFFLE) 3534 return SDValue(); 3535 if (V1.getOpcode() != ISD::VECTOR_SHUFFLE) 3536 return SDValue(); 3537 3538 SetVector<SDValue> Order; 3539 Order.insert(V0.getOperand(0)); 3540 Order.insert(V0.getOperand(1)); 3541 Order.insert(V1.getOperand(0)); 3542 Order.insert(V1.getOperand(1)); 3543 3544 if (Order.size() > 2) 3545 return SDValue(); 3546 3547 // In ISD::VECTOR_SHUFFLE, the types of each input and the type of the 3548 // result must be the same. 
  EVT InpTy = V0.getValueType();
  assert(InpTy.isVector());
  unsigned InpLen = InpTy.getVectorNumElements();

  SmallVector<int, 128> LongMask;
  // Remap one input shuffle's mask into the index space of
  // (concat Order[0], Order[1]) and append it to LongMask.
  auto AppendToMask = [&](SDValue Shuffle) {
    auto *SV = cast<ShuffleVectorSDNode>(Shuffle.getNode());
    ArrayRef<int> Mask = SV->getMask();
    SDValue X = Shuffle.getOperand(0);
    SDValue Y = Shuffle.getOperand(1);
    for (int M : Mask) {
      if (M == -1) {
        // Undef lane: keep as-is.
        LongMask.push_back(M);
        continue;
      }
      // Indices < InpLen select from X, the rest from Y (rebased to 0).
      SDValue Src = static_cast<unsigned>(M) < InpLen ? X : Y;
      if (static_cast<unsigned>(M) >= InpLen)
        M -= InpLen;

      // Offset by InpLen if the source is the second concat operand.
      int OutOffset = Order[0] == Src ? 0 : InpLen;
      LongMask.push_back(M + OutOffset);
    }
  };

  AppendToMask(V0);
  AppendToMask(V1);

  SDValue C0 = Order.front();
  SDValue C1 = Order.back(); // Can be same as front
  EVT LongTy = InpTy.getDoubleNumVectorElementsVT(*DAG.getContext());

  SDValue Cat = DAG.getNode(ISD::CONCAT_VECTORS, dl, LongTy, {C0, C1});
  return DAG.getVectorShuffle(LongTy, dl, Cat, DAG.getUNDEF(LongTy), LongMask);
}

/// Target DAG-combine hook for HVX-related nodes.
SDValue
HexagonTargetLowering::PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
      const {
  const SDLoc &dl(N);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op(N, 0);
  unsigned Opc = Op.getOpcode();

  SmallVector<SDValue, 4> Ops(N->ops().begin(), N->ops().end());

  // These two combines run regardless of the legalization phase.
  if (Opc == ISD::TRUNCATE)
    return combineTruncateBeforeLegal(Op, DCI);
  if (Opc == ISD::CONCAT_VECTORS)
    return combineConcatVectorsBeforeLegal(Op, DCI);

  // The remaining combines only run after operation legalization.
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  switch (Opc) {
  case ISD::VSELECT: {
    // (vselect (xor x, qtrue), v0, v1) -> (vselect x, v1, v0)
    SDValue Cond = Ops[0];
    if (Cond->getOpcode() == ISD::XOR) {
      SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
      if (C1->getOpcode() == HexagonISD::QTRUE)
        return DAG.getNode(ISD::VSELECT, dl, ty(Op), C0, Ops[2], Ops[1]);
    }
    break;
  }
  case HexagonISD::V2Q:
    // v2q (splat c) -> qfalse if c == 0, qtrue otherwise.
    if (Ops[0].getOpcode() == ISD::SPLAT_VECTOR) {
      if (const auto *C = dyn_cast<ConstantSDNode>(Ops[0].getOperand(0)))
        return C->isZero() ? DAG.getNode(HexagonISD::QFALSE, dl, ty(Op))
                           : DAG.getNode(HexagonISD::QTRUE, dl, ty(Op));
    }
    break;
  case HexagonISD::Q2V:
    // q2v qtrue -> splat of all-ones, q2v qfalse -> zero vector.
    if (Ops[0].getOpcode() == HexagonISD::QTRUE)
      return DAG.getNode(ISD::SPLAT_VECTOR, dl, ty(Op),
                         DAG.getConstant(-1, dl, MVT::i32));
    if (Ops[0].getOpcode() == HexagonISD::QFALSE)
      return getZero(dl, ty(Op), DAG);
    break;
  case HexagonISD::VINSERTW0:
    // Inserting an undef word leaves the vector unchanged.
    if (isUndef(Ops[1]))
      return Ops[0];
    break;
  case HexagonISD::VROR: {
    // Merge nested rotates: (vror (vror v, r1), r0) -> (vror v, r0+r1).
    if (Ops[0].getOpcode() == HexagonISD::VROR) {
      SDValue Vec = Ops[0].getOperand(0);
      SDValue Rot0 = Ops[1], Rot1 = Ops[0].getOperand(1);
      SDValue Rot = DAG.getNode(ISD::ADD, dl, ty(Rot0), {Rot0, Rot1});
      return DAG.getNode(HexagonISD::VROR, dl, ty(Op), {Vec, Rot});
    }
    break;
  }
  }

  return SDValue();
}

/// Return true if Ty is not HVX-legal but its preferred legalization is to
/// split it into HVX-legal halves.
bool
HexagonTargetLowering::shouldSplitToHvx(MVT Ty, SelectionDAG &DAG) const {
  if (Subtarget.isHVXVectorType(Ty, true))
    return false;
  auto Action = getPreferredHvxVectorAction(Ty);
  if (Action == TargetLoweringBase::TypeSplitVector)
    return Subtarget.isHVXVectorType(typeLegalize(Ty, DAG), true);
  return false;
}

/// Return true if Ty is not HVX-legal but its preferred legalization is to
/// widen it to an HVX-legal type.
bool
HexagonTargetLowering::shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const {
  if (Subtarget.isHVXVectorType(Ty, true))
    return false;
  auto Action = getPreferredHvxVectorAction(Ty);
  if (Action == TargetLoweringBase::TypeWidenVector)
    return Subtarget.isHVXVectorType(typeLegalize(Ty, DAG), true);
  return false;
}

/// Return true if node N should be handled by the HVX lowering code,
/// either because it already uses HVX vector types, or because one of its
/// vector types would be widened to an HVX type during legalization.
bool
HexagonTargetLowering::isHvxOperation(SDNode *N, SelectionDAG &DAG) const {
  if (!Subtarget.useHVXOps())
    return false;
  // If the type of any result, or any operand type are HVX vector types,
  // this is an HVX operation.
  auto IsHvxTy = [this](EVT Ty) {
    return Ty.isSimple() && Subtarget.isHVXVectorType(Ty.getSimpleVT(), true);
  };
  auto IsHvxOp = [this](SDValue Op) {
    return Op.getValueType().isSimple() &&
           Subtarget.isHVXVectorType(ty(Op), true);
  };
  if (llvm::any_of(N->values(), IsHvxTy) || llvm::any_of(N->ops(), IsHvxOp))
    return true;

  // Check if this could be an HVX operation after type widening.
  auto IsWidenedToHvx = [this, &DAG](SDValue Op) {
    if (!Op.getValueType().isSimple())
      return false;
    MVT ValTy = ty(Op);
    return ValTy.isVector() && shouldWidenToHvx(ValTy, DAG);
  };

  for (int i = 0, e = N->getNumValues(); i != e; ++i) {
    if (IsWidenedToHvx(SDValue(N, i)))
      return true;
  }
  return llvm::any_of(N->ops(), IsWidenedToHvx);
}