1 //===------- LegalizeVectorTypes.cpp - Legalization of vector types -------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file performs vector type splitting and scalarization for LegalizeTypes. 10 // Scalarization is the act of changing a computation in an illegal one-element 11 // vector type to be a computation in its scalar element type. For example, 12 // implementing <1 x f32> arithmetic in a scalar f32 register. This is needed 13 // as a base case when scalarizing vector arithmetic like <4 x f32>, which 14 // eventually decomposes to scalars if the target doesn't support v4f32 or v2f32 15 // types. 16 // Splitting is the act of changing a computation in an invalid vector type to 17 // be a computation in two vectors of half the size. For example, implementing 18 // <128 x f32> operations in terms of two <64 x f32> operations. 19 // 20 //===----------------------------------------------------------------------===// 21 22 #include "LegalizeTypes.h" 23 #include "llvm/ADT/SmallBitVector.h" 24 #include "llvm/Analysis/MemoryLocation.h" 25 #include "llvm/Analysis/VectorUtils.h" 26 #include "llvm/CodeGen/ISDOpcodes.h" 27 #include "llvm/IR/DataLayout.h" 28 #include "llvm/Support/ErrorHandling.h" 29 #include "llvm/Support/TypeSize.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include <numeric> 32 33 using namespace llvm; 34 35 #define DEBUG_TYPE "legalize-types" 36 37 //===----------------------------------------------------------------------===// 38 // Result Vector Scalarization: <1 x ty> -> ty. 
39 //===----------------------------------------------------------------------===// 40 41 void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) { 42 LLVM_DEBUG(dbgs() << "Scalarize node result " << ResNo << ": "; 43 N->dump(&DAG)); 44 SDValue R = SDValue(); 45 46 switch (N->getOpcode()) { 47 default: 48 #ifndef NDEBUG 49 dbgs() << "ScalarizeVectorResult #" << ResNo << ": "; 50 N->dump(&DAG); 51 dbgs() << "\n"; 52 #endif 53 report_fatal_error("Do not know how to scalarize the result of this " 54 "operator!\n"); 55 56 case ISD::MERGE_VALUES: R = ScalarizeVecRes_MERGE_VALUES(N, ResNo);break; 57 case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break; 58 case ISD::BUILD_VECTOR: R = ScalarizeVecRes_BUILD_VECTOR(N); break; 59 case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break; 60 case ISD::FP_ROUND: R = ScalarizeVecRes_FP_ROUND(N); break; 61 case ISD::AssertZext: 62 case ISD::AssertSext: 63 case ISD::FPOWI: 64 R = ScalarizeVecRes_UnaryOpWithExtraInput(N); 65 break; 66 case ISD::INSERT_VECTOR_ELT: R = ScalarizeVecRes_INSERT_VECTOR_ELT(N); break; 67 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N));break; 68 case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break; 69 case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_InregOp(N); break; 70 case ISD::VSELECT: R = ScalarizeVecRes_VSELECT(N); break; 71 case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break; 72 case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break; 73 case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break; 74 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break; 75 case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break; 76 case ISD::IS_FPCLASS: R = ScalarizeVecRes_IS_FPCLASS(N); break; 77 case ISD::ANY_EXTEND_VECTOR_INREG: 78 case ISD::SIGN_EXTEND_VECTOR_INREG: 79 case ISD::ZERO_EXTEND_VECTOR_INREG: 80 R = ScalarizeVecRes_VecInregOp(N); 81 break; 82 case ISD::ABS: 83 case ISD::ANY_EXTEND: 84 case ISD::BITREVERSE: 85 case ISD::BSWAP: 86 case ISD::CTLZ: 87 case ISD::CTLZ_ZERO_UNDEF: 88 case ISD::CTPOP: 89 case ISD::CTTZ: 90 case ISD::CTTZ_ZERO_UNDEF: 91 case ISD::FABS: 92 case ISD::FACOS: 93 case ISD::FASIN: 94 case ISD::FATAN: 95 case ISD::FCEIL: 96 case ISD::FCOS: 97 case ISD::FCOSH: 98 case ISD::FEXP: 99 case ISD::FEXP2: 100 case ISD::FEXP10: 101 case ISD::FFLOOR: 102 case ISD::FLOG: 103 case ISD::FLOG10: 104 case ISD::FLOG2: 105 case ISD::FNEARBYINT: 106 case ISD::FNEG: 107 case ISD::FREEZE: 108 case ISD::ARITH_FENCE: 109 case ISD::FP_EXTEND: 110 case ISD::FP_TO_SINT: 111 case ISD::FP_TO_UINT: 112 case ISD::FRINT: 113 case ISD::LRINT: 114 case ISD::LLRINT: 115 case ISD::FROUND: 116 case ISD::FROUNDEVEN: 117 case ISD::FSIN: 118 case ISD::FSINH: 119 case ISD::FSQRT: 120 case ISD::FTAN: 121 case ISD::FTANH: 122 case ISD::FTRUNC: 123 case ISD::SIGN_EXTEND: 124 case ISD::SINT_TO_FP: 125 case ISD::TRUNCATE: 126 case ISD::UINT_TO_FP: 127 case ISD::ZERO_EXTEND: 128 case ISD::FCANONICALIZE: 129 R = ScalarizeVecRes_UnaryOp(N); 130 break; 131 case ISD::ADDRSPACECAST: 132 R = ScalarizeVecRes_ADDRSPACECAST(N); 133 break; 134 case ISD::FFREXP: 135 R = ScalarizeVecRes_FFREXP(N, ResNo); 136 break; 137 case ISD::ADD: 138 case ISD::AND: 139 case ISD::AVGCEILS: 140 case ISD::AVGCEILU: 141 case ISD::AVGFLOORS: 142 case ISD::AVGFLOORU: 143 case ISD::FADD: 144 case ISD::FCOPYSIGN: 145 case ISD::FDIV: 146 case ISD::FMUL: 147 case ISD::FMINNUM: 148 case ISD::FMAXNUM: 149 case ISD::FMINNUM_IEEE: 150 case ISD::FMAXNUM_IEEE: 151 case ISD::FMINIMUM: 152 case 
ISD::FMAXIMUM: 153 case ISD::FLDEXP: 154 case ISD::SMIN: 155 case ISD::SMAX: 156 case ISD::UMIN: 157 case ISD::UMAX: 158 159 case ISD::SADDSAT: 160 case ISD::UADDSAT: 161 case ISD::SSUBSAT: 162 case ISD::USUBSAT: 163 case ISD::SSHLSAT: 164 case ISD::USHLSAT: 165 166 case ISD::FPOW: 167 case ISD::FREM: 168 case ISD::FSUB: 169 case ISD::MUL: 170 case ISD::MULHS: 171 case ISD::MULHU: 172 case ISD::OR: 173 case ISD::SDIV: 174 case ISD::SREM: 175 case ISD::SUB: 176 case ISD::UDIV: 177 case ISD::UREM: 178 case ISD::XOR: 179 case ISD::SHL: 180 case ISD::SRA: 181 case ISD::SRL: 182 case ISD::ROTL: 183 case ISD::ROTR: 184 R = ScalarizeVecRes_BinOp(N); 185 break; 186 187 case ISD::SCMP: 188 case ISD::UCMP: 189 R = ScalarizeVecRes_CMP(N); 190 break; 191 192 case ISD::FMA: 193 case ISD::FSHL: 194 case ISD::FSHR: 195 R = ScalarizeVecRes_TernaryOp(N); 196 break; 197 198 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 199 case ISD::STRICT_##DAGN: 200 #include "llvm/IR/ConstrainedOps.def" 201 R = ScalarizeVecRes_StrictFPOp(N); 202 break; 203 204 case ISD::FP_TO_UINT_SAT: 205 case ISD::FP_TO_SINT_SAT: 206 R = ScalarizeVecRes_FP_TO_XINT_SAT(N); 207 break; 208 209 case ISD::UADDO: 210 case ISD::SADDO: 211 case ISD::USUBO: 212 case ISD::SSUBO: 213 case ISD::UMULO: 214 case ISD::SMULO: 215 R = ScalarizeVecRes_OverflowOp(N, ResNo); 216 break; 217 case ISD::SMULFIX: 218 case ISD::SMULFIXSAT: 219 case ISD::UMULFIX: 220 case ISD::UMULFIXSAT: 221 case ISD::SDIVFIX: 222 case ISD::SDIVFIXSAT: 223 case ISD::UDIVFIX: 224 case ISD::UDIVFIXSAT: 225 R = ScalarizeVecRes_FIX(N); 226 break; 227 } 228 229 // If R is null, the sub-method took care of registering the result. 230 if (R.getNode()) 231 SetScalarizedVector(SDValue(N, ResNo), R); 232 } 233 234 SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) { 235 SDValue LHS = GetScalarizedVector(N->getOperand(0)); 236 SDValue RHS = GetScalarizedVector(N->getOperand(1)); 237 return DAG.getNode(N->getOpcode(), SDLoc(N), 238 LHS.getValueType(), LHS, RHS, N->getFlags()); 239 } 240 241 SDValue DAGTypeLegalizer::ScalarizeVecRes_CMP(SDNode *N) { 242 SDLoc DL(N); 243 244 SDValue LHS = N->getOperand(0); 245 SDValue RHS = N->getOperand(1); 246 if (getTypeAction(LHS.getValueType()) == 247 TargetLowering::TypeScalarizeVector) { 248 LHS = GetScalarizedVector(LHS); 249 RHS = GetScalarizedVector(RHS); 250 } else { 251 EVT VT = LHS.getValueType().getVectorElementType(); 252 LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS, 253 DAG.getVectorIdxConstant(0, DL)); 254 RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS, 255 DAG.getVectorIdxConstant(0, DL)); 256 } 257 258 return DAG.getNode(N->getOpcode(), SDLoc(N), 259 N->getValueType(0).getVectorElementType(), LHS, RHS); 260 } 261 262 SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) { 263 SDValue Op0 = GetScalarizedVector(N->getOperand(0)); 264 SDValue Op1 = GetScalarizedVector(N->getOperand(1)); 265 SDValue Op2 = GetScalarizedVector(N->getOperand(2)); 266 return DAG.getNode(N->getOpcode(), SDLoc(N), Op0.getValueType(), Op0, Op1, 267 Op2, N->getFlags()); 268 } 269 270 SDValue DAGTypeLegalizer::ScalarizeVecRes_FIX(SDNode *N) { 271 SDValue Op0 = GetScalarizedVector(N->getOperand(0)); 272 SDValue Op1 = GetScalarizedVector(N->getOperand(1)); 273 SDValue Op2 = N->getOperand(2); 274 return DAG.getNode(N->getOpcode(), SDLoc(N), Op0.getValueType(), Op0, Op1, 275 Op2, N->getFlags()); 276 } 277 278 SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(SDNode *N, unsigned ResNo) { 279 
assert(N->getValueType(0).getVectorNumElements() == 1 && 280 "Unexpected vector type!"); 281 SDValue Elt = GetScalarizedVector(N->getOperand(0)); 282 283 EVT VT0 = N->getValueType(0); 284 EVT VT1 = N->getValueType(1); 285 SDLoc dl(N); 286 287 SDNode *ScalarNode = 288 DAG.getNode(N->getOpcode(), dl, 289 {VT0.getScalarType(), VT1.getScalarType()}, Elt) 290 .getNode(); 291 292 // Replace the other vector result not being explicitly scalarized here. 293 unsigned OtherNo = 1 - ResNo; 294 EVT OtherVT = N->getValueType(OtherNo); 295 if (getTypeAction(OtherVT) == TargetLowering::TypeScalarizeVector) { 296 SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo)); 297 } else { 298 SDValue OtherVal = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, OtherVT, 299 SDValue(ScalarNode, OtherNo)); 300 ReplaceValueWith(SDValue(N, OtherNo), OtherVal); 301 } 302 303 return SDValue(ScalarNode, ResNo); 304 } 305 306 SDValue DAGTypeLegalizer::ScalarizeVecRes_StrictFPOp(SDNode *N) { 307 EVT VT = N->getValueType(0).getVectorElementType(); 308 unsigned NumOpers = N->getNumOperands(); 309 SDValue Chain = N->getOperand(0); 310 EVT ValueVTs[] = {VT, MVT::Other}; 311 SDLoc dl(N); 312 313 SmallVector<SDValue, 4> Opers(NumOpers); 314 315 // The Chain is the first operand. 316 Opers[0] = Chain; 317 318 // Now process the remaining operands. 319 for (unsigned i = 1; i < NumOpers; ++i) { 320 SDValue Oper = N->getOperand(i); 321 EVT OperVT = Oper.getValueType(); 322 323 if (OperVT.isVector()) { 324 if (getTypeAction(OperVT) == TargetLowering::TypeScalarizeVector) 325 Oper = GetScalarizedVector(Oper); 326 else 327 Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 328 OperVT.getVectorElementType(), Oper, 329 DAG.getVectorIdxConstant(0, dl)); 330 } 331 332 Opers[i] = Oper; 333 } 334 335 SDValue Result = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(ValueVTs), 336 Opers, N->getFlags()); 337 338 // Legalize the chain result - switch anything that used the old chain to 339 // use the new one. 340 ReplaceValueWith(SDValue(N, 1), Result.getValue(1)); 341 return Result; 342 } 343 344 SDValue DAGTypeLegalizer::ScalarizeVecRes_OverflowOp(SDNode *N, 345 unsigned ResNo) { 346 SDLoc DL(N); 347 EVT ResVT = N->getValueType(0); 348 EVT OvVT = N->getValueType(1); 349 350 SDValue ScalarLHS, ScalarRHS; 351 if (getTypeAction(ResVT) == TargetLowering::TypeScalarizeVector) { 352 ScalarLHS = GetScalarizedVector(N->getOperand(0)); 353 ScalarRHS = GetScalarizedVector(N->getOperand(1)); 354 } else { 355 SmallVector<SDValue, 1> ElemsLHS, ElemsRHS; 356 DAG.ExtractVectorElements(N->getOperand(0), ElemsLHS); 357 DAG.ExtractVectorElements(N->getOperand(1), ElemsRHS); 358 ScalarLHS = ElemsLHS[0]; 359 ScalarRHS = ElemsRHS[0]; 360 } 361 362 SDVTList ScalarVTs = DAG.getVTList( 363 ResVT.getVectorElementType(), OvVT.getVectorElementType()); 364 SDNode *ScalarNode = DAG.getNode( 365 N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode(); 366 ScalarNode->setFlags(N->getFlags()); 367 368 // Replace the other vector result not being explicitly scalarized here. 
369 unsigned OtherNo = 1 - ResNo; 370 EVT OtherVT = N->getValueType(OtherNo); 371 if (getTypeAction(OtherVT) == TargetLowering::TypeScalarizeVector) { 372 SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo)); 373 } else { 374 SDValue OtherVal = DAG.getNode( 375 ISD::SCALAR_TO_VECTOR, DL, OtherVT, SDValue(ScalarNode, OtherNo)); 376 ReplaceValueWith(SDValue(N, OtherNo), OtherVal); 377 } 378 379 return SDValue(ScalarNode, ResNo); 380 } 381 382 SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N, 383 unsigned ResNo) { 384 SDValue Op = DisintegrateMERGE_VALUES(N, ResNo); 385 return GetScalarizedVector(Op); 386 } 387 388 SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) { 389 SDValue Op = N->getOperand(0); 390 if (Op.getValueType().isVector() 391 && Op.getValueType().getVectorNumElements() == 1 392 && !isSimpleLegalType(Op.getValueType())) 393 Op = GetScalarizedVector(Op); 394 EVT NewVT = N->getValueType(0).getVectorElementType(); 395 return DAG.getNode(ISD::BITCAST, SDLoc(N), 396 NewVT, Op); 397 } 398 399 SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) { 400 EVT EltVT = N->getValueType(0).getVectorElementType(); 401 SDValue InOp = N->getOperand(0); 402 // The BUILD_VECTOR operands may be of wider element types and 403 // we may need to truncate them back to the requested return type. 404 if (EltVT.isInteger()) 405 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp); 406 return InOp; 407 } 408 409 SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) { 410 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), 411 N->getValueType(0).getVectorElementType(), 412 N->getOperand(0), N->getOperand(1)); 413 } 414 415 SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_ROUND(SDNode *N) { 416 SDLoc DL(N); 417 SDValue Op = N->getOperand(0); 418 EVT OpVT = Op.getValueType(); 419 // The result needs scalarizing, but it's not a given that the source does. 420 // See similar logic in ScalarizeVecRes_UnaryOp. 421 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 422 Op = GetScalarizedVector(Op); 423 } else { 424 EVT VT = OpVT.getVectorElementType(); 425 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, 426 DAG.getVectorIdxConstant(0, DL)); 427 } 428 return DAG.getNode(ISD::FP_ROUND, DL, 429 N->getValueType(0).getVectorElementType(), Op, 430 N->getOperand(1)); 431 } 432 433 SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithExtraInput(SDNode *N) { 434 SDValue Op = GetScalarizedVector(N->getOperand(0)); 435 return DAG.getNode(N->getOpcode(), SDLoc(N), Op.getValueType(), Op, 436 N->getOperand(1)); 437 } 438 439 SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) { 440 // The value to insert may have a wider type than the vector element type, 441 // so be sure to truncate it to the element type if necessary. 442 SDValue Op = N->getOperand(1); 443 EVT EltVT = N->getValueType(0).getVectorElementType(); 444 if (Op.getValueType() != EltVT) 445 // FIXME: Can this happen for floating point types? 
446 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, Op); 447 return Op; 448 } 449 450 SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) { 451 assert(N->isUnindexed() && "Indexed vector load?"); 452 453 SDValue Result = DAG.getLoad( 454 ISD::UNINDEXED, N->getExtensionType(), 455 N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(), 456 N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()), 457 N->getPointerInfo(), N->getMemoryVT().getVectorElementType(), 458 N->getOriginalAlign(), N->getMemOperand()->getFlags(), N->getAAInfo()); 459 460 // Legalize the chain result - switch anything that used the old chain to 461 // use the new one. 462 ReplaceValueWith(SDValue(N, 1), Result.getValue(1)); 463 return Result; 464 } 465 466 SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) { 467 // Get the dest type - it doesn't always match the input type, e.g. int_to_fp. 468 EVT DestVT = N->getValueType(0).getVectorElementType(); 469 SDValue Op = N->getOperand(0); 470 EVT OpVT = Op.getValueType(); 471 SDLoc DL(N); 472 // The result needs scalarizing, but it's not a given that the source does. 473 // This is a workaround for targets where it's impossible to scalarize the 474 // result of a conversion, because the source type is legal. 475 // For instance, this happens on AArch64: v1i1 is illegal but v1i{8,16,32} 476 // are widened to v8i8, v4i16, and v2i32, which is legal, because v1i64 is 477 // legal and was not scalarized. 478 // See the similar logic in ScalarizeVecRes_SETCC 479 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 480 Op = GetScalarizedVector(Op); 481 } else { 482 EVT VT = OpVT.getVectorElementType(); 483 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, 484 DAG.getVectorIdxConstant(0, DL)); 485 } 486 return DAG.getNode(N->getOpcode(), SDLoc(N), DestVT, Op, N->getFlags()); 487 } 488 489 SDValue DAGTypeLegalizer::ScalarizeVecRes_InregOp(SDNode *N) { 490 EVT EltVT = N->getValueType(0).getVectorElementType(); 491 EVT ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType(); 492 SDValue LHS = GetScalarizedVector(N->getOperand(0)); 493 return DAG.getNode(N->getOpcode(), SDLoc(N), EltVT, 494 LHS, DAG.getValueType(ExtVT)); 495 } 496 497 SDValue DAGTypeLegalizer::ScalarizeVecRes_VecInregOp(SDNode *N) { 498 SDLoc DL(N); 499 SDValue Op = N->getOperand(0); 500 501 EVT OpVT = Op.getValueType(); 502 EVT OpEltVT = OpVT.getVectorElementType(); 503 EVT EltVT = N->getValueType(0).getVectorElementType(); 504 505 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 506 Op = GetScalarizedVector(Op); 507 } else { 508 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpEltVT, Op, 509 DAG.getVectorIdxConstant(0, DL)); 510 } 511 512 switch (N->getOpcode()) { 513 case ISD::ANY_EXTEND_VECTOR_INREG: 514 return DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Op); 515 case ISD::SIGN_EXTEND_VECTOR_INREG: 516 return DAG.getNode(ISD::SIGN_EXTEND, DL, EltVT, Op); 517 case ISD::ZERO_EXTEND_VECTOR_INREG: 518 return DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Op); 519 } 520 521 llvm_unreachable("Illegal extend_vector_inreg opcode"); 522 } 523 524 SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(SDNode *N) { 525 EVT DestVT = N->getValueType(0).getVectorElementType(); 526 SDValue Op = N->getOperand(0); 527 EVT OpVT = Op.getValueType(); 528 SDLoc DL(N); 529 // The result needs scalarizing, but it's not a given that the source does. 
530 // This is a workaround for targets where it's impossible to scalarize the 531 // result of a conversion, because the source type is legal. 532 // For instance, this happens on AArch64: v1i1 is illegal but v1i{8,16,32} 533 // are widened to v8i8, v4i16, and v2i32, which is legal, because v1i64 is 534 // legal and was not scalarized. 535 // See the similar logic in ScalarizeVecRes_SETCC 536 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 537 Op = GetScalarizedVector(Op); 538 } else { 539 EVT VT = OpVT.getVectorElementType(); 540 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, 541 DAG.getVectorIdxConstant(0, DL)); 542 } 543 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N); 544 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace(); 545 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace(); 546 return DAG.getAddrSpaceCast(DL, DestVT, Op, SrcAS, DestAS); 547 } 548 549 SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) { 550 // If the operand is wider than the vector element type then it is implicitly 551 // truncated. Make that explicit here. 552 EVT EltVT = N->getValueType(0).getVectorElementType(); 553 SDValue InOp = N->getOperand(0); 554 if (InOp.getValueType() != EltVT) 555 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp); 556 return InOp; 557 } 558 559 SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) { 560 SDValue Cond = N->getOperand(0); 561 EVT OpVT = Cond.getValueType(); 562 SDLoc DL(N); 563 // The vselect result and true/value operands needs scalarizing, but it's 564 // not a given that the Cond does. For instance, in AVX512 v1i1 is legal. 565 // See the similar logic in ScalarizeVecRes_SETCC 566 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 567 Cond = GetScalarizedVector(Cond); 568 } else { 569 EVT VT = OpVT.getVectorElementType(); 570 Cond = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Cond, 571 DAG.getVectorIdxConstant(0, DL)); 572 } 573 574 SDValue LHS = GetScalarizedVector(N->getOperand(1)); 575 TargetLowering::BooleanContent ScalarBool = 576 TLI.getBooleanContents(false, false); 577 TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true, false); 578 579 // If integer and float booleans have different contents then we can't 580 // reliably optimize in all cases. There is a full explanation for this in 581 // DAGCombiner::visitSELECT() where the same issue affects folding 582 // (select C, 0, 1) to (xor C, 1). 583 if (TLI.getBooleanContents(false, false) != 584 TLI.getBooleanContents(false, true)) { 585 // At least try the common case where the boolean is generated by a 586 // comparison. 587 if (Cond->getOpcode() == ISD::SETCC) { 588 EVT OpVT = Cond->getOperand(0).getValueType(); 589 ScalarBool = TLI.getBooleanContents(OpVT.getScalarType()); 590 VecBool = TLI.getBooleanContents(OpVT); 591 } else 592 ScalarBool = TargetLowering::UndefinedBooleanContent; 593 } 594 595 EVT CondVT = Cond.getValueType(); 596 if (ScalarBool != VecBool) { 597 switch (ScalarBool) { 598 case TargetLowering::UndefinedBooleanContent: 599 break; 600 case TargetLowering::ZeroOrOneBooleanContent: 601 assert(VecBool == TargetLowering::UndefinedBooleanContent || 602 VecBool == TargetLowering::ZeroOrNegativeOneBooleanContent); 603 // Vector read from all ones, scalar expects a single 1 so mask. 
604 Cond = DAG.getNode(ISD::AND, SDLoc(N), CondVT, 605 Cond, DAG.getConstant(1, SDLoc(N), CondVT)); 606 break; 607 case TargetLowering::ZeroOrNegativeOneBooleanContent: 608 assert(VecBool == TargetLowering::UndefinedBooleanContent || 609 VecBool == TargetLowering::ZeroOrOneBooleanContent); 610 // Vector reads from a one, scalar from all ones so sign extend. 611 Cond = DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), CondVT, 612 Cond, DAG.getValueType(MVT::i1)); 613 break; 614 } 615 } 616 617 // Truncate the condition if needed 618 auto BoolVT = getSetCCResultType(CondVT); 619 if (BoolVT.bitsLT(CondVT)) 620 Cond = DAG.getNode(ISD::TRUNCATE, SDLoc(N), BoolVT, Cond); 621 622 return DAG.getSelect(SDLoc(N), 623 LHS.getValueType(), Cond, LHS, 624 GetScalarizedVector(N->getOperand(2))); 625 } 626 627 SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) { 628 SDValue LHS = GetScalarizedVector(N->getOperand(1)); 629 return DAG.getSelect(SDLoc(N), 630 LHS.getValueType(), N->getOperand(0), LHS, 631 GetScalarizedVector(N->getOperand(2))); 632 } 633 634 SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) { 635 SDValue LHS = GetScalarizedVector(N->getOperand(2)); 636 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), LHS.getValueType(), 637 N->getOperand(0), N->getOperand(1), 638 LHS, GetScalarizedVector(N->getOperand(3)), 639 N->getOperand(4)); 640 } 641 642 SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) { 643 return DAG.getUNDEF(N->getValueType(0).getVectorElementType()); 644 } 645 646 SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) { 647 // Figure out if the scalar is the LHS or RHS and return it. 648 SDValue Arg = N->getOperand(2).getOperand(0); 649 if (Arg.isUndef()) 650 return DAG.getUNDEF(N->getValueType(0).getVectorElementType()); 651 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero(); 652 return GetScalarizedVector(N->getOperand(Op)); 653 } 654 655 SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) { 656 SDValue Src = N->getOperand(0); 657 EVT SrcVT = Src.getValueType(); 658 SDLoc dl(N); 659 660 // Handle case where result is scalarized but operand is not 661 if (getTypeAction(SrcVT) == TargetLowering::TypeScalarizeVector) 662 Src = GetScalarizedVector(Src); 663 else 664 Src = DAG.getNode( 665 ISD::EXTRACT_VECTOR_ELT, dl, SrcVT.getVectorElementType(), Src, 666 DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))); 667 668 EVT DstVT = N->getValueType(0).getVectorElementType(); 669 return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1)); 670 } 671 672 SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) { 673 assert(N->getValueType(0).isVector() && 674 N->getOperand(0).getValueType().isVector() && 675 "Operand types must be vectors"); 676 SDValue LHS = N->getOperand(0); 677 SDValue RHS = N->getOperand(1); 678 EVT OpVT = LHS.getValueType(); 679 EVT NVT = N->getValueType(0).getVectorElementType(); 680 SDLoc DL(N); 681 682 // The result needs scalarizing, but it's not a given that the source does. 683 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) { 684 LHS = GetScalarizedVector(LHS); 685 RHS = GetScalarizedVector(RHS); 686 } else { 687 EVT VT = OpVT.getVectorElementType(); 688 LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS, 689 DAG.getVectorIdxConstant(0, DL)); 690 RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS, 691 DAG.getVectorIdxConstant(0, DL)); 692 } 693 694 // Turn it into a scalar SETCC. 
695 SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, 696 N->getOperand(2)); 697 // Vectors may have a different boolean contents to scalars. Promote the 698 // value appropriately. 699 ISD::NodeType ExtendCode = 700 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT)); 701 return DAG.getNode(ExtendCode, DL, NVT, Res); 702 } 703 704 SDValue DAGTypeLegalizer::ScalarizeVecRes_IS_FPCLASS(SDNode *N) { 705 SDLoc DL(N); 706 SDValue Arg = N->getOperand(0); 707 SDValue Test = N->getOperand(1); 708 EVT ArgVT = Arg.getValueType(); 709 EVT ResultVT = N->getValueType(0).getVectorElementType(); 710 711 if (getTypeAction(ArgVT) == TargetLowering::TypeScalarizeVector) { 712 Arg = GetScalarizedVector(Arg); 713 } else { 714 EVT VT = ArgVT.getVectorElementType(); 715 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Arg, 716 DAG.getVectorIdxConstant(0, DL)); 717 } 718 719 SDValue Res = 720 DAG.getNode(ISD::IS_FPCLASS, DL, MVT::i1, {Arg, Test}, N->getFlags()); 721 // Vectors may have a different boolean contents to scalars. Promote the 722 // value appropriately. 723 ISD::NodeType ExtendCode = 724 TargetLowering::getExtendForContent(TLI.getBooleanContents(ArgVT)); 725 return DAG.getNode(ExtendCode, DL, ResultVT, Res); 726 } 727 728 //===----------------------------------------------------------------------===// 729 // Operand Vector Scalarization <1 x ty> -> ty. 730 //===----------------------------------------------------------------------===// 731 732 bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) { 733 LLVM_DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": "; 734 N->dump(&DAG)); 735 SDValue Res = SDValue(); 736 737 switch (N->getOpcode()) { 738 default: 739 #ifndef NDEBUG 740 dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": "; 741 N->dump(&DAG); 742 dbgs() << "\n"; 743 #endif 744 report_fatal_error("Do not know how to scalarize this operator's " 745 "operand!\n"); 746 case ISD::BITCAST: 747 Res = ScalarizeVecOp_BITCAST(N); 748 break; 749 case ISD::ANY_EXTEND: 750 case ISD::ZERO_EXTEND: 751 case ISD::SIGN_EXTEND: 752 case ISD::TRUNCATE: 753 case ISD::FP_TO_SINT: 754 case ISD::FP_TO_UINT: 755 case ISD::SINT_TO_FP: 756 case ISD::UINT_TO_FP: 757 case ISD::LRINT: 758 case ISD::LLRINT: 759 Res = ScalarizeVecOp_UnaryOp(N); 760 break; 761 case ISD::STRICT_SINT_TO_FP: 762 case ISD::STRICT_UINT_TO_FP: 763 case ISD::STRICT_FP_TO_SINT: 764 case ISD::STRICT_FP_TO_UINT: 765 Res = ScalarizeVecOp_UnaryOp_StrictFP(N); 766 break; 767 case ISD::CONCAT_VECTORS: 768 Res = ScalarizeVecOp_CONCAT_VECTORS(N); 769 break; 770 case ISD::EXTRACT_VECTOR_ELT: 771 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N); 772 break; 773 case ISD::VSELECT: 774 Res = ScalarizeVecOp_VSELECT(N); 775 break; 776 case ISD::SETCC: 777 Res = ScalarizeVecOp_VSETCC(N); 778 break; 779 case ISD::STORE: 780 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo); 781 break; 782 case ISD::STRICT_FP_ROUND: 783 Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo); 784 break; 785 case ISD::FP_ROUND: 786 Res = ScalarizeVecOp_FP_ROUND(N, OpNo); 787 break; 788 case ISD::STRICT_FP_EXTEND: 789 Res = ScalarizeVecOp_STRICT_FP_EXTEND(N); 790 break; 791 case ISD::FP_EXTEND: 792 Res = ScalarizeVecOp_FP_EXTEND(N); 793 break; 794 case ISD::VECREDUCE_FADD: 795 case ISD::VECREDUCE_FMUL: 796 case ISD::VECREDUCE_ADD: 797 case ISD::VECREDUCE_MUL: 798 case ISD::VECREDUCE_AND: 799 case ISD::VECREDUCE_OR: 800 case ISD::VECREDUCE_XOR: 801 case ISD::VECREDUCE_SMAX: 802 case ISD::VECREDUCE_SMIN: 803 case ISD::VECREDUCE_UMAX: 804 case 
ISD::VECREDUCE_UMIN: 805 case ISD::VECREDUCE_FMAX: 806 case ISD::VECREDUCE_FMIN: 807 case ISD::VECREDUCE_FMAXIMUM: 808 case ISD::VECREDUCE_FMINIMUM: 809 Res = ScalarizeVecOp_VECREDUCE(N); 810 break; 811 case ISD::VECREDUCE_SEQ_FADD: 812 case ISD::VECREDUCE_SEQ_FMUL: 813 Res = ScalarizeVecOp_VECREDUCE_SEQ(N); 814 break; 815 case ISD::SCMP: 816 case ISD::UCMP: 817 Res = ScalarizeVecOp_CMP(N); 818 break; 819 } 820 821 // If the result is null, the sub-method took care of registering results etc. 822 if (!Res.getNode()) return false; 823 824 // If the result is N, the sub-method updated N in place. Tell the legalizer 825 // core about this. 826 if (Res.getNode() == N) 827 return true; 828 829 assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && 830 "Invalid operand expansion"); 831 832 ReplaceValueWith(SDValue(N, 0), Res); 833 return false; 834 } 835 836 /// If the value to convert is a vector that needs to be scalarized, it must be 837 /// <1 x ty>. Convert the element instead. 838 SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) { 839 SDValue Elt = GetScalarizedVector(N->getOperand(0)); 840 return DAG.getNode(ISD::BITCAST, SDLoc(N), 841 N->getValueType(0), Elt); 842 } 843 844 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>. 845 /// Do the operation on the element instead. 846 SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp(SDNode *N) { 847 assert(N->getValueType(0).getVectorNumElements() == 1 && 848 "Unexpected vector type!"); 849 SDValue Elt = GetScalarizedVector(N->getOperand(0)); 850 SDValue Op = DAG.getNode(N->getOpcode(), SDLoc(N), 851 N->getValueType(0).getScalarType(), Elt); 852 // Revectorize the result so the types line up with what the uses of this 853 // expression expect. 854 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op); 855 } 856 857 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>. 858 /// Do the strict FP operation on the element instead. 859 SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(SDNode *N) { 860 assert(N->getValueType(0).getVectorNumElements() == 1 && 861 "Unexpected vector type!"); 862 SDValue Elt = GetScalarizedVector(N->getOperand(1)); 863 SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N), 864 { N->getValueType(0).getScalarType(), MVT::Other }, 865 { N->getOperand(0), Elt }); 866 // Legalize the chain result - switch anything that used the old chain to 867 // use the new one. 868 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 869 // Revectorize the result so the types line up with what the uses of this 870 // expression expect. 871 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); 872 873 // Do our own replacement and return SDValue() to tell the caller that we 874 // handled all replacements since caller can only handle a single result. 875 ReplaceValueWith(SDValue(N, 0), Res); 876 return SDValue(); 877 } 878 879 /// The vectors to concatenate have length one - use a BUILD_VECTOR instead. 880 SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) { 881 SmallVector<SDValue, 8> Ops(N->getNumOperands()); 882 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 883 Ops[i] = GetScalarizedVector(N->getOperand(i)); 884 return DAG.getBuildVector(N->getValueType(0), SDLoc(N), Ops); 885 } 886 887 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>, 888 /// so just return the element, ignoring the index. 
889 SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { 890 EVT VT = N->getValueType(0); 891 SDValue Res = GetScalarizedVector(N->getOperand(0)); 892 if (Res.getValueType() != VT) 893 Res = VT.isFloatingPoint() 894 ? DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Res) 895 : DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Res); 896 return Res; 897 } 898 899 /// If the input condition is a vector that needs to be scalarized, it must be 900 /// <1 x i1>, so just convert to a normal ISD::SELECT 901 /// (still with vector output type since that was acceptable if we got here). 902 SDValue DAGTypeLegalizer::ScalarizeVecOp_VSELECT(SDNode *N) { 903 SDValue ScalarCond = GetScalarizedVector(N->getOperand(0)); 904 EVT VT = N->getValueType(0); 905 906 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, ScalarCond, N->getOperand(1), 907 N->getOperand(2)); 908 } 909 910 /// If the operand is a vector that needs to be scalarized then the 911 /// result must be v1i1, so just convert to a scalar SETCC and wrap 912 /// with a scalar_to_vector since the res type is legal if we got here 913 SDValue DAGTypeLegalizer::ScalarizeVecOp_VSETCC(SDNode *N) { 914 assert(N->getValueType(0).isVector() && 915 N->getOperand(0).getValueType().isVector() && 916 "Operand types must be vectors"); 917 assert(N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type"); 918 919 EVT VT = N->getValueType(0); 920 SDValue LHS = GetScalarizedVector(N->getOperand(0)); 921 SDValue RHS = GetScalarizedVector(N->getOperand(1)); 922 923 EVT OpVT = N->getOperand(0).getValueType(); 924 EVT NVT = VT.getVectorElementType(); 925 SDLoc DL(N); 926 // Turn it into a scalar SETCC. 927 SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, 928 N->getOperand(2)); 929 930 // Vectors may have a different boolean contents to scalars. Promote the 931 // value appropriately. 932 ISD::NodeType ExtendCode = 933 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT)); 934 935 Res = DAG.getNode(ExtendCode, DL, NVT, Res); 936 937 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Res); 938 } 939 940 /// If the value to store is a vector that needs to be scalarized, it must be 941 /// <1 x ty>. Just store the element. 942 SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){ 943 assert(N->isUnindexed() && "Indexed store of one-element vector?"); 944 assert(OpNo == 1 && "Do not know how to scalarize this operand!"); 945 SDLoc dl(N); 946 947 if (N->isTruncatingStore()) 948 return DAG.getTruncStore( 949 N->getChain(), dl, GetScalarizedVector(N->getOperand(1)), 950 N->getBasePtr(), N->getPointerInfo(), 951 N->getMemoryVT().getVectorElementType(), N->getOriginalAlign(), 952 N->getMemOperand()->getFlags(), N->getAAInfo()); 953 954 return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)), 955 N->getBasePtr(), N->getPointerInfo(), 956 N->getOriginalAlign(), N->getMemOperand()->getFlags(), 957 N->getAAInfo()); 958 } 959 960 /// If the value to round is a vector that needs to be scalarized, it must be 961 /// <1 x ty>. Convert the element instead. 
962 SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) { 963 assert(OpNo == 0 && "Wrong operand for scalarization!"); 964 SDValue Elt = GetScalarizedVector(N->getOperand(0)); 965 SDValue Res = DAG.getNode(ISD::FP_ROUND, SDLoc(N), 966 N->getValueType(0).getVectorElementType(), Elt, 967 N->getOperand(1)); 968 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); 969 } 970 971 SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N, 972 unsigned OpNo) { 973 assert(OpNo == 1 && "Wrong operand for scalarization!"); 974 SDValue Elt = GetScalarizedVector(N->getOperand(1)); 975 SDValue Res = DAG.getNode(ISD::STRICT_FP_ROUND, SDLoc(N), 976 { N->getValueType(0).getVectorElementType(), 977 MVT::Other }, 978 { N->getOperand(0), Elt, N->getOperand(2) }); 979 // Legalize the chain result - switch anything that used the old chain to 980 // use the new one. 981 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 982 983 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); 984 985 // Do our own replacement and return SDValue() to tell the caller that we 986 // handled all replacements since caller can only handle a single result. 987 ReplaceValueWith(SDValue(N, 0), Res); 988 return SDValue(); 989 } 990 991 /// If the value to extend is a vector that needs to be scalarized, it must be 992 /// <1 x ty>. Convert the element instead. 993 SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_EXTEND(SDNode *N) { 994 SDValue Elt = GetScalarizedVector(N->getOperand(0)); 995 SDValue Res = DAG.getNode(ISD::FP_EXTEND, SDLoc(N), 996 N->getValueType(0).getVectorElementType(), Elt); 997 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); 998 } 999 1000 /// If the value to extend is a vector that needs to be scalarized, it must be 1001 /// <1 x ty>. Convert the element instead. 1002 SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) { 1003 SDValue Elt = GetScalarizedVector(N->getOperand(1)); 1004 SDValue Res = 1005 DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(N), 1006 {N->getValueType(0).getVectorElementType(), MVT::Other}, 1007 {N->getOperand(0), Elt}); 1008 // Legalize the chain result - switch anything that used the old chain to 1009 // use the new one. 1010 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 1011 1012 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res); 1013 1014 // Do our own replacement and return SDValue() to tell the caller that we 1015 // handled all replacements since caller can only handle a single result. 1016 ReplaceValueWith(SDValue(N, 0), Res); 1017 return SDValue(); 1018 } 1019 1020 SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE(SDNode *N) { 1021 SDValue Res = GetScalarizedVector(N->getOperand(0)); 1022 // Result type may be wider than element type. 
1023 if (Res.getValueType() != N->getValueType(0)) 1024 Res = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), N->getValueType(0), Res); 1025 return Res; 1026 } 1027 1028 SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) { 1029 SDValue AccOp = N->getOperand(0); 1030 SDValue VecOp = N->getOperand(1); 1031 1032 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(N->getOpcode()); 1033 1034 SDValue Op = GetScalarizedVector(VecOp); 1035 return DAG.getNode(BaseOpc, SDLoc(N), N->getValueType(0), 1036 AccOp, Op, N->getFlags()); 1037 } 1038 1039 SDValue DAGTypeLegalizer::ScalarizeVecOp_CMP(SDNode *N) { 1040 SDValue LHS = GetScalarizedVector(N->getOperand(0)); 1041 SDValue RHS = GetScalarizedVector(N->getOperand(1)); 1042 1043 EVT ResVT = N->getValueType(0).getVectorElementType(); 1044 SDValue Cmp = DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, LHS, RHS); 1045 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Cmp); 1046 } 1047 1048 //===----------------------------------------------------------------------===// 1049 // Result Vector Splitting 1050 //===----------------------------------------------------------------------===// 1051 1052 /// This method is called when the specified result of the specified node is 1053 /// found to need vector splitting. At this point, the node may also have 1054 /// invalid operands or may have other results that need legalization, we just 1055 /// know that (at least) one result needs vector splitting. 1056 void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { 1057 LLVM_DEBUG(dbgs() << "Split node result: "; N->dump(&DAG)); 1058 SDValue Lo, Hi; 1059 1060 // See if the target wants to custom expand this node. 1061 if (CustomLowerNode(N, N->getValueType(ResNo), true)) 1062 return; 1063 1064 switch (N->getOpcode()) { 1065 default: 1066 #ifndef NDEBUG 1067 dbgs() << "SplitVectorResult #" << ResNo << ": "; 1068 N->dump(&DAG); 1069 dbgs() << "\n"; 1070 #endif 1071 report_fatal_error("Do not know how to split the result of this " 1072 "operator!\n"); 1073 1074 case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break; 1075 case ISD::AssertZext: SplitVecRes_AssertZext(N, Lo, Hi); break; 1076 case ISD::VSELECT: 1077 case ISD::SELECT: 1078 case ISD::VP_MERGE: 1079 case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break; 1080 case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break; 1081 case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break; 1082 case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break; 1083 case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break; 1084 case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break; 1085 case ISD::EXTRACT_SUBVECTOR: SplitVecRes_EXTRACT_SUBVECTOR(N, Lo, Hi); break; 1086 case ISD::INSERT_SUBVECTOR: SplitVecRes_INSERT_SUBVECTOR(N, Lo, Hi); break; 1087 case ISD::FPOWI: 1088 case ISD::FLDEXP: 1089 case ISD::FCOPYSIGN: SplitVecRes_FPOp_MultiType(N, Lo, Hi); break; 1090 case ISD::IS_FPCLASS: SplitVecRes_IS_FPCLASS(N, Lo, Hi); break; 1091 case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break; 1092 case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(N, Lo, Hi); break; 1093 case ISD::SPLAT_VECTOR: 1094 case ISD::SCALAR_TO_VECTOR: 1095 SplitVecRes_ScalarOp(N, Lo, Hi); 1096 break; 1097 case ISD::STEP_VECTOR: 1098 SplitVecRes_STEP_VECTOR(N, Lo, Hi); 1099 break; 1100 case ISD::SIGN_EXTEND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break; 1101 case ISD::LOAD: 1102 SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi); 1103 break; 1104 case ISD::VP_LOAD: 1105 
SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi); 1106 break; 1107 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: 1108 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N), Lo, Hi); 1109 break; 1110 case ISD::MLOAD: 1111 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi); 1112 break; 1113 case ISD::MGATHER: 1114 case ISD::VP_GATHER: 1115 SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, /*SplitSETCC*/ true); 1116 break; 1117 case ISD::VECTOR_COMPRESS: 1118 SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi); 1119 break; 1120 case ISD::SETCC: 1121 case ISD::VP_SETCC: 1122 SplitVecRes_SETCC(N, Lo, Hi); 1123 break; 1124 case ISD::VECTOR_REVERSE: 1125 SplitVecRes_VECTOR_REVERSE(N, Lo, Hi); 1126 break; 1127 case ISD::VECTOR_SHUFFLE: 1128 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi); 1129 break; 1130 case ISD::VECTOR_SPLICE: 1131 SplitVecRes_VECTOR_SPLICE(N, Lo, Hi); 1132 break; 1133 case ISD::VECTOR_DEINTERLEAVE: 1134 SplitVecRes_VECTOR_DEINTERLEAVE(N); 1135 return; 1136 case ISD::VECTOR_INTERLEAVE: 1137 SplitVecRes_VECTOR_INTERLEAVE(N); 1138 return; 1139 case ISD::VAARG: 1140 SplitVecRes_VAARG(N, Lo, Hi); 1141 break; 1142 1143 case ISD::ANY_EXTEND_VECTOR_INREG: 1144 case ISD::SIGN_EXTEND_VECTOR_INREG: 1145 case ISD::ZERO_EXTEND_VECTOR_INREG: 1146 SplitVecRes_ExtVecInRegOp(N, Lo, Hi); 1147 break; 1148 1149 case ISD::ABS: 1150 case ISD::VP_ABS: 1151 case ISD::BITREVERSE: 1152 case ISD::VP_BITREVERSE: 1153 case ISD::BSWAP: 1154 case ISD::VP_BSWAP: 1155 case ISD::CTLZ: 1156 case ISD::VP_CTLZ: 1157 case ISD::CTTZ: 1158 case ISD::VP_CTTZ: 1159 case ISD::CTLZ_ZERO_UNDEF: 1160 case ISD::VP_CTLZ_ZERO_UNDEF: 1161 case ISD::CTTZ_ZERO_UNDEF: 1162 case ISD::VP_CTTZ_ZERO_UNDEF: 1163 case ISD::CTPOP: 1164 case ISD::VP_CTPOP: 1165 case ISD::FABS: case ISD::VP_FABS: 1166 case ISD::FACOS: 1167 case ISD::FASIN: 1168 case ISD::FATAN: 1169 case ISD::FCEIL: 1170 case ISD::VP_FCEIL: 1171 case ISD::FCOS: 1172 case ISD::FCOSH: 1173 case ISD::FEXP: 1174 case ISD::FEXP2: 1175 case ISD::FEXP10: 1176 case ISD::FFLOOR: 1177 case ISD::VP_FFLOOR: 1178 case ISD::FLOG: 1179 case ISD::FLOG10: 1180 case ISD::FLOG2: 1181 case ISD::FNEARBYINT: 1182 case ISD::VP_FNEARBYINT: 1183 case ISD::FNEG: case ISD::VP_FNEG: 1184 case ISD::FREEZE: 1185 case ISD::ARITH_FENCE: 1186 case ISD::FP_EXTEND: 1187 case ISD::VP_FP_EXTEND: 1188 case ISD::FP_ROUND: 1189 case ISD::VP_FP_ROUND: 1190 case ISD::FP_TO_SINT: 1191 case ISD::VP_FP_TO_SINT: 1192 case ISD::FP_TO_UINT: 1193 case ISD::VP_FP_TO_UINT: 1194 case ISD::FRINT: 1195 case ISD::VP_FRINT: 1196 case ISD::LRINT: 1197 case ISD::VP_LRINT: 1198 case ISD::LLRINT: 1199 case ISD::VP_LLRINT: 1200 case ISD::FROUND: 1201 case ISD::VP_FROUND: 1202 case ISD::FROUNDEVEN: 1203 case ISD::VP_FROUNDEVEN: 1204 case ISD::FSIN: 1205 case ISD::FSINH: 1206 case ISD::FSQRT: case ISD::VP_SQRT: 1207 case ISD::FTAN: 1208 case ISD::FTANH: 1209 case ISD::FTRUNC: 1210 case ISD::VP_FROUNDTOZERO: 1211 case ISD::SINT_TO_FP: 1212 case ISD::VP_SINT_TO_FP: 1213 case ISD::TRUNCATE: 1214 case ISD::VP_TRUNCATE: 1215 case ISD::UINT_TO_FP: 1216 case ISD::VP_UINT_TO_FP: 1217 case ISD::FCANONICALIZE: 1218 SplitVecRes_UnaryOp(N, Lo, Hi); 1219 break; 1220 case ISD::ADDRSPACECAST: 1221 SplitVecRes_ADDRSPACECAST(N, Lo, Hi); 1222 break; 1223 case ISD::FFREXP: 1224 SplitVecRes_FFREXP(N, ResNo, Lo, Hi); 1225 break; 1226 1227 case ISD::ANY_EXTEND: 1228 case ISD::SIGN_EXTEND: 1229 case ISD::ZERO_EXTEND: 1230 case ISD::VP_SIGN_EXTEND: 1231 case ISD::VP_ZERO_EXTEND: 1232 SplitVecRes_ExtendOp(N, Lo, Hi); 1233 break; 1234 1235 case ISD::ADD: 
case ISD::VP_ADD: 1236 case ISD::SUB: case ISD::VP_SUB: 1237 case ISD::MUL: case ISD::VP_MUL: 1238 case ISD::MULHS: 1239 case ISD::MULHU: 1240 case ISD::AVGCEILS: 1241 case ISD::AVGCEILU: 1242 case ISD::AVGFLOORS: 1243 case ISD::AVGFLOORU: 1244 case ISD::FADD: case ISD::VP_FADD: 1245 case ISD::FSUB: case ISD::VP_FSUB: 1246 case ISD::FMUL: case ISD::VP_FMUL: 1247 case ISD::FMINNUM: 1248 case ISD::FMINNUM_IEEE: 1249 case ISD::VP_FMINNUM: 1250 case ISD::FMAXNUM: 1251 case ISD::FMAXNUM_IEEE: 1252 case ISD::VP_FMAXNUM: 1253 case ISD::FMINIMUM: 1254 case ISD::VP_FMINIMUM: 1255 case ISD::FMAXIMUM: 1256 case ISD::VP_FMAXIMUM: 1257 case ISD::SDIV: case ISD::VP_SDIV: 1258 case ISD::UDIV: case ISD::VP_UDIV: 1259 case ISD::FDIV: case ISD::VP_FDIV: 1260 case ISD::FPOW: 1261 case ISD::AND: case ISD::VP_AND: 1262 case ISD::OR: case ISD::VP_OR: 1263 case ISD::XOR: case ISD::VP_XOR: 1264 case ISD::SHL: case ISD::VP_SHL: 1265 case ISD::SRA: case ISD::VP_SRA: 1266 case ISD::SRL: case ISD::VP_SRL: 1267 case ISD::UREM: case ISD::VP_UREM: 1268 case ISD::SREM: case ISD::VP_SREM: 1269 case ISD::FREM: case ISD::VP_FREM: 1270 case ISD::SMIN: case ISD::VP_SMIN: 1271 case ISD::SMAX: case ISD::VP_SMAX: 1272 case ISD::UMIN: case ISD::VP_UMIN: 1273 case ISD::UMAX: case ISD::VP_UMAX: 1274 case ISD::SADDSAT: case ISD::VP_SADDSAT: 1275 case ISD::UADDSAT: case ISD::VP_UADDSAT: 1276 case ISD::SSUBSAT: case ISD::VP_SSUBSAT: 1277 case ISD::USUBSAT: case ISD::VP_USUBSAT: 1278 case ISD::SSHLSAT: 1279 case ISD::USHLSAT: 1280 case ISD::ROTL: 1281 case ISD::ROTR: 1282 case ISD::VP_FCOPYSIGN: 1283 SplitVecRes_BinOp(N, Lo, Hi); 1284 break; 1285 case ISD::FMA: case ISD::VP_FMA: 1286 case ISD::FSHL: 1287 case ISD::VP_FSHL: 1288 case ISD::FSHR: 1289 case ISD::VP_FSHR: 1290 SplitVecRes_TernaryOp(N, Lo, Hi); 1291 break; 1292 1293 case ISD::SCMP: case ISD::UCMP: 1294 SplitVecRes_CMP(N, Lo, Hi); 1295 break; 1296 1297 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 1298 case ISD::STRICT_##DAGN: 1299 #include "llvm/IR/ConstrainedOps.def" 1300 SplitVecRes_StrictFPOp(N, Lo, Hi); 1301 break; 1302 1303 case ISD::FP_TO_UINT_SAT: 1304 case ISD::FP_TO_SINT_SAT: 1305 SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi); 1306 break; 1307 1308 case ISD::UADDO: 1309 case ISD::SADDO: 1310 case ISD::USUBO: 1311 case ISD::SSUBO: 1312 case ISD::UMULO: 1313 case ISD::SMULO: 1314 SplitVecRes_OverflowOp(N, ResNo, Lo, Hi); 1315 break; 1316 case ISD::SMULFIX: 1317 case ISD::SMULFIXSAT: 1318 case ISD::UMULFIX: 1319 case ISD::UMULFIXSAT: 1320 case ISD::SDIVFIX: 1321 case ISD::SDIVFIXSAT: 1322 case ISD::UDIVFIX: 1323 case ISD::UDIVFIXSAT: 1324 SplitVecRes_FIX(N, Lo, Hi); 1325 break; 1326 case ISD::EXPERIMENTAL_VP_REVERSE: 1327 SplitVecRes_VP_REVERSE(N, Lo, Hi); 1328 break; 1329 } 1330 1331 // If Lo/Hi is null, the sub-method took care of registering results etc. 
1332 if (Lo.getNode()) 1333 SetSplitVector(SDValue(N, ResNo), Lo, Hi); 1334 } 1335 1336 void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT, 1337 MachinePointerInfo &MPI, SDValue &Ptr, 1338 uint64_t *ScaledOffset) { 1339 SDLoc DL(N); 1340 unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8; 1341 1342 if (MemVT.isScalableVector()) { 1343 SDNodeFlags Flags; 1344 SDValue BytesIncrement = DAG.getVScale( 1345 DL, Ptr.getValueType(), 1346 APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize)); 1347 MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace()); 1348 Flags.setNoUnsignedWrap(true); 1349 if (ScaledOffset) 1350 *ScaledOffset += IncrementSize; 1351 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement, 1352 Flags); 1353 } else { 1354 MPI = N->getPointerInfo().getWithOffset(IncrementSize); 1355 // Increment the pointer to the other half. 1356 Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::getFixed(IncrementSize)); 1357 } 1358 } 1359 1360 std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) { 1361 return SplitMask(Mask, SDLoc(Mask)); 1362 } 1363 1364 std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask, 1365 const SDLoc &DL) { 1366 SDValue MaskLo, MaskHi; 1367 EVT MaskVT = Mask.getValueType(); 1368 if (getTypeAction(MaskVT) == TargetLowering::TypeSplitVector) 1369 GetSplitVector(Mask, MaskLo, MaskHi); 1370 else 1371 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL); 1372 return std::make_pair(MaskLo, MaskHi); 1373 } 1374 1375 void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi) { 1376 SDValue LHSLo, LHSHi; 1377 GetSplitVector(N->getOperand(0), LHSLo, LHSHi); 1378 SDValue RHSLo, RHSHi; 1379 GetSplitVector(N->getOperand(1), RHSLo, RHSHi); 1380 SDLoc dl(N); 1381 1382 const SDNodeFlags Flags = N->getFlags(); 1383 unsigned Opcode = N->getOpcode(); 1384 if (N->getNumOperands() == 2) { 1385 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), LHSLo, RHSLo, Flags); 1386 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), LHSHi, RHSHi, Flags); 1387 return; 1388 } 1389 1390 assert(N->getNumOperands() == 4 && "Unexpected number of operands!"); 1391 assert(N->isVPOpcode() && "Expected VP opcode"); 1392 1393 SDValue MaskLo, MaskHi; 1394 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2)); 1395 1396 SDValue EVLLo, EVLHi; 1397 std::tie(EVLLo, EVLHi) = 1398 DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl); 1399 1400 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), 1401 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags); 1402 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), 1403 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags); 1404 } 1405 1406 void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo, 1407 SDValue &Hi) { 1408 SDValue Op0Lo, Op0Hi; 1409 GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi); 1410 SDValue Op1Lo, Op1Hi; 1411 GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi); 1412 SDValue Op2Lo, Op2Hi; 1413 GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi); 1414 SDLoc dl(N); 1415 1416 const SDNodeFlags Flags = N->getFlags(); 1417 unsigned Opcode = N->getOpcode(); 1418 if (N->getNumOperands() == 3) { 1419 Lo = DAG.getNode(Opcode, dl, Op0Lo.getValueType(), Op0Lo, Op1Lo, Op2Lo, Flags); 1420 Hi = DAG.getNode(Opcode, dl, Op0Hi.getValueType(), Op0Hi, Op1Hi, Op2Hi, Flags); 1421 return; 1422 } 1423 1424 assert(N->getNumOperands() == 5 && "Unexpected number of operands!"); 1425 assert(N->isVPOpcode() && "Expected VP opcode"); 1426 1427 SDValue MaskLo, MaskHi; 1428 std::tie(MaskLo, MaskHi) = 
SplitMask(N->getOperand(3)); 1429 1430 SDValue EVLLo, EVLHi; 1431 std::tie(EVLLo, EVLHi) = 1432 DAG.SplitEVL(N->getOperand(4), N->getValueType(0), dl); 1433 1434 Lo = DAG.getNode(Opcode, dl, Op0Lo.getValueType(), 1435 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags); 1436 Hi = DAG.getNode(Opcode, dl, Op0Hi.getValueType(), 1437 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags); 1438 } 1439 1440 void DAGTypeLegalizer::SplitVecRes_CMP(SDNode *N, SDValue &Lo, SDValue &Hi) { 1441 LLVMContext &Ctxt = *DAG.getContext(); 1442 SDLoc dl(N); 1443 1444 SDValue LHS = N->getOperand(0); 1445 SDValue RHS = N->getOperand(1); 1446 1447 SDValue LHSLo, LHSHi, RHSLo, RHSHi; 1448 if (getTypeAction(LHS.getValueType()) == TargetLowering::TypeSplitVector) { 1449 GetSplitVector(LHS, LHSLo, LHSHi); 1450 GetSplitVector(RHS, RHSLo, RHSHi); 1451 } else { 1452 std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl); 1453 std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl); 1454 } 1455 1456 EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt); 1457 Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo); 1458 Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi); 1459 } 1460 1461 void DAGTypeLegalizer::SplitVecRes_FIX(SDNode *N, SDValue &Lo, SDValue &Hi) { 1462 SDValue LHSLo, LHSHi; 1463 GetSplitVector(N->getOperand(0), LHSLo, LHSHi); 1464 SDValue RHSLo, RHSHi; 1465 GetSplitVector(N->getOperand(1), RHSLo, RHSHi); 1466 SDLoc dl(N); 1467 SDValue Op2 = N->getOperand(2); 1468 1469 unsigned Opcode = N->getOpcode(); 1470 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), LHSLo, RHSLo, Op2, 1471 N->getFlags()); 1472 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), LHSHi, RHSHi, Op2, 1473 N->getFlags()); 1474 } 1475 1476 void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, 1477 SDValue &Hi) { 1478 // We know the result is a vector. The input may be either a vector or a 1479 // scalar value. 1480 EVT LoVT, HiVT; 1481 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1482 SDLoc dl(N); 1483 1484 SDValue InOp = N->getOperand(0); 1485 EVT InVT = InOp.getValueType(); 1486 1487 // Handle some special cases efficiently. 1488 switch (getTypeAction(InVT)) { 1489 case TargetLowering::TypeLegal: 1490 case TargetLowering::TypePromoteInteger: 1491 case TargetLowering::TypePromoteFloat: 1492 case TargetLowering::TypeSoftPromoteHalf: 1493 case TargetLowering::TypeSoftenFloat: 1494 case TargetLowering::TypeScalarizeVector: 1495 case TargetLowering::TypeWidenVector: 1496 break; 1497 case TargetLowering::TypeExpandInteger: 1498 case TargetLowering::TypeExpandFloat: 1499 // A scalar to vector conversion, where the scalar needs expansion. 1500 // If the vector is being split in two then we can just convert the 1501 // expanded pieces. 1502 if (LoVT == HiVT) { 1503 GetExpandedOp(InOp, Lo, Hi); 1504 if (DAG.getDataLayout().isBigEndian()) 1505 std::swap(Lo, Hi); 1506 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); 1507 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); 1508 return; 1509 } 1510 break; 1511 case TargetLowering::TypeSplitVector: 1512 // If the input is a vector that needs to be split, convert each split 1513 // piece of the input now. 
1514 GetSplitVector(InOp, Lo, Hi); 1515 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); 1516 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); 1517 return; 1518 case TargetLowering::TypeScalarizeScalableVector: 1519 report_fatal_error("Scalarization of scalable vectors is not supported."); 1520 } 1521 1522 if (LoVT.isScalableVector()) { 1523 auto [InLo, InHi] = DAG.SplitVectorOperand(N, 0); 1524 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, InLo); 1525 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, InHi); 1526 return; 1527 } 1528 1529 // In the general case, convert the input to an integer and split it by hand. 1530 EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits()); 1531 EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits()); 1532 if (DAG.getDataLayout().isBigEndian()) 1533 std::swap(LoIntVT, HiIntVT); 1534 1535 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi); 1536 1537 if (DAG.getDataLayout().isBigEndian()) 1538 std::swap(Lo, Hi); 1539 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); 1540 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); 1541 } 1542 1543 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, 1544 SDValue &Hi) { 1545 EVT LoVT, HiVT; 1546 SDLoc dl(N); 1547 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1548 unsigned LoNumElts = LoVT.getVectorNumElements(); 1549 SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts); 1550 Lo = DAG.getBuildVector(LoVT, dl, LoOps); 1551 1552 SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end()); 1553 Hi = DAG.getBuildVector(HiVT, dl, HiOps); 1554 } 1555 1556 void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, 1557 SDValue &Hi) { 1558 assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS"); 1559 SDLoc dl(N); 1560 unsigned NumSubvectors = N->getNumOperands() / 2; 1561 if (NumSubvectors == 1) { 1562 Lo = N->getOperand(0); 1563 Hi = N->getOperand(1); 1564 return; 1565 } 1566 1567 EVT LoVT, HiVT; 1568 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1569 1570 SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors); 1571 Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, LoVT, LoOps); 1572 1573 SmallVector<SDValue, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end()); 1574 Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HiVT, HiOps); 1575 } 1576 1577 void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo, 1578 SDValue &Hi) { 1579 SDValue Vec = N->getOperand(0); 1580 SDValue Idx = N->getOperand(1); 1581 SDLoc dl(N); 1582 1583 EVT LoVT, HiVT; 1584 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1585 1586 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx); 1587 uint64_t IdxVal = Idx->getAsZExtVal(); 1588 Hi = DAG.getNode( 1589 ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec, 1590 DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl)); 1591 } 1592 1593 void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo, 1594 SDValue &Hi) { 1595 SDValue Vec = N->getOperand(0); 1596 SDValue SubVec = N->getOperand(1); 1597 SDValue Idx = N->getOperand(2); 1598 SDLoc dl(N); 1599 GetSplitVector(Vec, Lo, Hi); 1600 1601 EVT VecVT = Vec.getValueType(); 1602 EVT LoVT = Lo.getValueType(); 1603 EVT SubVecVT = SubVec.getValueType(); 1604 unsigned VecElems = VecVT.getVectorMinNumElements(); 1605 unsigned SubElems = SubVecVT.getVectorMinNumElements(); 1606 unsigned LoElems = LoVT.getVectorMinNumElements(); 1607 1608 // If we know the index is in the first half, and 
we know the subvector 1609 // doesn't cross the boundary between the halves, we can avoid spilling the 1610 // vector, and insert into the lower half of the split vector directly. 1611 unsigned IdxVal = Idx->getAsZExtVal(); 1612 if (IdxVal + SubElems <= LoElems) { 1613 Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx); 1614 return; 1615 } 1616 // Similarly if the subvector is fully in the high half, but mind that we 1617 // can't tell whether a fixed-length subvector is fully within the high half 1618 // of a scalable vector. 1619 if (VecVT.isScalableVector() == SubVecVT.isScalableVector() && 1620 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) { 1621 Hi = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, Hi.getValueType(), Hi, SubVec, 1622 DAG.getVectorIdxConstant(IdxVal - LoElems, dl)); 1623 return; 1624 } 1625 1626 // Spill the vector to the stack. 1627 // In cases where the vector is illegal it will be broken down into parts 1628 // and stored in parts - we should use the alignment for the smallest part. 1629 Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false); 1630 SDValue StackPtr = 1631 DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign); 1632 auto &MF = DAG.getMachineFunction(); 1633 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1634 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 1635 1636 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1637 SmallestAlign); 1638 1639 // Store the new subvector into the specified index. 1640 SDValue SubVecPtr = 1641 TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, SubVecVT, Idx); 1642 Store = DAG.getStore(Store, dl, SubVec, SubVecPtr, 1643 MachinePointerInfo::getUnknownStack(MF)); 1644 1645 // Load the Lo part from the stack slot. 1646 Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, PtrInfo, 1647 SmallestAlign); 1648 1649 // Increment the pointer to the other part. 1650 auto *Load = cast<LoadSDNode>(Lo); 1651 MachinePointerInfo MPI = Load->getPointerInfo(); 1652 IncrementPointer(Load, LoVT, MPI, StackPtr); 1653 1654 // Load the Hi part from the stack slot. 1655 Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign); 1656 } 1657 1658 // Handle splitting an FP where the second operand does not match the first 1659 // type. 
The second operand may be a scalar, or a vector that has exactly as 1660 // many elements as the first 1661 void DAGTypeLegalizer::SplitVecRes_FPOp_MultiType(SDNode *N, SDValue &Lo, 1662 SDValue &Hi) { 1663 SDValue LHSLo, LHSHi; 1664 GetSplitVector(N->getOperand(0), LHSLo, LHSHi); 1665 SDLoc DL(N); 1666 1667 SDValue RHSLo, RHSHi; 1668 SDValue RHS = N->getOperand(1); 1669 EVT RHSVT = RHS.getValueType(); 1670 if (RHSVT.isVector()) { 1671 if (getTypeAction(RHSVT) == TargetLowering::TypeSplitVector) 1672 GetSplitVector(RHS, RHSLo, RHSHi); 1673 else 1674 std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, SDLoc(RHS)); 1675 1676 Lo = DAG.getNode(N->getOpcode(), DL, LHSLo.getValueType(), LHSLo, RHSLo); 1677 Hi = DAG.getNode(N->getOpcode(), DL, LHSHi.getValueType(), LHSHi, RHSHi); 1678 } else { 1679 Lo = DAG.getNode(N->getOpcode(), DL, LHSLo.getValueType(), LHSLo, RHS); 1680 Hi = DAG.getNode(N->getOpcode(), DL, LHSHi.getValueType(), LHSHi, RHS); 1681 } 1682 } 1683 1684 void DAGTypeLegalizer::SplitVecRes_IS_FPCLASS(SDNode *N, SDValue &Lo, 1685 SDValue &Hi) { 1686 SDLoc DL(N); 1687 SDValue ArgLo, ArgHi; 1688 SDValue Test = N->getOperand(1); 1689 SDValue FpValue = N->getOperand(0); 1690 if (getTypeAction(FpValue.getValueType()) == TargetLowering::TypeSplitVector) 1691 GetSplitVector(FpValue, ArgLo, ArgHi); 1692 else 1693 std::tie(ArgLo, ArgHi) = DAG.SplitVector(FpValue, SDLoc(FpValue)); 1694 EVT LoVT, HiVT; 1695 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1696 1697 Lo = DAG.getNode(ISD::IS_FPCLASS, DL, LoVT, ArgLo, Test, N->getFlags()); 1698 Hi = DAG.getNode(ISD::IS_FPCLASS, DL, HiVT, ArgHi, Test, N->getFlags()); 1699 } 1700 1701 void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo, 1702 SDValue &Hi) { 1703 SDValue LHSLo, LHSHi; 1704 GetSplitVector(N->getOperand(0), LHSLo, LHSHi); 1705 SDLoc dl(N); 1706 1707 EVT LoVT, HiVT; 1708 std::tie(LoVT, HiVT) = 1709 DAG.GetSplitDestVTs(cast<VTSDNode>(N->getOperand(1))->getVT()); 1710 1711 Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo, 1712 DAG.getValueType(LoVT)); 1713 Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, 1714 DAG.getValueType(HiVT)); 1715 } 1716 1717 void DAGTypeLegalizer::SplitVecRes_ExtVecInRegOp(SDNode *N, SDValue &Lo, 1718 SDValue &Hi) { 1719 unsigned Opcode = N->getOpcode(); 1720 SDValue N0 = N->getOperand(0); 1721 1722 SDLoc dl(N); 1723 SDValue InLo, InHi; 1724 1725 if (getTypeAction(N0.getValueType()) == TargetLowering::TypeSplitVector) 1726 GetSplitVector(N0, InLo, InHi); 1727 else 1728 std::tie(InLo, InHi) = DAG.SplitVectorOperand(N, 0); 1729 1730 EVT InLoVT = InLo.getValueType(); 1731 unsigned InNumElements = InLoVT.getVectorNumElements(); 1732 1733 EVT OutLoVT, OutHiVT; 1734 std::tie(OutLoVT, OutHiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1735 unsigned OutNumElements = OutLoVT.getVectorNumElements(); 1736 assert((2 * OutNumElements) <= InNumElements && 1737 "Illegal extend vector in reg split"); 1738 1739 // *_EXTEND_VECTOR_INREG instructions extend the lowest elements of the 1740 // input vector (i.e. we only use InLo): 1741 // OutLo will extend the first OutNumElements from InLo. 1742 // OutHi will extend the next OutNumElements from InLo. 1743 1744 // Shuffle the elements from InLo for OutHi into the bottom elements to 1745 // create a 'fake' InHi. 
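// Illustrative example (assuming a target where these types arise): if the
// result splits into two <4 x i32> halves and the split input gives
// InLo = <8 x i8>, then OutNumElements is 4 and SplitHi becomes
// <4, 5, 6, 7, -1, -1, -1, -1>, moving InLo's elements 4..7 down into the
// bottom lanes of the 'fake' InHi.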
1746 SmallVector<int, 8> SplitHi(InNumElements, -1); 1747 for (unsigned i = 0; i != OutNumElements; ++i) 1748 SplitHi[i] = i + OutNumElements; 1749 InHi = DAG.getVectorShuffle(InLoVT, dl, InLo, DAG.getUNDEF(InLoVT), SplitHi); 1750 1751 Lo = DAG.getNode(Opcode, dl, OutLoVT, InLo); 1752 Hi = DAG.getNode(Opcode, dl, OutHiVT, InHi); 1753 } 1754 1755 void DAGTypeLegalizer::SplitVecRes_StrictFPOp(SDNode *N, SDValue &Lo, 1756 SDValue &Hi) { 1757 unsigned NumOps = N->getNumOperands(); 1758 SDValue Chain = N->getOperand(0); 1759 EVT LoVT, HiVT; 1760 SDLoc dl(N); 1761 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1762 1763 SmallVector<SDValue, 4> OpsLo(NumOps); 1764 SmallVector<SDValue, 4> OpsHi(NumOps); 1765 1766 // The Chain is the first operand. 1767 OpsLo[0] = Chain; 1768 OpsHi[0] = Chain; 1769 1770 // Now process the remaining operands. 1771 for (unsigned i = 1; i < NumOps; ++i) { 1772 SDValue Op = N->getOperand(i); 1773 SDValue OpLo = Op; 1774 SDValue OpHi = Op; 1775 1776 EVT InVT = Op.getValueType(); 1777 if (InVT.isVector()) { 1778 // If the input also splits, handle it directly for a 1779 // compile time speedup. Otherwise split it by hand. 1780 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) 1781 GetSplitVector(Op, OpLo, OpHi); 1782 else 1783 std::tie(OpLo, OpHi) = DAG.SplitVectorOperand(N, i); 1784 } 1785 1786 OpsLo[i] = OpLo; 1787 OpsHi[i] = OpHi; 1788 } 1789 1790 EVT LoValueVTs[] = {LoVT, MVT::Other}; 1791 EVT HiValueVTs[] = {HiVT, MVT::Other}; 1792 Lo = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(LoValueVTs), OpsLo, 1793 N->getFlags()); 1794 Hi = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(HiValueVTs), OpsHi, 1795 N->getFlags()); 1796 1797 // Build a factor node to remember that this Op is independent of the 1798 // other one. 1799 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1800 Lo.getValue(1), Hi.getValue(1)); 1801 1802 // Legalize the chain result - switch anything that used the old chain to 1803 // use the new one. 1804 ReplaceValueWith(SDValue(N, 1), Chain); 1805 } 1806 1807 SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) { 1808 SDValue Chain = N->getOperand(0); 1809 EVT VT = N->getValueType(0); 1810 unsigned NE = VT.getVectorNumElements(); 1811 EVT EltVT = VT.getVectorElementType(); 1812 SDLoc dl(N); 1813 1814 SmallVector<SDValue, 8> Scalars; 1815 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 1816 1817 // If ResNE is 0, fully unroll the vector op. 1818 if (ResNE == 0) 1819 ResNE = NE; 1820 else if (NE > ResNE) 1821 NE = ResNE; 1822 1823 //The results of each unrolled operation, including the chain. 1824 EVT ChainVTs[] = {EltVT, MVT::Other}; 1825 SmallVector<SDValue, 8> Chains; 1826 1827 unsigned i; 1828 for (i = 0; i != NE; ++i) { 1829 Operands[0] = Chain; 1830 for (unsigned j = 1, e = N->getNumOperands(); j != e; ++j) { 1831 SDValue Operand = N->getOperand(j); 1832 EVT OperandVT = Operand.getValueType(); 1833 if (OperandVT.isVector()) { 1834 EVT OperandEltVT = OperandVT.getVectorElementType(); 1835 Operands[j] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 1836 Operand, DAG.getVectorIdxConstant(i, dl)); 1837 } else { 1838 Operands[j] = Operand; 1839 } 1840 } 1841 SDValue Scalar = DAG.getNode(N->getOpcode(), dl, ChainVTs, Operands); 1842 Scalar.getNode()->setFlags(N->getFlags()); 1843 1844 //Add in the scalar as well as its chain value to the 1845 //result vectors. 
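// Each unrolled scalar operation produces its own chain result; the chains
// are collected here and merged into a single TokenFactor once the loop
// finishes.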
1846 Scalars.push_back(Scalar); 1847 Chains.push_back(Scalar.getValue(1)); 1848 } 1849 1850 for (; i < ResNE; ++i) 1851 Scalars.push_back(DAG.getUNDEF(EltVT)); 1852 1853 // Build a new factor node to connect the chain back together. 1854 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 1855 ReplaceValueWith(SDValue(N, 1), Chain); 1856 1857 // Create a new BUILD_VECTOR node 1858 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, ResNE); 1859 return DAG.getBuildVector(VecVT, dl, Scalars); 1860 } 1861 1862 void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo, 1863 SDValue &Lo, SDValue &Hi) { 1864 SDLoc dl(N); 1865 EVT ResVT = N->getValueType(0); 1866 EVT OvVT = N->getValueType(1); 1867 EVT LoResVT, HiResVT, LoOvVT, HiOvVT; 1868 std::tie(LoResVT, HiResVT) = DAG.GetSplitDestVTs(ResVT); 1869 std::tie(LoOvVT, HiOvVT) = DAG.GetSplitDestVTs(OvVT); 1870 1871 SDValue LoLHS, HiLHS, LoRHS, HiRHS; 1872 if (getTypeAction(ResVT) == TargetLowering::TypeSplitVector) { 1873 GetSplitVector(N->getOperand(0), LoLHS, HiLHS); 1874 GetSplitVector(N->getOperand(1), LoRHS, HiRHS); 1875 } else { 1876 std::tie(LoLHS, HiLHS) = DAG.SplitVectorOperand(N, 0); 1877 std::tie(LoRHS, HiRHS) = DAG.SplitVectorOperand(N, 1); 1878 } 1879 1880 unsigned Opcode = N->getOpcode(); 1881 SDVTList LoVTs = DAG.getVTList(LoResVT, LoOvVT); 1882 SDVTList HiVTs = DAG.getVTList(HiResVT, HiOvVT); 1883 SDNode *LoNode = DAG.getNode(Opcode, dl, LoVTs, LoLHS, LoRHS).getNode(); 1884 SDNode *HiNode = DAG.getNode(Opcode, dl, HiVTs, HiLHS, HiRHS).getNode(); 1885 LoNode->setFlags(N->getFlags()); 1886 HiNode->setFlags(N->getFlags()); 1887 1888 Lo = SDValue(LoNode, ResNo); 1889 Hi = SDValue(HiNode, ResNo); 1890 1891 // Replace the other vector result not being explicitly split here. 1892 unsigned OtherNo = 1 - ResNo; 1893 EVT OtherVT = N->getValueType(OtherNo); 1894 if (getTypeAction(OtherVT) == TargetLowering::TypeSplitVector) { 1895 SetSplitVector(SDValue(N, OtherNo), 1896 SDValue(LoNode, OtherNo), SDValue(HiNode, OtherNo)); 1897 } else { 1898 SDValue OtherVal = DAG.getNode( 1899 ISD::CONCAT_VECTORS, dl, OtherVT, 1900 SDValue(LoNode, OtherNo), SDValue(HiNode, OtherNo)); 1901 ReplaceValueWith(SDValue(N, OtherNo), OtherVal); 1902 } 1903 } 1904 1905 void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, 1906 SDValue &Hi) { 1907 SDValue Vec = N->getOperand(0); 1908 SDValue Elt = N->getOperand(1); 1909 SDValue Idx = N->getOperand(2); 1910 SDLoc dl(N); 1911 GetSplitVector(Vec, Lo, Hi); 1912 1913 if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) { 1914 unsigned IdxVal = CIdx->getZExtValue(); 1915 unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements(); 1916 if (IdxVal < LoNumElts) { 1917 Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 1918 Lo.getValueType(), Lo, Elt, Idx); 1919 return; 1920 } else if (!Vec.getValueType().isScalableVector()) { 1921 Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt, 1922 DAG.getVectorIdxConstant(IdxVal - LoNumElts, dl)); 1923 return; 1924 } 1925 } 1926 1927 // Make the vector elements byte-addressable if they aren't already. 1928 EVT VecVT = Vec.getValueType(); 1929 EVT EltVT = VecVT.getVectorElementType(); 1930 if (!EltVT.isByteSized()) { 1931 EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext()); 1932 VecVT = VecVT.changeElementType(EltVT); 1933 Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec); 1934 // Extend the element type to match if needed. 
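// For example (assuming an <8 x i1> vector with a non-constant index reaches
// this point), the vector was any-extended to <8 x i8> above, and the scalar
// element is any-extended to i8 here so that the stack spill and the
// truncating store below operate on byte-sized units.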
1935 if (EltVT.bitsGT(Elt.getValueType())) 1936 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, EltVT, Elt); 1937 } 1938 1939 // Spill the vector to the stack. 1940 // In cases where the vector is illegal it will be broken down into parts 1941 // and stored in parts - we should use the alignment for the smallest part. 1942 Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false); 1943 SDValue StackPtr = 1944 DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign); 1945 auto &MF = DAG.getMachineFunction(); 1946 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1947 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 1948 1949 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1950 SmallestAlign); 1951 1952 // Store the new element. This may be larger than the vector element type, 1953 // so use a truncating store. 1954 SDValue EltPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx); 1955 Store = DAG.getTruncStore( 1956 Store, dl, Elt, EltPtr, MachinePointerInfo::getUnknownStack(MF), EltVT, 1957 commonAlignment(SmallestAlign, 1958 EltVT.getFixedSizeInBits() / 8)); 1959 1960 EVT LoVT, HiVT; 1961 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); 1962 1963 // Load the Lo part from the stack slot. 1964 Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign); 1965 1966 // Increment the pointer to the other part. 1967 auto Load = cast<LoadSDNode>(Lo); 1968 MachinePointerInfo MPI = Load->getPointerInfo(); 1969 IncrementPointer(Load, LoVT, MPI, StackPtr); 1970 1971 Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign); 1972 1973 // If we adjusted the original type, we need to truncate the results. 1974 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1975 if (LoVT != Lo.getValueType()) 1976 Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Lo); 1977 if (HiVT != Hi.getValueType()) 1978 Hi = DAG.getNode(ISD::TRUNCATE, dl, HiVT, Hi); 1979 } 1980 1981 void DAGTypeLegalizer::SplitVecRes_STEP_VECTOR(SDNode *N, SDValue &Lo, 1982 SDValue &Hi) { 1983 EVT LoVT, HiVT; 1984 SDLoc dl(N); 1985 assert(N->getValueType(0).isScalableVector() && 1986 "Only scalable vectors are supported for STEP_VECTOR"); 1987 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 1988 SDValue Step = N->getOperand(0); 1989 1990 Lo = DAG.getNode(ISD::STEP_VECTOR, dl, LoVT, Step); 1991 1992 // Hi = Lo + (EltCnt * Step) 1993 EVT EltVT = Step.getValueType(); 1994 APInt StepVal = Step->getAsAPIntVal(); 1995 SDValue StartOfHi = 1996 DAG.getVScale(dl, EltVT, StepVal * LoVT.getVectorMinNumElements()); 1997 StartOfHi = DAG.getSExtOrTrunc(StartOfHi, dl, HiVT.getVectorElementType()); 1998 StartOfHi = DAG.getNode(ISD::SPLAT_VECTOR, dl, HiVT, StartOfHi); 1999 2000 Hi = DAG.getNode(ISD::STEP_VECTOR, dl, HiVT, Step); 2001 Hi = DAG.getNode(ISD::ADD, dl, HiVT, Hi, StartOfHi); 2002 } 2003 2004 void DAGTypeLegalizer::SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo, 2005 SDValue &Hi) { 2006 EVT LoVT, HiVT; 2007 SDLoc dl(N); 2008 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 2009 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0)); 2010 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2011 Hi = DAG.getUNDEF(HiVT); 2012 } else { 2013 assert(N->getOpcode() == ISD::SPLAT_VECTOR && "Unexpected opcode"); 2014 Hi = Lo; 2015 } 2016 } 2017 2018 void DAGTypeLegalizer::SplitVecRes_VP_SPLAT(SDNode *N, SDValue &Lo, 2019 SDValue &Hi) { 2020 SDLoc dl(N); 2021 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0)); 2022 auto 
[MaskLo, MaskHi] = SplitMask(N->getOperand(1)); 2023 auto [EVLLo, EVLHi] = DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl); 2024 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0), MaskLo, EVLLo); 2025 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, N->getOperand(0), MaskHi, EVLHi); 2026 } 2027 2028 void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, 2029 SDValue &Hi) { 2030 assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!"); 2031 EVT LoVT, HiVT; 2032 SDLoc dl(LD); 2033 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0)); 2034 2035 ISD::LoadExtType ExtType = LD->getExtensionType(); 2036 SDValue Ch = LD->getChain(); 2037 SDValue Ptr = LD->getBasePtr(); 2038 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 2039 EVT MemoryVT = LD->getMemoryVT(); 2040 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); 2041 AAMDNodes AAInfo = LD->getAAInfo(); 2042 2043 EVT LoMemVT, HiMemVT; 2044 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 2045 2046 if (!LoMemVT.isByteSized() || !HiMemVT.isByteSized()) { 2047 SDValue Value, NewChain; 2048 std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG); 2049 std::tie(Lo, Hi) = DAG.SplitVector(Value, dl); 2050 ReplaceValueWith(SDValue(LD, 1), NewChain); 2051 return; 2052 } 2053 2054 Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset, 2055 LD->getPointerInfo(), LoMemVT, LD->getOriginalAlign(), 2056 MMOFlags, AAInfo); 2057 2058 MachinePointerInfo MPI; 2059 IncrementPointer(LD, LoMemVT, MPI, Ptr); 2060 2061 Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset, MPI, 2062 HiMemVT, LD->getOriginalAlign(), MMOFlags, AAInfo); 2063 2064 // Build a factor node to remember that this load is independent of the 2065 // other one. 2066 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 2067 Hi.getValue(1)); 2068 2069 // Legalize the chain result - switch anything that used the old chain to 2070 // use the new one. 
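// Users of the original load's chain will now depend on the TokenFactor of
// both half loads, so the two halves remain independently schedulable.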
2071 ReplaceValueWith(SDValue(LD, 1), Ch); 2072 } 2073 2074 void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo, 2075 SDValue &Hi) { 2076 assert(LD->isUnindexed() && "Indexed VP load during type legalization!"); 2077 EVT LoVT, HiVT; 2078 SDLoc dl(LD); 2079 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0)); 2080 2081 ISD::LoadExtType ExtType = LD->getExtensionType(); 2082 SDValue Ch = LD->getChain(); 2083 SDValue Ptr = LD->getBasePtr(); 2084 SDValue Offset = LD->getOffset(); 2085 assert(Offset.isUndef() && "Unexpected indexed variable-length load offset"); 2086 Align Alignment = LD->getOriginalAlign(); 2087 SDValue Mask = LD->getMask(); 2088 SDValue EVL = LD->getVectorLength(); 2089 EVT MemoryVT = LD->getMemoryVT(); 2090 2091 EVT LoMemVT, HiMemVT; 2092 bool HiIsEmpty = false; 2093 std::tie(LoMemVT, HiMemVT) = 2094 DAG.GetDependentSplitDestVTs(MemoryVT, LoVT, &HiIsEmpty); 2095 2096 // Split Mask operand 2097 SDValue MaskLo, MaskHi; 2098 if (Mask.getOpcode() == ISD::SETCC) { 2099 SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi); 2100 } else { 2101 if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector) 2102 GetSplitVector(Mask, MaskLo, MaskHi); 2103 else 2104 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl); 2105 } 2106 2107 // Split EVL operand 2108 SDValue EVLLo, EVLHi; 2109 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl); 2110 2111 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 2112 LD->getPointerInfo(), MachineMemOperand::MOLoad, 2113 LocationSize::beforeOrAfterPointer(), Alignment, LD->getAAInfo(), 2114 LD->getRanges()); 2115 2116 Lo = 2117 DAG.getLoadVP(LD->getAddressingMode(), ExtType, LoVT, dl, Ch, Ptr, Offset, 2118 MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad()); 2119 2120 if (HiIsEmpty) { 2121 // The hi vp_load has zero storage size. We therefore simply set it to 2122 // the low vp_load and rely on subsequent removal from the chain. 2123 Hi = Lo; 2124 } else { 2125 // Generate hi vp_load. 2126 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG, 2127 LD->isExpandingLoad()); 2128 2129 MachinePointerInfo MPI; 2130 if (LoMemVT.isScalableVector()) 2131 MPI = MachinePointerInfo(LD->getPointerInfo().getAddrSpace()); 2132 else 2133 MPI = LD->getPointerInfo().getWithOffset( 2134 LoMemVT.getStoreSize().getFixedValue()); 2135 2136 MMO = DAG.getMachineFunction().getMachineMemOperand( 2137 MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(), 2138 Alignment, LD->getAAInfo(), LD->getRanges()); 2139 2140 Hi = DAG.getLoadVP(LD->getAddressingMode(), ExtType, HiVT, dl, Ch, Ptr, 2141 Offset, MaskHi, EVLHi, HiMemVT, MMO, 2142 LD->isExpandingLoad()); 2143 } 2144 2145 // Build a factor node to remember that this load is independent of the 2146 // other one. 2147 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 2148 Hi.getValue(1)); 2149 2150 // Legalize the chain result - switch anything that used the old chain to 2151 // use the new one. 
2152 ReplaceValueWith(SDValue(LD, 1), Ch); 2153 } 2154 2155 void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD, 2156 SDValue &Lo, SDValue &Hi) { 2157 assert(SLD->isUnindexed() && 2158 "Indexed VP strided load during type legalization!"); 2159 assert(SLD->getOffset().isUndef() && 2160 "Unexpected indexed variable-length load offset"); 2161 2162 SDLoc DL(SLD); 2163 2164 EVT LoVT, HiVT; 2165 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(SLD->getValueType(0)); 2166 2167 EVT LoMemVT, HiMemVT; 2168 bool HiIsEmpty = false; 2169 std::tie(LoMemVT, HiMemVT) = 2170 DAG.GetDependentSplitDestVTs(SLD->getMemoryVT(), LoVT, &HiIsEmpty); 2171 2172 SDValue Mask = SLD->getMask(); 2173 SDValue LoMask, HiMask; 2174 if (Mask.getOpcode() == ISD::SETCC) { 2175 SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask); 2176 } else { 2177 if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector) 2178 GetSplitVector(Mask, LoMask, HiMask); 2179 else 2180 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL); 2181 } 2182 2183 SDValue LoEVL, HiEVL; 2184 std::tie(LoEVL, HiEVL) = 2185 DAG.SplitEVL(SLD->getVectorLength(), SLD->getValueType(0), DL); 2186 2187 // Generate the low vp_strided_load 2188 Lo = DAG.getStridedLoadVP( 2189 SLD->getAddressingMode(), SLD->getExtensionType(), LoVT, DL, 2190 SLD->getChain(), SLD->getBasePtr(), SLD->getOffset(), SLD->getStride(), 2191 LoMask, LoEVL, LoMemVT, SLD->getMemOperand(), SLD->isExpandingLoad()); 2192 2193 if (HiIsEmpty) { 2194 // The high vp_strided_load has zero storage size. We therefore simply set 2195 // it to the low vp_strided_load and rely on subsequent removal from the 2196 // chain. 2197 Hi = Lo; 2198 } else { 2199 // Generate the high vp_strided_load. 2200 // To calculate the high base address, we need to sum to the low base 2201 // address stride number of bytes for each element already loaded by low, 2202 // that is: Ptr = Ptr + (LoEVL * Stride) 2203 EVT PtrVT = SLD->getBasePtr().getValueType(); 2204 SDValue Increment = 2205 DAG.getNode(ISD::MUL, DL, PtrVT, LoEVL, 2206 DAG.getSExtOrTrunc(SLD->getStride(), DL, PtrVT)); 2207 SDValue Ptr = 2208 DAG.getNode(ISD::ADD, DL, PtrVT, SLD->getBasePtr(), Increment); 2209 2210 Align Alignment = SLD->getOriginalAlign(); 2211 if (LoMemVT.isScalableVector()) 2212 Alignment = commonAlignment( 2213 Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8); 2214 2215 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 2216 MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()), 2217 MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(), 2218 Alignment, SLD->getAAInfo(), SLD->getRanges()); 2219 2220 Hi = DAG.getStridedLoadVP(SLD->getAddressingMode(), SLD->getExtensionType(), 2221 HiVT, DL, SLD->getChain(), Ptr, SLD->getOffset(), 2222 SLD->getStride(), HiMask, HiEVL, HiMemVT, MMO, 2223 SLD->isExpandingLoad()); 2224 } 2225 2226 // Build a factor node to remember that this load is independent of the 2227 // other one. 2228 SDValue Ch = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1), 2229 Hi.getValue(1)); 2230 2231 // Legalize the chain result - switch anything that used the old chain to 2232 // use the new one. 
2233 ReplaceValueWith(SDValue(SLD, 1), Ch); 2234 } 2235 2236 void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD, 2237 SDValue &Lo, SDValue &Hi) { 2238 assert(MLD->isUnindexed() && "Indexed masked load during type legalization!"); 2239 EVT LoVT, HiVT; 2240 SDLoc dl(MLD); 2241 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0)); 2242 2243 SDValue Ch = MLD->getChain(); 2244 SDValue Ptr = MLD->getBasePtr(); 2245 SDValue Offset = MLD->getOffset(); 2246 assert(Offset.isUndef() && "Unexpected indexed masked load offset"); 2247 SDValue Mask = MLD->getMask(); 2248 SDValue PassThru = MLD->getPassThru(); 2249 Align Alignment = MLD->getOriginalAlign(); 2250 ISD::LoadExtType ExtType = MLD->getExtensionType(); 2251 2252 // Split Mask operand 2253 SDValue MaskLo, MaskHi; 2254 if (Mask.getOpcode() == ISD::SETCC) { 2255 SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi); 2256 } else { 2257 if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector) 2258 GetSplitVector(Mask, MaskLo, MaskHi); 2259 else 2260 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl); 2261 } 2262 2263 EVT MemoryVT = MLD->getMemoryVT(); 2264 EVT LoMemVT, HiMemVT; 2265 bool HiIsEmpty = false; 2266 std::tie(LoMemVT, HiMemVT) = 2267 DAG.GetDependentSplitDestVTs(MemoryVT, LoVT, &HiIsEmpty); 2268 2269 SDValue PassThruLo, PassThruHi; 2270 if (getTypeAction(PassThru.getValueType()) == TargetLowering::TypeSplitVector) 2271 GetSplitVector(PassThru, PassThruLo, PassThruHi); 2272 else 2273 std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl); 2274 2275 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 2276 MLD->getPointerInfo(), MachineMemOperand::MOLoad, 2277 LocationSize::beforeOrAfterPointer(), Alignment, MLD->getAAInfo(), 2278 MLD->getRanges()); 2279 2280 Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT, 2281 MMO, MLD->getAddressingMode(), ExtType, 2282 MLD->isExpandingLoad()); 2283 2284 if (HiIsEmpty) { 2285 // The hi masked load has zero storage size. We therefore simply set it to 2286 // the low masked load and rely on subsequent removal from the chain. 2287 Hi = Lo; 2288 } else { 2289 // Generate hi masked load. 2290 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG, 2291 MLD->isExpandingLoad()); 2292 2293 MachinePointerInfo MPI; 2294 if (LoMemVT.isScalableVector()) 2295 MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace()); 2296 else 2297 MPI = MLD->getPointerInfo().getWithOffset( 2298 LoMemVT.getStoreSize().getFixedValue()); 2299 2300 MMO = DAG.getMachineFunction().getMachineMemOperand( 2301 MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(), 2302 Alignment, MLD->getAAInfo(), MLD->getRanges()); 2303 2304 Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi, 2305 HiMemVT, MMO, MLD->getAddressingMode(), ExtType, 2306 MLD->isExpandingLoad()); 2307 } 2308 2309 // Build a factor node to remember that this load is independent of the 2310 // other one. 2311 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 2312 Hi.getValue(1)); 2313 2314 // Legalize the chain result - switch anything that used the old chain to 2315 // use the new one. 
2316 ReplaceValueWith(SDValue(MLD, 1), Ch); 2317 2318 } 2319 2320 void DAGTypeLegalizer::SplitVecRes_Gather(MemSDNode *N, SDValue &Lo, 2321 SDValue &Hi, bool SplitSETCC) { 2322 EVT LoVT, HiVT; 2323 SDLoc dl(N); 2324 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 2325 2326 SDValue Ch = N->getChain(); 2327 SDValue Ptr = N->getBasePtr(); 2328 struct Operands { 2329 SDValue Mask; 2330 SDValue Index; 2331 SDValue Scale; 2332 } Ops = [&]() -> Operands { 2333 if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) { 2334 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()}; 2335 } 2336 auto *VPSC = cast<VPGatherSDNode>(N); 2337 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()}; 2338 }(); 2339 2340 EVT MemoryVT = N->getMemoryVT(); 2341 Align Alignment = N->getOriginalAlign(); 2342 2343 // Split Mask operand 2344 SDValue MaskLo, MaskHi; 2345 if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) { 2346 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi); 2347 } else { 2348 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl); 2349 } 2350 2351 EVT LoMemVT, HiMemVT; 2352 // Split MemoryVT 2353 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 2354 2355 SDValue IndexHi, IndexLo; 2356 if (getTypeAction(Ops.Index.getValueType()) == 2357 TargetLowering::TypeSplitVector) 2358 GetSplitVector(Ops.Index, IndexLo, IndexHi); 2359 else 2360 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl); 2361 2362 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 2363 N->getPointerInfo(), MachineMemOperand::MOLoad, 2364 LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(), 2365 N->getRanges()); 2366 2367 if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) { 2368 SDValue PassThru = MGT->getPassThru(); 2369 SDValue PassThruLo, PassThruHi; 2370 if (getTypeAction(PassThru.getValueType()) == 2371 TargetLowering::TypeSplitVector) 2372 GetSplitVector(PassThru, PassThruLo, PassThruHi); 2373 else 2374 std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl); 2375 2376 ISD::LoadExtType ExtType = MGT->getExtensionType(); 2377 ISD::MemIndexType IndexTy = MGT->getIndexType(); 2378 2379 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale}; 2380 Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, 2381 OpsLo, MMO, IndexTy, ExtType); 2382 2383 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale}; 2384 Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, 2385 OpsHi, MMO, IndexTy, ExtType); 2386 } else { 2387 auto *VPGT = cast<VPGatherSDNode>(N); 2388 SDValue EVLLo, EVLHi; 2389 std::tie(EVLLo, EVLHi) = 2390 DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl); 2391 2392 SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo}; 2393 Lo = DAG.getGatherVP(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo, 2394 MMO, VPGT->getIndexType()); 2395 2396 SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi}; 2397 Hi = DAG.getGatherVP(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi, 2398 MMO, VPGT->getIndexType()); 2399 } 2400 2401 // Build a factor node to remember that this load is independent of the 2402 // other one. 2403 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 2404 Hi.getValue(1)); 2405 2406 // Legalize the chain result - switch anything that used the old chain to 2407 // use the new one. 
2408 ReplaceValueWith(SDValue(N, 1), Ch); 2409 } 2410 2411 void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo, 2412 SDValue &Hi) { 2413 // This is not "trivial", as there is a dependency between the two subvectors. 2414 // Depending on the number of 1s in the mask, the elements from the Hi vector 2415 // need to be moved to the Lo vector. So we just perform this as one "big" 2416 // operation and then extract the Lo and Hi vectors from that. This gets rid 2417 // of VECTOR_COMPRESS and all other operands can be legalized later. 2418 SDValue Compressed = TLI.expandVECTOR_COMPRESS(N, DAG); 2419 std::tie(Lo, Hi) = DAG.SplitVector(Compressed, SDLoc(N)); 2420 } 2421 2422 void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) { 2423 assert(N->getValueType(0).isVector() && 2424 N->getOperand(0).getValueType().isVector() && 2425 "Operand types must be vectors"); 2426 2427 EVT LoVT, HiVT; 2428 SDLoc DL(N); 2429 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 2430 2431 // If the input also splits, handle it directly. Otherwise split it by hand. 2432 SDValue LL, LH, RL, RH; 2433 if (getTypeAction(N->getOperand(0).getValueType()) == 2434 TargetLowering::TypeSplitVector) 2435 GetSplitVector(N->getOperand(0), LL, LH); 2436 else 2437 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0); 2438 2439 if (getTypeAction(N->getOperand(1).getValueType()) == 2440 TargetLowering::TypeSplitVector) 2441 GetSplitVector(N->getOperand(1), RL, RH); 2442 else 2443 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1); 2444 2445 if (N->getOpcode() == ISD::SETCC) { 2446 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2)); 2447 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2)); 2448 } else { 2449 assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode"); 2450 SDValue MaskLo, MaskHi, EVLLo, EVLHi; 2451 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3)); 2452 std::tie(EVLLo, EVLHi) = 2453 DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL); 2454 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2), MaskLo, 2455 EVLLo); 2456 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2), MaskHi, 2457 EVLHi); 2458 } 2459 } 2460 2461 void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, 2462 SDValue &Hi) { 2463 // Get the dest types - they may not match the input types, e.g. int_to_fp. 2464 EVT LoVT, HiVT; 2465 SDLoc dl(N); 2466 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 2467 2468 // If the input also splits, handle it directly for a compile time speedup. 2469 // Otherwise split it by hand. 
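// Splitting "by hand" means DAG.SplitVectorOperand extracts the two halves
// with EXTRACT_SUBVECTOR nodes, whereas GetSplitVector reuses the halves the
// legalizer has already recorded for the operand.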
2470 EVT InVT = N->getOperand(0).getValueType(); 2471 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) 2472 GetSplitVector(N->getOperand(0), Lo, Hi); 2473 else 2474 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); 2475 2476 const SDNodeFlags Flags = N->getFlags(); 2477 unsigned Opcode = N->getOpcode(); 2478 if (N->getNumOperands() <= 2) { 2479 if (Opcode == ISD::FP_ROUND) { 2480 Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags); 2481 Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags); 2482 } else { 2483 Lo = DAG.getNode(Opcode, dl, LoVT, Lo, Flags); 2484 Hi = DAG.getNode(Opcode, dl, HiVT, Hi, Flags); 2485 } 2486 return; 2487 } 2488 2489 assert(N->getNumOperands() == 3 && "Unexpected number of operands!"); 2490 assert(N->isVPOpcode() && "Expected VP opcode"); 2491 2492 SDValue MaskLo, MaskHi; 2493 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1)); 2494 2495 SDValue EVLLo, EVLHi; 2496 std::tie(EVLLo, EVLHi) = 2497 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl); 2498 2499 Lo = DAG.getNode(Opcode, dl, LoVT, {Lo, MaskLo, EVLLo}, Flags); 2500 Hi = DAG.getNode(Opcode, dl, HiVT, {Hi, MaskHi, EVLHi}, Flags); 2501 } 2502 2503 void DAGTypeLegalizer::SplitVecRes_ADDRSPACECAST(SDNode *N, SDValue &Lo, 2504 SDValue &Hi) { 2505 SDLoc dl(N); 2506 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0)); 2507 2508 // If the input also splits, handle it directly for a compile time speedup. 2509 // Otherwise split it by hand. 2510 EVT InVT = N->getOperand(0).getValueType(); 2511 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) 2512 GetSplitVector(N->getOperand(0), Lo, Hi); 2513 else 2514 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); 2515 2516 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N); 2517 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace(); 2518 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace(); 2519 Lo = DAG.getAddrSpaceCast(dl, LoVT, Lo, SrcAS, DestAS); 2520 Hi = DAG.getAddrSpaceCast(dl, HiVT, Hi, SrcAS, DestAS); 2521 } 2522 2523 void DAGTypeLegalizer::SplitVecRes_FFREXP(SDNode *N, unsigned ResNo, 2524 SDValue &Lo, SDValue &Hi) { 2525 SDLoc dl(N); 2526 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0)); 2527 auto [LoVT1, HiVT1] = DAG.GetSplitDestVTs(N->getValueType(1)); 2528 2529 // If the input also splits, handle it directly for a compile time speedup. 2530 // Otherwise split it by hand. 2531 EVT InVT = N->getOperand(0).getValueType(); 2532 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) 2533 GetSplitVector(N->getOperand(0), Lo, Hi); 2534 else 2535 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); 2536 2537 Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo); 2538 Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi); 2539 Lo->setFlags(N->getFlags()); 2540 Hi->setFlags(N->getFlags()); 2541 2542 SDNode *HiNode = Hi.getNode(); 2543 SDNode *LoNode = Lo.getNode(); 2544 2545 // Replace the other vector result not being explicitly split here. 
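// FFREXP produces two vector results (the fraction and the integer exponent
// vector). ResNo identifies the result we were asked to split; the other
// result is handled below, either recorded as a split pair or rebuilt with
// CONCAT_VECTORS, depending on how its type is legalized.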
2546 unsigned OtherNo = 1 - ResNo; 2547 EVT OtherVT = N->getValueType(OtherNo); 2548 if (getTypeAction(OtherVT) == TargetLowering::TypeSplitVector) { 2549 SetSplitVector(SDValue(N, OtherNo), SDValue(LoNode, OtherNo), 2550 SDValue(HiNode, OtherNo)); 2551 } else { 2552 SDValue OtherVal = 2553 DAG.getNode(ISD::CONCAT_VECTORS, dl, OtherVT, SDValue(LoNode, OtherNo), 2554 SDValue(HiNode, OtherNo)); 2555 ReplaceValueWith(SDValue(N, OtherNo), OtherVal); 2556 } 2557 } 2558 2559 void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo, 2560 SDValue &Hi) { 2561 SDLoc dl(N); 2562 EVT SrcVT = N->getOperand(0).getValueType(); 2563 EVT DestVT = N->getValueType(0); 2564 EVT LoVT, HiVT; 2565 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(DestVT); 2566 2567 // We can do better than a generic split operation if the extend is doing 2568 // more than just doubling the width of the elements and the following are 2569 // true: 2570 // - The number of vector elements is even, 2571 // - the source type is legal, 2572 // - the type of a split source is illegal, 2573 // - the type of an extended (by doubling element size) source is legal, and 2574 // - the type of that extended source when split is legal. 2575 // 2576 // This won't necessarily completely legalize the operation, but it will 2577 // more effectively move in the right direction and prevent falling down 2578 // to scalarization in many cases due to the input vector being split too 2579 // far. 2580 if (SrcVT.getVectorElementCount().isKnownEven() && 2581 SrcVT.getScalarSizeInBits() * 2 < DestVT.getScalarSizeInBits()) { 2582 LLVMContext &Ctx = *DAG.getContext(); 2583 EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx); 2584 EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx); 2585 2586 EVT SplitLoVT, SplitHiVT; 2587 std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT); 2588 if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) && 2589 TLI.isTypeLegal(NewSrcVT) && TLI.isTypeLegal(SplitLoVT)) { 2590 LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:"; 2591 N->dump(&DAG); dbgs() << "\n"); 2592 if (!N->isVPOpcode()) { 2593 // Extend the source vector by one step. 2594 SDValue NewSrc = 2595 DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0)); 2596 // Get the low and high halves of the new, extended one step, vector. 2597 std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl); 2598 // Extend those vector halves the rest of the way. 2599 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo); 2600 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi); 2601 return; 2602 } 2603 2604 // Extend the source vector by one step. 2605 SDValue NewSrc = 2606 DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0), 2607 N->getOperand(1), N->getOperand(2)); 2608 // Get the low and high halves of the new, extended one step, vector. 2609 std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl); 2610 2611 SDValue MaskLo, MaskHi; 2612 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1)); 2613 2614 SDValue EVLLo, EVLHi; 2615 std::tie(EVLLo, EVLHi) = 2616 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl); 2617 // Extend those vector halves the rest of the way. 2618 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo}); 2619 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi}); 2620 return; 2621 } 2622 } 2623 // Fall back to the generic unary operator splitting otherwise. 
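// (The incremental extend above is skipped when, for instance, the extend
// only doubles the element width; in that case the generic per-half split
// performed by SplitVecRes_UnaryOp is used directly.)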
2624 SplitVecRes_UnaryOp(N, Lo, Hi); 2625 } 2626 2627 void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, 2628 SDValue &Lo, SDValue &Hi) { 2629 // The low and high parts of the original input give four input vectors. 2630 SDValue Inputs[4]; 2631 SDLoc DL(N); 2632 GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]); 2633 GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]); 2634 EVT NewVT = Inputs[0].getValueType(); 2635 unsigned NewElts = NewVT.getVectorNumElements(); 2636 2637 auto &&IsConstant = [](const SDValue &N) { 2638 APInt SplatValue; 2639 return N.getResNo() == 0 && 2640 (ISD::isConstantSplatVector(N.getNode(), SplatValue) || 2641 ISD::isBuildVectorOfConstantSDNodes(N.getNode())); 2642 }; 2643 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1, 2644 SDValue &Input2, 2645 ArrayRef<int> Mask) { 2646 assert(Input1->getOpcode() == ISD::BUILD_VECTOR && 2647 Input2->getOpcode() == ISD::BUILD_VECTOR && 2648 "Expected build vector node."); 2649 EVT EltVT = NewVT.getVectorElementType(); 2650 SmallVector<SDValue> Ops(NewElts, DAG.getUNDEF(EltVT)); 2651 for (unsigned I = 0; I < NewElts; ++I) { 2652 if (Mask[I] == PoisonMaskElem) 2653 continue; 2654 unsigned Idx = Mask[I]; 2655 if (Idx >= NewElts) 2656 Ops[I] = Input2.getOperand(Idx - NewElts); 2657 else 2658 Ops[I] = Input1.getOperand(Idx); 2659 // Make the type of all elements the same as the element type. 2660 if (Ops[I].getValueType().bitsGT(EltVT)) 2661 Ops[I] = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Ops[I]); 2662 } 2663 return DAG.getBuildVector(NewVT, DL, Ops); 2664 }; 2665 2666 // If Lo or Hi uses elements from at most two of the four input vectors, then 2667 // express it as a vector shuffle of those two inputs. Otherwise extract the 2668 // input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR. 2669 SmallVector<int> OrigMask(N->getMask()); 2670 // Try to pack incoming shuffles/inputs. 2671 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts, 2672 &DL](SmallVectorImpl<int> &Mask) { 2673 // Check if all inputs are shuffles of the same operands or non-shuffles. 2674 MapVector<std::pair<SDValue, SDValue>, SmallVector<unsigned>> ShufflesIdxs; 2675 for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) { 2676 SDValue Input = Inputs[Idx]; 2677 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode()); 2678 if (!Shuffle || 2679 Input.getOperand(0).getValueType() != Input.getValueType()) 2680 continue; 2681 ShufflesIdxs[std::make_pair(Input.getOperand(0), Input.getOperand(1))] 2682 .push_back(Idx); 2683 ShufflesIdxs[std::make_pair(Input.getOperand(1), Input.getOperand(0))] 2684 .push_back(Idx); 2685 } 2686 for (auto &P : ShufflesIdxs) { 2687 if (P.second.size() < 2) 2688 continue; 2689 // Use shuffles operands instead of shuffles themselves. 2690 // 1. Adjust mask. 2691 for (int &Idx : Mask) { 2692 if (Idx == PoisonMaskElem) 2693 continue; 2694 unsigned SrcRegIdx = Idx / NewElts; 2695 if (Inputs[SrcRegIdx].isUndef()) { 2696 Idx = PoisonMaskElem; 2697 continue; 2698 } 2699 auto *Shuffle = 2700 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode()); 2701 if (!Shuffle || !is_contained(P.second, SrcRegIdx)) 2702 continue; 2703 int MaskElt = Shuffle->getMaskElt(Idx % NewElts); 2704 if (MaskElt == PoisonMaskElem) { 2705 Idx = PoisonMaskElem; 2706 continue; 2707 } 2708 Idx = MaskElt % NewElts + 2709 P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first 2710 ? 0 2711 : 1] * 2712 NewElts; 2713 } 2714 // 2. Update inputs. 
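// The mask rewrite in step 1 now refers to the shuffles' operands, so the
// shuffles themselves are no longer needed as inputs; install the shared
// operand pair in their place.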
2715 Inputs[P.second[0]] = P.first.first; 2716 Inputs[P.second[1]] = P.first.second; 2717 // Clear the pair data. 2718 P.second.clear(); 2719 ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear(); 2720 } 2721 // Check if any concat_vectors can be simplified. 2722 SmallBitVector UsedSubVector(2 * std::size(Inputs)); 2723 for (int &Idx : Mask) { 2724 if (Idx == PoisonMaskElem) 2725 continue; 2726 unsigned SrcRegIdx = Idx / NewElts; 2727 if (Inputs[SrcRegIdx].isUndef()) { 2728 Idx = PoisonMaskElem; 2729 continue; 2730 } 2731 TargetLowering::LegalizeTypeAction TypeAction = 2732 getTypeAction(Inputs[SrcRegIdx].getValueType()); 2733 if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS && 2734 Inputs[SrcRegIdx].getNumOperands() == 2 && 2735 !Inputs[SrcRegIdx].getOperand(1).isUndef() && 2736 (TypeAction == TargetLowering::TypeLegal || 2737 TypeAction == TargetLowering::TypeWidenVector)) 2738 UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2)); 2739 } 2740 if (UsedSubVector.count() > 1) { 2741 SmallVector<SmallVector<std::pair<unsigned, int>, 2>> Pairs; 2742 for (unsigned I = 0; I < std::size(Inputs); ++I) { 2743 if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1)) 2744 continue; 2745 if (Pairs.empty() || Pairs.back().size() == 2) 2746 Pairs.emplace_back(); 2747 if (UsedSubVector.test(2 * I)) { 2748 Pairs.back().emplace_back(I, 0); 2749 } else { 2750 assert(UsedSubVector.test(2 * I + 1) && 2751 "Expected to be used one of the subvectors."); 2752 Pairs.back().emplace_back(I, 1); 2753 } 2754 } 2755 if (!Pairs.empty() && Pairs.front().size() > 1) { 2756 // Adjust mask. 2757 for (int &Idx : Mask) { 2758 if (Idx == PoisonMaskElem) 2759 continue; 2760 unsigned SrcRegIdx = Idx / NewElts; 2761 auto *It = find_if( 2762 Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) { 2763 return Idxs.front().first == SrcRegIdx || 2764 Idxs.back().first == SrcRegIdx; 2765 }); 2766 if (It == Pairs.end()) 2767 continue; 2768 Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) + 2769 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2)); 2770 } 2771 // Adjust inputs. 2772 for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) { 2773 Inputs[Idxs.front().first] = DAG.getNode( 2774 ISD::CONCAT_VECTORS, DL, 2775 Inputs[Idxs.front().first].getValueType(), 2776 Inputs[Idxs.front().first].getOperand(Idxs.front().second), 2777 Inputs[Idxs.back().first].getOperand(Idxs.back().second)); 2778 } 2779 } 2780 } 2781 bool Changed; 2782 do { 2783 // Try to remove extra shuffles (except broadcasts) and shuffles with the 2784 // reused operands. 2785 Changed = false; 2786 for (unsigned I = 0; I < std::size(Inputs); ++I) { 2787 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode()); 2788 if (!Shuffle) 2789 continue; 2790 if (Shuffle->getOperand(0).getValueType() != NewVT) 2791 continue; 2792 int Op = -1; 2793 if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() && 2794 !Shuffle->isSplat()) { 2795 Op = 0; 2796 } else if (!Inputs[I].hasOneUse() && 2797 !Shuffle->getOperand(1).isUndef()) { 2798 // Find the only used operand, if possible. 
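// Walk the mask entries that refer to input I; if every one of them reads
// from the same operand of this shuffle, remember that operand index in Op
// so the shuffle can be bypassed below.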
2799 for (int &Idx : Mask) { 2800 if (Idx == PoisonMaskElem) 2801 continue; 2802 unsigned SrcRegIdx = Idx / NewElts; 2803 if (SrcRegIdx != I) 2804 continue; 2805 int MaskElt = Shuffle->getMaskElt(Idx % NewElts); 2806 if (MaskElt == PoisonMaskElem) { 2807 Idx = PoisonMaskElem; 2808 continue; 2809 } 2810 int OpIdx = MaskElt / NewElts; 2811 if (Op == -1) { 2812 Op = OpIdx; 2813 continue; 2814 } 2815 if (Op != OpIdx) { 2816 Op = -1; 2817 break; 2818 } 2819 } 2820 } 2821 if (Op < 0) { 2822 // Try to check if one of the shuffle operands is used already. 2823 for (int OpIdx = 0; OpIdx < 2; ++OpIdx) { 2824 if (Shuffle->getOperand(OpIdx).isUndef()) 2825 continue; 2826 auto *It = find(Inputs, Shuffle->getOperand(OpIdx)); 2827 if (It == std::end(Inputs)) 2828 continue; 2829 int FoundOp = std::distance(std::begin(Inputs), It); 2830 // Found that operand is used already. 2831 // 1. Fix the mask for the reused operand. 2832 for (int &Idx : Mask) { 2833 if (Idx == PoisonMaskElem) 2834 continue; 2835 unsigned SrcRegIdx = Idx / NewElts; 2836 if (SrcRegIdx != I) 2837 continue; 2838 int MaskElt = Shuffle->getMaskElt(Idx % NewElts); 2839 if (MaskElt == PoisonMaskElem) { 2840 Idx = PoisonMaskElem; 2841 continue; 2842 } 2843 int MaskIdx = MaskElt / NewElts; 2844 if (OpIdx == MaskIdx) 2845 Idx = MaskElt % NewElts + FoundOp * NewElts; 2846 } 2847 // 2. Set Op to the unused OpIdx. 2848 Op = (OpIdx + 1) % 2; 2849 break; 2850 } 2851 } 2852 if (Op >= 0) { 2853 Changed = true; 2854 Inputs[I] = Shuffle->getOperand(Op); 2855 // Adjust mask. 2856 for (int &Idx : Mask) { 2857 if (Idx == PoisonMaskElem) 2858 continue; 2859 unsigned SrcRegIdx = Idx / NewElts; 2860 if (SrcRegIdx != I) 2861 continue; 2862 int MaskElt = Shuffle->getMaskElt(Idx % NewElts); 2863 int OpIdx = MaskElt / NewElts; 2864 if (OpIdx != Op) 2865 continue; 2866 Idx = MaskElt % NewElts + SrcRegIdx * NewElts; 2867 } 2868 } 2869 } 2870 } while (Changed); 2871 }; 2872 TryPeekThroughShufflesInputs(OrigMask); 2873 // Proces unique inputs. 2874 auto &&MakeUniqueInputs = [&Inputs, &IsConstant, 2875 NewElts](SmallVectorImpl<int> &Mask) { 2876 SetVector<SDValue> UniqueInputs; 2877 SetVector<SDValue> UniqueConstantInputs; 2878 for (const auto &I : Inputs) { 2879 if (IsConstant(I)) 2880 UniqueConstantInputs.insert(I); 2881 else if (!I.isUndef()) 2882 UniqueInputs.insert(I); 2883 } 2884 // Adjust mask in case of reused inputs. Also, need to insert constant 2885 // inputs at first, otherwise it affects the final outcome. 
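// For example, if Inputs[0] and Inputs[2] are the same non-constant value,
// mask entries for both are remapped to a single slot below, so only one
// copy of that value remains referenced by the mask.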
2886 if (UniqueInputs.size() != std::size(Inputs)) { 2887 auto &&UniqueVec = UniqueInputs.takeVector(); 2888 auto &&UniqueConstantVec = UniqueConstantInputs.takeVector(); 2889 unsigned ConstNum = UniqueConstantVec.size(); 2890 for (int &Idx : Mask) { 2891 if (Idx == PoisonMaskElem) 2892 continue; 2893 unsigned SrcRegIdx = Idx / NewElts; 2894 if (Inputs[SrcRegIdx].isUndef()) { 2895 Idx = PoisonMaskElem; 2896 continue; 2897 } 2898 const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]); 2899 if (It != UniqueConstantVec.end()) { 2900 Idx = (Idx % NewElts) + 2901 NewElts * std::distance(UniqueConstantVec.begin(), It); 2902 assert(Idx >= 0 && "Expected defined mask idx."); 2903 continue; 2904 } 2905 const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]); 2906 assert(RegIt != UniqueVec.end() && "Cannot find non-const value."); 2907 Idx = (Idx % NewElts) + 2908 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum); 2909 assert(Idx >= 0 && "Expected defined mask idx."); 2910 } 2911 copy(UniqueConstantVec, std::begin(Inputs)); 2912 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum)); 2913 } 2914 }; 2915 MakeUniqueInputs(OrigMask); 2916 SDValue OrigInputs[4]; 2917 copy(Inputs, std::begin(OrigInputs)); 2918 for (unsigned High = 0; High < 2; ++High) { 2919 SDValue &Output = High ? Hi : Lo; 2920 2921 // Build a shuffle mask for the output, discovering on the fly which 2922 // input vectors to use as shuffle operands. 2923 unsigned FirstMaskIdx = High * NewElts; 2924 SmallVector<int> Mask(NewElts * std::size(Inputs), PoisonMaskElem); 2925 copy(ArrayRef(OrigMask).slice(FirstMaskIdx, NewElts), Mask.begin()); 2926 assert(!Output && "Expected default initialized initial value."); 2927 TryPeekThroughShufflesInputs(Mask); 2928 MakeUniqueInputs(Mask); 2929 SDValue TmpInputs[4]; 2930 copy(Inputs, std::begin(TmpInputs)); 2931 // Track changes in the output registers. 
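// AccumulateResults below reports true once the same output register index
// is seen again, signalling that Output already holds a partial result and
// the accumulated Inputs (rather than the TmpInputs snapshot) must be used.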
2932 int UsedIdx = -1; 2933 bool SecondIteration = false; 2934 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) { 2935 if (UsedIdx < 0) { 2936 UsedIdx = Idx; 2937 return false; 2938 } 2939 if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx) 2940 SecondIteration = true; 2941 return SecondIteration; 2942 }; 2943 processShuffleMasks( 2944 Mask, std::size(Inputs), std::size(Inputs), 2945 /*NumOfUsedRegs=*/1, 2946 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); }, 2947 [&Output, &DAG = DAG, NewVT, &DL, &Inputs, 2948 &BuildVector](ArrayRef<int> Mask, unsigned Idx, unsigned /*Unused*/) { 2949 if (Inputs[Idx]->getOpcode() == ISD::BUILD_VECTOR) 2950 Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask); 2951 else 2952 Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx], 2953 DAG.getUNDEF(NewVT), Mask); 2954 Inputs[Idx] = Output; 2955 }, 2956 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs, 2957 &TmpInputs, 2958 &BuildVector](ArrayRef<int> Mask, unsigned Idx1, unsigned Idx2) { 2959 if (AccumulateResults(Idx1)) { 2960 if (Inputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR && 2961 Inputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR) 2962 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask); 2963 else 2964 Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1], 2965 Inputs[Idx2], Mask); 2966 } else { 2967 if (TmpInputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR && 2968 TmpInputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR) 2969 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask); 2970 else 2971 Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1], 2972 TmpInputs[Idx2], Mask); 2973 } 2974 Inputs[Idx1] = Output; 2975 }); 2976 copy(OrigInputs, std::begin(Inputs)); 2977 } 2978 } 2979 2980 void DAGTypeLegalizer::SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) { 2981 EVT OVT = N->getValueType(0); 2982 EVT NVT = OVT.getHalfNumVectorElementsVT(*DAG.getContext()); 2983 SDValue Chain = N->getOperand(0); 2984 SDValue Ptr = N->getOperand(1); 2985 SDValue SV = N->getOperand(2); 2986 SDLoc dl(N); 2987 2988 const Align Alignment = 2989 DAG.getDataLayout().getABITypeAlign(NVT.getTypeForEVT(*DAG.getContext())); 2990 2991 Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment.value()); 2992 Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment.value()); 2993 Chain = Hi.getValue(1); 2994 2995 // Modified the chain - switch anything that used the old chain to use 2996 // the new one. 
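// The two VAARG nodes are already serialized through each other (Hi uses
// Lo's chain), so Hi's chain is the correct replacement for the original
// node's chain result.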
2997 ReplaceValueWith(SDValue(N, 1), Chain); 2998 } 2999 3000 void DAGTypeLegalizer::SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo, 3001 SDValue &Hi) { 3002 EVT DstVTLo, DstVTHi; 3003 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0)); 3004 SDLoc dl(N); 3005 3006 SDValue SrcLo, SrcHi; 3007 EVT SrcVT = N->getOperand(0).getValueType(); 3008 if (getTypeAction(SrcVT) == TargetLowering::TypeSplitVector) 3009 GetSplitVector(N->getOperand(0), SrcLo, SrcHi); 3010 else 3011 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0); 3012 3013 Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1)); 3014 Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1)); 3015 } 3016 3017 void DAGTypeLegalizer::SplitVecRes_VECTOR_REVERSE(SDNode *N, SDValue &Lo, 3018 SDValue &Hi) { 3019 SDValue InLo, InHi; 3020 GetSplitVector(N->getOperand(0), InLo, InHi); 3021 SDLoc DL(N); 3022 3023 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, InHi.getValueType(), InHi); 3024 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, InLo.getValueType(), InLo); 3025 } 3026 3027 void DAGTypeLegalizer::SplitVecRes_VECTOR_SPLICE(SDNode *N, SDValue &Lo, 3028 SDValue &Hi) { 3029 SDLoc DL(N); 3030 3031 SDValue Expanded = TLI.expandVectorSplice(N, DAG); 3032 std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL); 3033 } 3034 3035 void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo, 3036 SDValue &Hi) { 3037 EVT VT = N->getValueType(0); 3038 SDValue Val = N->getOperand(0); 3039 SDValue Mask = N->getOperand(1); 3040 SDValue EVL = N->getOperand(2); 3041 SDLoc DL(N); 3042 3043 // Fallback to VP_STRIDED_STORE to stack followed by VP_LOAD. 3044 Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false); 3045 3046 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 3047 VT.getVectorElementCount()); 3048 SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment); 3049 EVT PtrVT = StackPtr.getValueType(); 3050 auto &MF = DAG.getMachineFunction(); 3051 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 3052 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 3053 3054 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand( 3055 PtrInfo, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(), 3056 Alignment); 3057 MachineMemOperand *LoadMMO = DAG.getMachineFunction().getMachineMemOperand( 3058 PtrInfo, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(), 3059 Alignment); 3060 3061 unsigned EltWidth = VT.getScalarSizeInBits() / 8; 3062 SDValue NumElemMinus1 = 3063 DAG.getNode(ISD::SUB, DL, PtrVT, DAG.getZExtOrTrunc(EVL, DL, PtrVT), 3064 DAG.getConstant(1, DL, PtrVT)); 3065 SDValue StartOffset = DAG.getNode(ISD::MUL, DL, PtrVT, NumElemMinus1, 3066 DAG.getConstant(EltWidth, DL, PtrVT)); 3067 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, StartOffset); 3068 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth, DL, PtrVT); 3069 3070 SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT); 3071 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(), DL, Val, StorePtr, 3072 DAG.getUNDEF(PtrVT), Stride, TrueMask, 3073 EVL, MemVT, StoreMMO, ISD::UNINDEXED); 3074 3075 SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO); 3076 3077 std::tie(Lo, Hi) = DAG.SplitVector(Load, DL); 3078 } 3079 3080 void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) { 3081 3082 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi; 3083 GetSplitVector(N->getOperand(0), 
Op0Lo, Op0Hi); 3084 GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi); 3085 EVT VT = Op0Lo.getValueType(); 3086 SDLoc DL(N); 3087 SDValue ResLo = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL, 3088 DAG.getVTList(VT, VT), Op0Lo, Op0Hi); 3089 SDValue ResHi = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL, 3090 DAG.getVTList(VT, VT), Op1Lo, Op1Hi); 3091 3092 SetSplitVector(SDValue(N, 0), ResLo.getValue(0), ResHi.getValue(0)); 3093 SetSplitVector(SDValue(N, 1), ResLo.getValue(1), ResHi.getValue(1)); 3094 } 3095 3096 void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(SDNode *N) { 3097 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi; 3098 GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi); 3099 GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi); 3100 EVT VT = Op0Lo.getValueType(); 3101 SDLoc DL(N); 3102 SDValue Res[] = {DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, 3103 DAG.getVTList(VT, VT), Op0Lo, Op1Lo), 3104 DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, 3105 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)}; 3106 3107 SetSplitVector(SDValue(N, 0), Res[0].getValue(0), Res[0].getValue(1)); 3108 SetSplitVector(SDValue(N, 1), Res[1].getValue(0), Res[1].getValue(1)); 3109 } 3110 3111 //===----------------------------------------------------------------------===// 3112 // Operand Vector Splitting 3113 //===----------------------------------------------------------------------===// 3114 3115 /// This method is called when the specified operand of the specified node is 3116 /// found to need vector splitting. At this point, all of the result types of 3117 /// the node are known to be legal, but other operands of the node may need 3118 /// legalization as well as the specified one. 3119 bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { 3120 LLVM_DEBUG(dbgs() << "Split node operand: "; N->dump(&DAG)); 3121 SDValue Res = SDValue(); 3122 3123 // See if the target wants to custom split this node. 
3124 if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false)) 3125 return false; 3126 3127 switch (N->getOpcode()) { 3128 default: 3129 #ifndef NDEBUG 3130 dbgs() << "SplitVectorOperand Op #" << OpNo << ": "; 3131 N->dump(&DAG); 3132 dbgs() << "\n"; 3133 #endif 3134 report_fatal_error("Do not know how to split this operator's " 3135 "operand!\n"); 3136 3137 case ISD::VP_SETCC: 3138 case ISD::STRICT_FSETCC: 3139 case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break; 3140 case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break; 3141 case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break; 3142 case ISD::INSERT_SUBVECTOR: Res = SplitVecOp_INSERT_SUBVECTOR(N, OpNo); break; 3143 case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break; 3144 case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break; 3145 case ISD::VP_TRUNCATE: 3146 case ISD::TRUNCATE: 3147 Res = SplitVecOp_TruncateHelper(N); 3148 break; 3149 case ISD::STRICT_FP_ROUND: 3150 case ISD::VP_FP_ROUND: 3151 case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break; 3152 case ISD::FCOPYSIGN: Res = SplitVecOp_FPOpDifferentTypes(N); break; 3153 case ISD::STORE: 3154 Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo); 3155 break; 3156 case ISD::VP_STORE: 3157 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo); 3158 break; 3159 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: 3160 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(N), OpNo); 3161 break; 3162 case ISD::MSTORE: 3163 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo); 3164 break; 3165 case ISD::MSCATTER: 3166 case ISD::VP_SCATTER: 3167 Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo); 3168 break; 3169 case ISD::MGATHER: 3170 case ISD::VP_GATHER: 3171 Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo); 3172 break; 3173 case ISD::VSELECT: 3174 Res = SplitVecOp_VSELECT(N, OpNo); 3175 break; 3176 case ISD::STRICT_SINT_TO_FP: 3177 case ISD::STRICT_UINT_TO_FP: 3178 case ISD::SINT_TO_FP: 3179 case ISD::UINT_TO_FP: 3180 case ISD::VP_SINT_TO_FP: 3181 case ISD::VP_UINT_TO_FP: 3182 if (N->getValueType(0).bitsLT( 3183 N->getOperand(N->isStrictFPOpcode() ? 
1 : 0).getValueType())) 3184 Res = SplitVecOp_TruncateHelper(N); 3185 else 3186 Res = SplitVecOp_UnaryOp(N); 3187 break; 3188 case ISD::FP_TO_SINT_SAT: 3189 case ISD::FP_TO_UINT_SAT: 3190 Res = SplitVecOp_FP_TO_XINT_SAT(N); 3191 break; 3192 case ISD::FP_TO_SINT: 3193 case ISD::FP_TO_UINT: 3194 case ISD::VP_FP_TO_SINT: 3195 case ISD::VP_FP_TO_UINT: 3196 case ISD::STRICT_FP_TO_SINT: 3197 case ISD::STRICT_FP_TO_UINT: 3198 case ISD::STRICT_FP_EXTEND: 3199 case ISD::FP_EXTEND: 3200 case ISD::SIGN_EXTEND: 3201 case ISD::ZERO_EXTEND: 3202 case ISD::ANY_EXTEND: 3203 case ISD::FTRUNC: 3204 case ISD::LRINT: 3205 case ISD::LLRINT: 3206 Res = SplitVecOp_UnaryOp(N); 3207 break; 3208 case ISD::FLDEXP: 3209 Res = SplitVecOp_FPOpDifferentTypes(N); 3210 break; 3211 3212 case ISD::SCMP: 3213 case ISD::UCMP: 3214 Res = SplitVecOp_CMP(N); 3215 break; 3216 3217 case ISD::ANY_EXTEND_VECTOR_INREG: 3218 case ISD::SIGN_EXTEND_VECTOR_INREG: 3219 case ISD::ZERO_EXTEND_VECTOR_INREG: 3220 Res = SplitVecOp_ExtVecInRegOp(N); 3221 break; 3222 3223 case ISD::VECREDUCE_FADD: 3224 case ISD::VECREDUCE_FMUL: 3225 case ISD::VECREDUCE_ADD: 3226 case ISD::VECREDUCE_MUL: 3227 case ISD::VECREDUCE_AND: 3228 case ISD::VECREDUCE_OR: 3229 case ISD::VECREDUCE_XOR: 3230 case ISD::VECREDUCE_SMAX: 3231 case ISD::VECREDUCE_SMIN: 3232 case ISD::VECREDUCE_UMAX: 3233 case ISD::VECREDUCE_UMIN: 3234 case ISD::VECREDUCE_FMAX: 3235 case ISD::VECREDUCE_FMIN: 3236 case ISD::VECREDUCE_FMAXIMUM: 3237 case ISD::VECREDUCE_FMINIMUM: 3238 Res = SplitVecOp_VECREDUCE(N, OpNo); 3239 break; 3240 case ISD::VECREDUCE_SEQ_FADD: 3241 case ISD::VECREDUCE_SEQ_FMUL: 3242 Res = SplitVecOp_VECREDUCE_SEQ(N); 3243 break; 3244 case ISD::VP_REDUCE_FADD: 3245 case ISD::VP_REDUCE_SEQ_FADD: 3246 case ISD::VP_REDUCE_FMUL: 3247 case ISD::VP_REDUCE_SEQ_FMUL: 3248 case ISD::VP_REDUCE_ADD: 3249 case ISD::VP_REDUCE_MUL: 3250 case ISD::VP_REDUCE_AND: 3251 case ISD::VP_REDUCE_OR: 3252 case ISD::VP_REDUCE_XOR: 3253 case ISD::VP_REDUCE_SMAX: 3254 case ISD::VP_REDUCE_SMIN: 3255 case ISD::VP_REDUCE_UMAX: 3256 case ISD::VP_REDUCE_UMIN: 3257 case ISD::VP_REDUCE_FMAX: 3258 case ISD::VP_REDUCE_FMIN: 3259 case ISD::VP_REDUCE_FMAXIMUM: 3260 case ISD::VP_REDUCE_FMINIMUM: 3261 Res = SplitVecOp_VP_REDUCE(N, OpNo); 3262 break; 3263 case ISD::VP_CTTZ_ELTS: 3264 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF: 3265 Res = SplitVecOp_VP_CttzElements(N); 3266 break; 3267 } 3268 3269 // If the result is null, the sub-method took care of registering results etc. 3270 if (!Res.getNode()) return false; 3271 3272 // If the result is N, the sub-method updated N in place. Tell the legalizer 3273 // core about this. 3274 if (Res.getNode() == N) 3275 return true; 3276 3277 if (N->isStrictFPOpcode()) 3278 assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 2 && 3279 "Invalid operand expansion"); 3280 else 3281 assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && 3282 "Invalid operand expansion"); 3283 3284 ReplaceValueWith(SDValue(N, 0), Res); 3285 return false; 3286 } 3287 3288 SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) { 3289 // The only possibility for an illegal operand is the mask, since result type 3290 // legalization would have handled this node already otherwise. 
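// For example, with a split <8 x i1> mask selecting between two legal <8 x f32>
// operands: the mask and both data operands are split in half, two <4 x f32>
// VSELECTs are emitted, and the results are concatenated back to <8 x f32>.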
3291 assert(OpNo == 0 && "Illegal operand must be mask"); 3292 3293 SDValue Mask = N->getOperand(0); 3294 SDValue Src0 = N->getOperand(1); 3295 SDValue Src1 = N->getOperand(2); 3296 EVT Src0VT = Src0.getValueType(); 3297 SDLoc DL(N); 3298 assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?"); 3299 3300 SDValue Lo, Hi; 3301 GetSplitVector(N->getOperand(0), Lo, Hi); 3302 assert(Lo.getValueType() == Hi.getValueType() && 3303 "Lo and Hi have differing types"); 3304 3305 EVT LoOpVT, HiOpVT; 3306 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT); 3307 assert(LoOpVT == HiOpVT && "Asymmetric vector split?"); 3308 3309 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask; 3310 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL); 3311 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL); 3312 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL); 3313 3314 SDValue LoSelect = 3315 DAG.getNode(ISD::VSELECT, DL, LoOpVT, LoMask, LoOp0, LoOp1); 3316 SDValue HiSelect = 3317 DAG.getNode(ISD::VSELECT, DL, HiOpVT, HiMask, HiOp0, HiOp1); 3318 3319 return DAG.getNode(ISD::CONCAT_VECTORS, DL, Src0VT, LoSelect, HiSelect); 3320 } 3321 3322 SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) { 3323 EVT ResVT = N->getValueType(0); 3324 SDValue Lo, Hi; 3325 SDLoc dl(N); 3326 3327 SDValue VecOp = N->getOperand(OpNo); 3328 EVT VecVT = VecOp.getValueType(); 3329 assert(VecVT.isVector() && "Can only split reduce vector operand"); 3330 GetSplitVector(VecOp, Lo, Hi); 3331 EVT LoOpVT, HiOpVT; 3332 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT); 3333 3334 // Use the appropriate scalar instruction on the split subvectors before 3335 // reducing the now partially reduced smaller vector. 3336 unsigned CombineOpc = ISD::getVecReduceBaseOpcode(N->getOpcode()); 3337 SDValue Partial = DAG.getNode(CombineOpc, dl, LoOpVT, Lo, Hi, N->getFlags()); 3338 return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags()); 3339 } 3340 3341 SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) { 3342 EVT ResVT = N->getValueType(0); 3343 SDValue Lo, Hi; 3344 SDLoc dl(N); 3345 3346 SDValue AccOp = N->getOperand(0); 3347 SDValue VecOp = N->getOperand(1); 3348 SDNodeFlags Flags = N->getFlags(); 3349 3350 EVT VecVT = VecOp.getValueType(); 3351 assert(VecVT.isVector() && "Can only split reduce vector operand"); 3352 GetSplitVector(VecOp, Lo, Hi); 3353 EVT LoOpVT, HiOpVT; 3354 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT); 3355 3356 // Reduce low half. 3357 SDValue Partial = DAG.getNode(N->getOpcode(), dl, ResVT, AccOp, Lo, Flags); 3358 3359 // Reduce high half, using low half result as initial value. 
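// E.g. vecreduce_seq_fadd(Acc, <8 x f32> V) becomes
// vecreduce_seq_fadd(vecreduce_seq_fadd(Acc, V.lo), V.hi).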
3360 return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags); 3361 } 3362 3363 SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) { 3364 assert(N->isVPOpcode() && "Expected VP opcode"); 3365 assert(OpNo == 1 && "Can only split reduce vector operand"); 3366 3367 unsigned Opc = N->getOpcode(); 3368 EVT ResVT = N->getValueType(0); 3369 SDValue Lo, Hi; 3370 SDLoc dl(N); 3371 3372 SDValue VecOp = N->getOperand(OpNo); 3373 EVT VecVT = VecOp.getValueType(); 3374 assert(VecVT.isVector() && "Can only split reduce vector operand"); 3375 GetSplitVector(VecOp, Lo, Hi); 3376 3377 SDValue MaskLo, MaskHi; 3378 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2)); 3379 3380 SDValue EVLLo, EVLHi; 3381 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl); 3382 3383 const SDNodeFlags Flags = N->getFlags(); 3384 3385 SDValue ResLo = 3386 DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags); 3387 return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags); 3388 } 3389 3390 SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) { 3391 // The result has a legal vector type, but the input needs splitting. 3392 EVT ResVT = N->getValueType(0); 3393 SDValue Lo, Hi; 3394 SDLoc dl(N); 3395 GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi); 3396 EVT InVT = Lo.getValueType(); 3397 3398 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(), 3399 InVT.getVectorElementCount()); 3400 3401 if (N->isStrictFPOpcode()) { 3402 Lo = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other }, 3403 { N->getOperand(0), Lo }); 3404 Hi = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other }, 3405 { N->getOperand(0), Hi }); 3406 3407 // Build a factor node to remember that this operation is independent 3408 // of the other one. 3409 SDValue Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 3410 Hi.getValue(1)); 3411 3412 // Legalize the chain result - switch anything that used the old chain to 3413 // use the new one. 3414 ReplaceValueWith(SDValue(N, 1), Ch); 3415 } else if (N->getNumOperands() == 3) { 3416 assert(N->isVPOpcode() && "Expected VP opcode"); 3417 SDValue MaskLo, MaskHi, EVLLo, EVLHi; 3418 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1)); 3419 std::tie(EVLLo, EVLHi) = 3420 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl); 3421 Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo); 3422 Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi); 3423 } else { 3424 Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo); 3425 Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi); 3426 } 3427 3428 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); 3429 } 3430 3431 SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) { 3432 // For example, i64 = BITCAST v4i16 on alpha. Typically the vector will 3433 // end up being split all the way down to individual components. Convert the 3434 // split pieces into integers and reassemble. 
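// (For scalable results we instead bitcast each half to the corresponding split
// result type and reassemble with CONCAT_VECTORS, since a scalable value cannot
// be round-tripped through a fixed-width integer.)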
3435 EVT ResVT = N->getValueType(0); 3436 SDValue Lo, Hi; 3437 GetSplitVector(N->getOperand(0), Lo, Hi); 3438 SDLoc dl(N); 3439 3440 if (ResVT.isScalableVector()) { 3441 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT); 3442 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo); 3443 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi); 3444 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); 3445 } 3446 3447 Lo = BitConvertToInteger(Lo); 3448 Hi = BitConvertToInteger(Hi); 3449 3450 if (DAG.getDataLayout().isBigEndian()) 3451 std::swap(Lo, Hi); 3452 3453 return DAG.getNode(ISD::BITCAST, dl, ResVT, JoinIntegers(Lo, Hi)); 3454 } 3455 3456 SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N, 3457 unsigned OpNo) { 3458 assert(OpNo == 1 && "Invalid OpNo; can only split SubVec."); 3459 // We know that the result type is legal. 3460 EVT ResVT = N->getValueType(0); 3461 3462 SDValue Vec = N->getOperand(0); 3463 SDValue SubVec = N->getOperand(1); 3464 SDValue Idx = N->getOperand(2); 3465 SDLoc dl(N); 3466 3467 SDValue Lo, Hi; 3468 GetSplitVector(SubVec, Lo, Hi); 3469 3470 uint64_t IdxVal = Idx->getAsZExtVal(); 3471 uint64_t LoElts = Lo.getValueType().getVectorMinNumElements(); 3472 3473 SDValue FirstInsertion = 3474 DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Lo, Idx); 3475 SDValue SecondInsertion = 3476 DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, FirstInsertion, Hi, 3477 DAG.getVectorIdxConstant(IdxVal + LoElts, dl)); 3478 3479 return SecondInsertion; 3480 } 3481 3482 SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) { 3483 // We know that the extracted result type is legal. 3484 EVT SubVT = N->getValueType(0); 3485 SDValue Idx = N->getOperand(1); 3486 SDLoc dl(N); 3487 SDValue Lo, Hi; 3488 3489 GetSplitVector(N->getOperand(0), Lo, Hi); 3490 3491 uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements(); 3492 uint64_t IdxVal = Idx->getAsZExtVal(); 3493 3494 if (IdxVal < LoEltsMin) { 3495 assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin && 3496 "Extracted subvector crosses vector split!"); 3497 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Lo, Idx); 3498 } else if (SubVT.isScalableVector() == 3499 N->getOperand(0).getValueType().isScalableVector()) 3500 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Hi, 3501 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl)); 3502 3503 // After this point the DAG node only permits extracting fixed-width 3504 // subvectors from scalable vectors. 3505 assert(SubVT.isFixedLengthVector() && 3506 "Extracting scalable subvector from fixed-width unsupported"); 3507 3508 // If the element type is i1 and we're not promoting the result, then we may 3509 // end up loading the wrong data since the bits are packed tightly into 3510 // bytes. For example, if we extract a v4i1 (legal) from a nxv4i1 (legal) 3511 // type at index 4, then we will load a byte starting at index 0. 3512 if (SubVT.getScalarType() == MVT::i1) 3513 report_fatal_error("Don't know how to extract fixed-width predicate " 3514 "subvector from a scalable predicate vector"); 3515 3516 // Spill the vector to the stack. We should use the alignment for 3517 // the smallest part. 
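// For example, extracting a fixed v4f32 from an nxv8f32 at an index not covered
// by the low half: store the whole scalable vector to a stack slot, then load
// the fixed-width subvector back from the address of the requested element.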
3518 SDValue Vec = N->getOperand(0); 3519 EVT VecVT = Vec.getValueType(); 3520 Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false); 3521 SDValue StackPtr = 3522 DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign); 3523 auto &MF = DAG.getMachineFunction(); 3524 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 3525 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 3526 3527 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 3528 SmallestAlign); 3529 3530 // Extract the subvector by loading the correct part. 3531 StackPtr = TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, SubVT, Idx); 3532 3533 return DAG.getLoad( 3534 SubVT, dl, Store, StackPtr, 3535 MachinePointerInfo::getUnknownStack(DAG.getMachineFunction())); 3536 } 3537 3538 SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { 3539 SDValue Vec = N->getOperand(0); 3540 SDValue Idx = N->getOperand(1); 3541 EVT VecVT = Vec.getValueType(); 3542 3543 if (const ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Idx)) { 3544 uint64_t IdxVal = Index->getZExtValue(); 3545 3546 SDValue Lo, Hi; 3547 GetSplitVector(Vec, Lo, Hi); 3548 3549 uint64_t LoElts = Lo.getValueType().getVectorMinNumElements(); 3550 3551 if (IdxVal < LoElts) 3552 return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0); 3553 else if (!Vec.getValueType().isScalableVector()) 3554 return SDValue(DAG.UpdateNodeOperands(N, Hi, 3555 DAG.getConstant(IdxVal - LoElts, SDLoc(N), 3556 Idx.getValueType())), 0); 3557 } 3558 3559 // See if the target wants to custom expand this node. 3560 if (CustomLowerNode(N, N->getValueType(0), true)) 3561 return SDValue(); 3562 3563 // Make the vector elements byte-addressable if they aren't already. 3564 SDLoc dl(N); 3565 EVT EltVT = VecVT.getVectorElementType(); 3566 if (!EltVT.isByteSized()) { 3567 EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext()); 3568 VecVT = VecVT.changeElementType(EltVT); 3569 Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec); 3570 SDValue NewExtract = 3571 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx); 3572 return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0)); 3573 } 3574 3575 // Store the vector to the stack. 3576 // In cases where the vector is illegal it will be broken down into parts 3577 // and stored in parts - we should use the alignment for the smallest part. 3578 Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false); 3579 SDValue StackPtr = 3580 DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign); 3581 auto &MF = DAG.getMachineFunction(); 3582 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 3583 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 3584 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 3585 SmallestAlign); 3586 3587 // Load back the required element. 3588 StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx); 3589 3590 // EXTRACT_VECTOR_ELT can extend the element type to the width of the return 3591 // type, leaving the high bits undefined. But it can't truncate. 
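// E.g. an i16 element extracted into an i32 result is reloaded below with an
// any-extending load; only the low 16 bits of the result are defined.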
3592 assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT."); 3593 3594 return DAG.getExtLoad( 3595 ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr, 3596 MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), EltVT, 3597 commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8)); 3598 } 3599 3600 SDValue DAGTypeLegalizer::SplitVecOp_ExtVecInRegOp(SDNode *N) { 3601 SDValue Lo, Hi; 3602 3603 // *_EXTEND_VECTOR_INREG only reference the lower half of the input, so 3604 // splitting the result has the same effect as splitting the input operand. 3605 SplitVecRes_ExtVecInRegOp(N, Lo, Hi); 3606 3607 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), N->getValueType(0), Lo, Hi); 3608 } 3609 3610 SDValue DAGTypeLegalizer::SplitVecOp_Gather(MemSDNode *N, unsigned OpNo) { 3611 (void)OpNo; 3612 SDValue Lo, Hi; 3613 SplitVecRes_Gather(N, Lo, Hi); 3614 3615 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, N, N->getValueType(0), Lo, Hi); 3616 ReplaceValueWith(SDValue(N, 0), Res); 3617 return SDValue(); 3618 } 3619 3620 SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) { 3621 assert(N->isUnindexed() && "Indexed vp_store of vector?"); 3622 SDValue Ch = N->getChain(); 3623 SDValue Ptr = N->getBasePtr(); 3624 SDValue Offset = N->getOffset(); 3625 assert(Offset.isUndef() && "Unexpected VP store offset"); 3626 SDValue Mask = N->getMask(); 3627 SDValue EVL = N->getVectorLength(); 3628 SDValue Data = N->getValue(); 3629 Align Alignment = N->getOriginalAlign(); 3630 SDLoc DL(N); 3631 3632 SDValue DataLo, DataHi; 3633 if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector) 3634 // Split Data operand 3635 GetSplitVector(Data, DataLo, DataHi); 3636 else 3637 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL); 3638 3639 // Split Mask operand 3640 SDValue MaskLo, MaskHi; 3641 if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) { 3642 SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi); 3643 } else { 3644 if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector) 3645 GetSplitVector(Mask, MaskLo, MaskHi); 3646 else 3647 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL); 3648 } 3649 3650 EVT MemoryVT = N->getMemoryVT(); 3651 EVT LoMemVT, HiMemVT; 3652 bool HiIsEmpty = false; 3653 std::tie(LoMemVT, HiMemVT) = 3654 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty); 3655 3656 // Split EVL 3657 SDValue EVLLo, EVLHi; 3658 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL); 3659 3660 SDValue Lo, Hi; 3661 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 3662 N->getPointerInfo(), MachineMemOperand::MOStore, 3663 LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(), 3664 N->getRanges()); 3665 3666 Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO, 3667 N->getAddressingMode(), N->isTruncatingStore(), 3668 N->isCompressingStore()); 3669 3670 // If the hi vp_store has zero storage size, only the lo vp_store is needed. 
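// (HiIsEmpty is set by GetDependentSplitDestVTs when the memory VT is already
// covered by the low half, e.g. if the stored value was widened before being
// split here.)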
3671 if (HiIsEmpty) 3672 return Lo; 3673 3674 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG, 3675 N->isCompressingStore()); 3676 3677 MachinePointerInfo MPI; 3678 if (LoMemVT.isScalableVector()) { 3679 Alignment = commonAlignment(Alignment, 3680 LoMemVT.getSizeInBits().getKnownMinValue() / 8); 3681 MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace()); 3682 } else 3683 MPI = N->getPointerInfo().getWithOffset( 3684 LoMemVT.getStoreSize().getFixedValue()); 3685 3686 MMO = DAG.getMachineFunction().getMachineMemOperand( 3687 MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(), 3688 Alignment, N->getAAInfo(), N->getRanges()); 3689 3690 Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO, 3691 N->getAddressingMode(), N->isTruncatingStore(), 3692 N->isCompressingStore()); 3693 3694 // Build a factor node to remember that this store is independent of the 3695 // other one. 3696 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 3697 } 3698 3699 SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N, 3700 unsigned OpNo) { 3701 assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?"); 3702 assert(N->getOffset().isUndef() && "Unexpected VP strided store offset"); 3703 3704 SDLoc DL(N); 3705 3706 SDValue Data = N->getValue(); 3707 SDValue LoData, HiData; 3708 if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector) 3709 GetSplitVector(Data, LoData, HiData); 3710 else 3711 std::tie(LoData, HiData) = DAG.SplitVector(Data, DL); 3712 3713 EVT LoMemVT, HiMemVT; 3714 bool HiIsEmpty = false; 3715 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs( 3716 N->getMemoryVT(), LoData.getValueType(), &HiIsEmpty); 3717 3718 SDValue Mask = N->getMask(); 3719 SDValue LoMask, HiMask; 3720 if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) 3721 SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask); 3722 else if (getTypeAction(Mask.getValueType()) == 3723 TargetLowering::TypeSplitVector) 3724 GetSplitVector(Mask, LoMask, HiMask); 3725 else 3726 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL); 3727 3728 SDValue LoEVL, HiEVL; 3729 std::tie(LoEVL, HiEVL) = 3730 DAG.SplitEVL(N->getVectorLength(), Data.getValueType(), DL); 3731 3732 // Generate the low vp_strided_store 3733 SDValue Lo = DAG.getStridedStoreVP( 3734 N->getChain(), DL, LoData, N->getBasePtr(), N->getOffset(), 3735 N->getStride(), LoMask, LoEVL, LoMemVT, N->getMemOperand(), 3736 N->getAddressingMode(), N->isTruncatingStore(), N->isCompressingStore()); 3737 3738 // If the high vp_strided_store has zero storage size, only the low 3739 // vp_strided_store is needed. 3740 if (HiIsEmpty) 3741 return Lo; 3742 3743 // Generate the high vp_strided_store. 
3744 // To calculate the high base address, we need to sum to the low base 3745 // address stride number of bytes for each element already stored by low, 3746 // that is: Ptr = Ptr + (LoEVL * Stride) 3747 EVT PtrVT = N->getBasePtr().getValueType(); 3748 SDValue Increment = 3749 DAG.getNode(ISD::MUL, DL, PtrVT, LoEVL, 3750 DAG.getSExtOrTrunc(N->getStride(), DL, PtrVT)); 3751 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, N->getBasePtr(), Increment); 3752 3753 Align Alignment = N->getOriginalAlign(); 3754 if (LoMemVT.isScalableVector()) 3755 Alignment = commonAlignment(Alignment, 3756 LoMemVT.getSizeInBits().getKnownMinValue() / 8); 3757 3758 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 3759 MachinePointerInfo(N->getPointerInfo().getAddrSpace()), 3760 MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(), 3761 Alignment, N->getAAInfo(), N->getRanges()); 3762 3763 SDValue Hi = DAG.getStridedStoreVP( 3764 N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask, 3765 HiEVL, HiMemVT, MMO, N->getAddressingMode(), N->isTruncatingStore(), 3766 N->isCompressingStore()); 3767 3768 // Build a factor node to remember that this store is independent of the 3769 // other one. 3770 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 3771 } 3772 3773 SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N, 3774 unsigned OpNo) { 3775 assert(N->isUnindexed() && "Indexed masked store of vector?"); 3776 SDValue Ch = N->getChain(); 3777 SDValue Ptr = N->getBasePtr(); 3778 SDValue Offset = N->getOffset(); 3779 assert(Offset.isUndef() && "Unexpected indexed masked store offset"); 3780 SDValue Mask = N->getMask(); 3781 SDValue Data = N->getValue(); 3782 Align Alignment = N->getOriginalAlign(); 3783 SDLoc DL(N); 3784 3785 SDValue DataLo, DataHi; 3786 if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector) 3787 // Split Data operand 3788 GetSplitVector(Data, DataLo, DataHi); 3789 else 3790 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL); 3791 3792 // Split Mask operand 3793 SDValue MaskLo, MaskHi; 3794 if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) { 3795 SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi); 3796 } else { 3797 if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector) 3798 GetSplitVector(Mask, MaskLo, MaskHi); 3799 else 3800 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL); 3801 } 3802 3803 EVT MemoryVT = N->getMemoryVT(); 3804 EVT LoMemVT, HiMemVT; 3805 bool HiIsEmpty = false; 3806 std::tie(LoMemVT, HiMemVT) = 3807 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty); 3808 3809 SDValue Lo, Hi, Res; 3810 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 3811 N->getPointerInfo(), MachineMemOperand::MOStore, 3812 LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(), 3813 N->getRanges()); 3814 3815 Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO, 3816 N->getAddressingMode(), N->isTruncatingStore(), 3817 N->isCompressingStore()); 3818 3819 if (HiIsEmpty) { 3820 // The hi masked store has zero storage size. 3821 // Only the lo masked store is needed. 
3822 Res = Lo; 3823 } else { 3824 3825 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG, 3826 N->isCompressingStore()); 3827 3828 MachinePointerInfo MPI; 3829 if (LoMemVT.isScalableVector()) { 3830 Alignment = commonAlignment( 3831 Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8); 3832 MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace()); 3833 } else 3834 MPI = N->getPointerInfo().getWithOffset( 3835 LoMemVT.getStoreSize().getFixedValue()); 3836 3837 MMO = DAG.getMachineFunction().getMachineMemOperand( 3838 MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(), 3839 Alignment, N->getAAInfo(), N->getRanges()); 3840 3841 Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO, 3842 N->getAddressingMode(), N->isTruncatingStore(), 3843 N->isCompressingStore()); 3844 3845 // Build a factor node to remember that this store is independent of the 3846 // other one. 3847 Res = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 3848 } 3849 3850 return Res; 3851 } 3852 3853 SDValue DAGTypeLegalizer::SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo) { 3854 SDValue Ch = N->getChain(); 3855 SDValue Ptr = N->getBasePtr(); 3856 EVT MemoryVT = N->getMemoryVT(); 3857 Align Alignment = N->getOriginalAlign(); 3858 SDLoc DL(N); 3859 struct Operands { 3860 SDValue Mask; 3861 SDValue Index; 3862 SDValue Scale; 3863 SDValue Data; 3864 } Ops = [&]() -> Operands { 3865 if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) { 3866 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(), 3867 MSC->getValue()}; 3868 } 3869 auto *VPSC = cast<VPScatterSDNode>(N); 3870 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(), 3871 VPSC->getValue()}; 3872 }(); 3873 // Split all operands 3874 3875 EVT LoMemVT, HiMemVT; 3876 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 3877 3878 SDValue DataLo, DataHi; 3879 if (getTypeAction(Ops.Data.getValueType()) == TargetLowering::TypeSplitVector) 3880 // Split Data operand 3881 GetSplitVector(Ops.Data, DataLo, DataHi); 3882 else 3883 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL); 3884 3885 // Split Mask operand 3886 SDValue MaskLo, MaskHi; 3887 if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) { 3888 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi); 3889 } else { 3890 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL); 3891 } 3892 3893 SDValue IndexHi, IndexLo; 3894 if (getTypeAction(Ops.Index.getValueType()) == 3895 TargetLowering::TypeSplitVector) 3896 GetSplitVector(Ops.Index, IndexLo, IndexHi); 3897 else 3898 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL); 3899 3900 SDValue Lo; 3901 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 3902 N->getPointerInfo(), MachineMemOperand::MOStore, 3903 LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(), 3904 N->getRanges()); 3905 3906 if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) { 3907 SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale}; 3908 Lo = 3909 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO, 3910 MSC->getIndexType(), MSC->isTruncatingStore()); 3911 3912 // The order of the Scatter operation after split is well defined. The "Hi" 3913 // part comes after the "Lo". So these two operations should be chained one 3914 // after another. 
3915 SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale}; 3916 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, 3917 MMO, MSC->getIndexType(), 3918 MSC->isTruncatingStore()); 3919 } 3920 auto *VPSC = cast<VPScatterSDNode>(N); 3921 SDValue EVLLo, EVLHi; 3922 std::tie(EVLLo, EVLHi) = 3923 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL); 3924 3925 SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo}; 3926 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO, 3927 VPSC->getIndexType()); 3928 3929 // The order of the Scatter operation after split is well defined. The "Hi" 3930 // part comes after the "Lo". So these two operations should be chained one 3931 // after another. 3932 SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi}; 3933 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO, 3934 VPSC->getIndexType()); 3935 } 3936 3937 SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { 3938 assert(N->isUnindexed() && "Indexed store of vector?"); 3939 assert(OpNo == 1 && "Can only split the stored value"); 3940 SDLoc DL(N); 3941 3942 bool isTruncating = N->isTruncatingStore(); 3943 SDValue Ch = N->getChain(); 3944 SDValue Ptr = N->getBasePtr(); 3945 EVT MemoryVT = N->getMemoryVT(); 3946 Align Alignment = N->getOriginalAlign(); 3947 MachineMemOperand::Flags MMOFlags = N->getMemOperand()->getFlags(); 3948 AAMDNodes AAInfo = N->getAAInfo(); 3949 SDValue Lo, Hi; 3950 GetSplitVector(N->getOperand(1), Lo, Hi); 3951 3952 EVT LoMemVT, HiMemVT; 3953 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 3954 3955 // Scalarize if the split halves are not byte-sized. 3956 if (!LoMemVT.isByteSized() || !HiMemVT.isByteSized()) 3957 return TLI.scalarizeVectorStore(N, DAG); 3958 3959 if (isTruncating) 3960 Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT, 3961 Alignment, MMOFlags, AAInfo); 3962 else 3963 Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags, 3964 AAInfo); 3965 3966 MachinePointerInfo MPI; 3967 IncrementPointer(N, LoMemVT, MPI, Ptr); 3968 3969 if (isTruncating) 3970 Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI, 3971 HiMemVT, Alignment, MMOFlags, AAInfo); 3972 else 3973 Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo); 3974 3975 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 3976 } 3977 3978 SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) { 3979 SDLoc DL(N); 3980 3981 // The input operands all must have the same type, and we know the result 3982 // type is valid. Convert this to a buildvector which extracts all the 3983 // input elements. 3984 // TODO: If the input elements are power-two vectors, we could convert this to 3985 // a new CONCAT_VECTORS node with elements that are half-wide. 3986 SmallVector<SDValue, 32> Elts; 3987 EVT EltVT = N->getValueType(0).getVectorElementType(); 3988 for (const SDValue &Op : N->op_values()) { 3989 for (unsigned i = 0, e = Op.getValueType().getVectorNumElements(); 3990 i != e; ++i) { 3991 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op, 3992 DAG.getVectorIdxConstant(i, DL))); 3993 } 3994 } 3995 3996 return DAG.getBuildVector(N->getValueType(0), DL, Elts); 3997 } 3998 3999 SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) { 4000 // The result type is legal, but the input type is illegal. 
If splitting 4001 // ends up with the result type of each half still being legal, just 4002 // do that. If, however, that would result in an illegal result type, 4003 // we can try to get more clever with power-two vectors. Specifically, 4004 // split the input type, but also widen the result element size, then 4005 // concatenate the halves and truncate again. For example, consider a target 4006 // where v8i8 is legal and v8i32 is not (ARM, which doesn't have 256-bit 4007 // vectors). To perform a "%res = v8i8 trunc v8i32 %in" we do: 4008 // %inlo = v4i32 extract_subvector %in, 0 4009 // %inhi = v4i32 extract_subvector %in, 4 4010 // %lo16 = v4i16 trunc v4i32 %inlo 4011 // %hi16 = v4i16 trunc v4i32 %inhi 4012 // %in16 = v8i16 concat_vectors v4i16 %lo16, v4i16 %hi16 4013 // %res = v8i8 trunc v8i16 %in16 4014 // 4015 // Without this transform, the original truncate would end up being 4016 // scalarized, which is pretty much always a last resort. 4017 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; 4018 SDValue InVec = N->getOperand(OpNo); 4019 EVT InVT = InVec->getValueType(0); 4020 EVT OutVT = N->getValueType(0); 4021 ElementCount NumElements = OutVT.getVectorElementCount(); 4022 bool IsFloat = OutVT.isFloatingPoint(); 4023 4024 unsigned InElementSize = InVT.getScalarSizeInBits(); 4025 unsigned OutElementSize = OutVT.getScalarSizeInBits(); 4026 4027 // Determine the split output VT. If it's legal we can just split directly. 4028 EVT LoOutVT, HiOutVT; 4029 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT); 4030 assert(LoOutVT == HiOutVT && "Unequal split?"); 4031 4032 // If the input elements are at most twice the width of the result elements, 4033 // just use the normal splitting. Our trick only works if there's room 4034 // to split more than once. 4035 if (isTypeLegal(LoOutVT) || 4036 InElementSize <= OutElementSize * 2) 4037 return SplitVecOp_UnaryOp(N); 4038 SDLoc DL(N); 4039 4040 // Don't touch if this will be scalarized. 4041 EVT FinalVT = InVT; 4042 while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector) 4043 FinalVT = FinalVT.getHalfNumVectorElementsVT(*DAG.getContext()); 4044 4045 if (getTypeAction(FinalVT) == TargetLowering::TypeScalarizeVector) 4046 return SplitVecOp_UnaryOp(N); 4047 4048 // Get the split input vector. 4049 SDValue InLoVec, InHiVec; 4050 GetSplitVector(InVec, InLoVec, InHiVec); 4051 4052 // Truncate them to 1/2 the element size. 4053 // 4054 // This assumes the number of elements is a power of two; any vector that 4055 // isn't should be widened, not split. 4056 EVT HalfElementVT = IsFloat ? 4057 EVT::getFloatingPointVT(InElementSize/2) : 4058 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2); 4059 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, 4060 NumElements.divideCoefficientBy(2)); 4061 4062 SDValue HalfLo; 4063 SDValue HalfHi; 4064 SDValue Chain; 4065 if (N->isStrictFPOpcode()) { 4066 HalfLo = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other}, 4067 {N->getOperand(0), InLoVec}); 4068 HalfHi = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other}, 4069 {N->getOperand(0), InHiVec}); 4070 // Legalize the chain result - switch anything that used the old chain to 4071 // use the new one. 4072 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, HalfLo.getValue(1), 4073 HalfHi.getValue(1)); 4074 } else { 4075 HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec); 4076 HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec); 4077 } 4078 4079 // Concatenate them to get the full intermediate truncation result.
4080 EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements); 4081 SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo, 4082 HalfHi); 4083 // Now finish up by truncating all the way down to the original result 4084 // type. This should normally be something that ends up being legal directly, 4085 // but in theory if a target has very wide vectors and an annoyingly 4086 // restricted set of legal types, this split can chain to build things up. 4087 4088 if (N->isStrictFPOpcode()) { 4089 SDValue Res = DAG.getNode( 4090 ISD::STRICT_FP_ROUND, DL, {OutVT, MVT::Other}, 4091 {Chain, InterVec, 4092 DAG.getTargetConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()))}); 4093 // Relink the chain 4094 ReplaceValueWith(SDValue(N, 1), SDValue(Res.getNode(), 1)); 4095 return Res; 4096 } 4097 4098 return IsFloat 4099 ? DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec, 4100 DAG.getTargetConstant( 4101 0, DL, TLI.getPointerTy(DAG.getDataLayout()))) 4102 : DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec); 4103 } 4104 4105 SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) { 4106 bool isStrict = N->getOpcode() == ISD::STRICT_FSETCC; 4107 assert(N->getValueType(0).isVector() && 4108 N->getOperand(isStrict ? 1 : 0).getValueType().isVector() && 4109 "Operand types must be vectors"); 4110 // The result has a legal vector type, but the input needs splitting. 4111 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes; 4112 SDLoc DL(N); 4113 GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0); 4114 GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1); 4115 4116 auto PartEltCnt = Lo0.getValueType().getVectorElementCount(); 4117 4118 LLVMContext &Context = *DAG.getContext(); 4119 EVT PartResVT = EVT::getVectorVT(Context, MVT::i1, PartEltCnt); 4120 EVT WideResVT = EVT::getVectorVT(Context, MVT::i1, PartEltCnt*2); 4121 4122 if (N->getOpcode() == ISD::SETCC) { 4123 LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2)); 4124 HiRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Hi0, Hi1, N->getOperand(2)); 4125 } else if (N->getOpcode() == ISD::STRICT_FSETCC) { 4126 LoRes = DAG.getNode(ISD::STRICT_FSETCC, DL, 4127 DAG.getVTList(PartResVT, N->getValueType(1)), 4128 N->getOperand(0), Lo0, Lo1, N->getOperand(3)); 4129 HiRes = DAG.getNode(ISD::STRICT_FSETCC, DL, 4130 DAG.getVTList(PartResVT, N->getValueType(1)), 4131 N->getOperand(0), Hi0, Hi1, N->getOperand(3)); 4132 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 4133 LoRes.getValue(1), HiRes.getValue(1)); 4134 ReplaceValueWith(SDValue(N, 1), NewChain); 4135 } else { 4136 assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode"); 4137 SDValue MaskLo, MaskHi, EVLLo, EVLHi; 4138 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3)); 4139 std::tie(EVLLo, EVLHi) = 4140 DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL); 4141 LoRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Lo0, Lo1, 4142 N->getOperand(2), MaskLo, EVLLo); 4143 HiRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Hi0, Hi1, 4144 N->getOperand(2), MaskHi, EVLHi); 4145 } 4146 SDValue Con = DAG.getNode(ISD::CONCAT_VECTORS, DL, WideResVT, LoRes, HiRes); 4147 4148 EVT OpVT = N->getOperand(0).getValueType(); 4149 ISD::NodeType ExtendCode = 4150 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT)); 4151 return DAG.getNode(ExtendCode, DL, N->getValueType(0), Con); 4152 } 4153 4154 4155 SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) { 4156 // The result has a legal vector type, but the input needs splitting. 
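// For example, a legal v8f32 fp_round whose v8f64 input must be split: round
// each v4f64 half to v4f32 and concatenate the results.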
4157 EVT ResVT = N->getValueType(0); 4158 SDValue Lo, Hi; 4159 SDLoc DL(N); 4160 GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi); 4161 EVT InVT = Lo.getValueType(); 4162 4163 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(), 4164 InVT.getVectorElementCount()); 4165 4166 if (N->isStrictFPOpcode()) { 4167 Lo = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other }, 4168 { N->getOperand(0), Lo, N->getOperand(2) }); 4169 Hi = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other }, 4170 { N->getOperand(0), Hi, N->getOperand(2) }); 4171 // Legalize the chain result - switch anything that used the old chain to 4172 // use the new one. 4173 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 4174 Lo.getValue(1), Hi.getValue(1)); 4175 ReplaceValueWith(SDValue(N, 1), NewChain); 4176 } else if (N->getOpcode() == ISD::VP_FP_ROUND) { 4177 SDValue MaskLo, MaskHi, EVLLo, EVLHi; 4178 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1)); 4179 std::tie(EVLLo, EVLHi) = 4180 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL); 4181 Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo); 4182 Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi); 4183 } else { 4184 Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1)); 4185 Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1)); 4186 } 4187 4188 return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi); 4189 } 4190 4191 // Split a vector type in an FP binary operation where the second operand has a 4192 // different type from the first. 4193 // 4194 // The result (and the first input) has a legal vector type, but the second 4195 // input needs splitting. 4196 SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(SDNode *N) { 4197 SDLoc DL(N); 4198 4199 EVT LHSLoVT, LHSHiVT; 4200 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 4201 4202 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT)) 4203 return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements()); 4204 4205 SDValue LHSLo, LHSHi; 4206 std::tie(LHSLo, LHSHi) = 4207 DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT); 4208 4209 SDValue RHSLo, RHSHi; 4210 std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL); 4211 4212 SDValue Lo = DAG.getNode(N->getOpcode(), DL, LHSLoVT, LHSLo, RHSLo); 4213 SDValue Hi = DAG.getNode(N->getOpcode(), DL, LHSHiVT, LHSHi, RHSHi); 4214 4215 return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Lo, Hi); 4216 } 4217 4218 SDValue DAGTypeLegalizer::SplitVecOp_CMP(SDNode *N) { 4219 LLVMContext &Ctxt = *DAG.getContext(); 4220 SDLoc dl(N); 4221 4222 SDValue LHSLo, LHSHi, RHSLo, RHSHi; 4223 GetSplitVector(N->getOperand(0), LHSLo, LHSHi); 4224 GetSplitVector(N->getOperand(1), RHSLo, RHSHi); 4225 4226 EVT ResVT = N->getValueType(0); 4227 ElementCount SplitOpEC = LHSLo.getValueType().getVectorElementCount(); 4228 EVT NewResVT = 4229 EVT::getVectorVT(Ctxt, ResVT.getVectorElementType(), SplitOpEC); 4230 4231 SDValue Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSLo, RHSLo); 4232 SDValue Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSHi, RHSHi); 4233 4234 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); 4235 } 4236 4237 SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) { 4238 EVT ResVT = N->getValueType(0); 4239 SDValue Lo, Hi; 4240 SDLoc dl(N); 4241 GetSplitVector(N->getOperand(0), Lo, Hi); 4242 EVT InVT = Lo.getValueType(); 4243 4244 EVT NewResVT = 4245 EVT::getVectorVT(*DAG.getContext(), 
ResVT.getVectorElementType(), 4246 InVT.getVectorElementCount()); 4247 4248 Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1)); 4249 Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1)); 4250 4251 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); 4252 } 4253 4254 SDValue DAGTypeLegalizer::SplitVecOp_VP_CttzElements(SDNode *N) { 4255 SDLoc DL(N); 4256 EVT ResVT = N->getValueType(0); 4257 4258 SDValue Lo, Hi; 4259 SDValue VecOp = N->getOperand(0); 4260 GetSplitVector(VecOp, Lo, Hi); 4261 4262 auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1)); 4263 auto [EVLLo, EVLHi] = 4264 DAG.SplitEVL(N->getOperand(2), VecOp.getValueType(), DL); 4265 SDValue VLo = DAG.getZExtOrTrunc(EVLLo, DL, ResVT); 4266 4267 // if VP_CTTZ_ELTS(Lo) != EVLLo => VP_CTTZ_ELTS(Lo). 4268 // else => EVLLo + (VP_CTTZ_ELTS(Hi) or VP_CTTZ_ELTS_ZERO_UNDEF(Hi)). 4269 SDValue ResLo = DAG.getNode(ISD::VP_CTTZ_ELTS, DL, ResVT, Lo, MaskLo, EVLLo); 4270 SDValue ResLoNotEVL = 4271 DAG.getSetCC(DL, getSetCCResultType(ResVT), ResLo, VLo, ISD::SETNE); 4272 SDValue ResHi = DAG.getNode(N->getOpcode(), DL, ResVT, Hi, MaskHi, EVLHi); 4273 return DAG.getSelect(DL, ResVT, ResLoNotEVL, ResLo, 4274 DAG.getNode(ISD::ADD, DL, ResVT, VLo, ResHi)); 4275 } 4276 4277 //===----------------------------------------------------------------------===// 4278 // Result Vector Widening 4279 //===----------------------------------------------------------------------===// 4280 4281 void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { 4282 LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG)); 4283 4284 // See if the target wants to custom widen this node. 4285 if (CustomWidenLowerNode(N, N->getValueType(ResNo))) 4286 return; 4287 4288 SDValue Res = SDValue(); 4289 4290 auto unrollExpandedOp = [&]() { 4291 // We're going to widen this vector op to a legal type by padding with undef 4292 // elements. If the wide vector op is eventually going to be expanded to 4293 // scalar libcalls, then unroll into scalar ops now to avoid unnecessary 4294 // libcalls on the undef elements. 
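// E.g. widening a v3f64 FREM to v4f64 on a target without a vector frem would
// otherwise emit an fmod libcall for the undef fourth lane as well.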
4295 EVT VT = N->getValueType(0); 4296 EVT WideVecVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 4297 if (!TLI.isOperationLegalOrCustom(N->getOpcode(), WideVecVT) && 4298 TLI.isOperationExpand(N->getOpcode(), VT.getScalarType())) { 4299 Res = DAG.UnrollVectorOp(N, WideVecVT.getVectorNumElements()); 4300 return true; 4301 } 4302 return false; 4303 }; 4304 4305 switch (N->getOpcode()) { 4306 default: 4307 #ifndef NDEBUG 4308 dbgs() << "WidenVectorResult #" << ResNo << ": "; 4309 N->dump(&DAG); 4310 dbgs() << "\n"; 4311 #endif 4312 report_fatal_error("Do not know how to widen the result of this operator!"); 4313 4314 case ISD::MERGE_VALUES: Res = WidenVecRes_MERGE_VALUES(N, ResNo); break; 4315 case ISD::ADDRSPACECAST: 4316 Res = WidenVecRes_ADDRSPACECAST(N); 4317 break; 4318 case ISD::AssertZext: Res = WidenVecRes_AssertZext(N); break; 4319 case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break; 4320 case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break; 4321 case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break; 4322 case ISD::INSERT_SUBVECTOR: 4323 Res = WidenVecRes_INSERT_SUBVECTOR(N); 4324 break; 4325 case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break; 4326 case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break; 4327 case ISD::LOAD: Res = WidenVecRes_LOAD(N); break; 4328 case ISD::STEP_VECTOR: 4329 case ISD::SPLAT_VECTOR: 4330 case ISD::SCALAR_TO_VECTOR: 4331 case ISD::EXPERIMENTAL_VP_SPLAT: 4332 Res = WidenVecRes_ScalarOp(N); 4333 break; 4334 case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break; 4335 case ISD::VSELECT: 4336 case ISD::SELECT: 4337 case ISD::VP_SELECT: 4338 case ISD::VP_MERGE: 4339 Res = WidenVecRes_Select(N); 4340 break; 4341 case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break; 4342 case ISD::VP_SETCC: 4343 case ISD::SETCC: Res = WidenVecRes_SETCC(N); break; 4344 case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break; 4345 case ISD::VECTOR_SHUFFLE: 4346 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N)); 4347 break; 4348 case ISD::VP_LOAD: 4349 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N)); 4350 break; 4351 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: 4352 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N)); 4353 break; 4354 case ISD::VECTOR_COMPRESS: 4355 Res = WidenVecRes_VECTOR_COMPRESS(N); 4356 break; 4357 case ISD::MLOAD: 4358 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N)); 4359 break; 4360 case ISD::MGATHER: 4361 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N)); 4362 break; 4363 case ISD::VP_GATHER: 4364 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N)); 4365 break; 4366 case ISD::VECTOR_REVERSE: 4367 Res = WidenVecRes_VECTOR_REVERSE(N); 4368 break; 4369 4370 case ISD::ADD: case ISD::VP_ADD: 4371 case ISD::AND: case ISD::VP_AND: 4372 case ISD::MUL: case ISD::VP_MUL: 4373 case ISD::MULHS: 4374 case ISD::MULHU: 4375 case ISD::OR: case ISD::VP_OR: 4376 case ISD::SUB: case ISD::VP_SUB: 4377 case ISD::XOR: case ISD::VP_XOR: 4378 case ISD::SHL: case ISD::VP_SHL: 4379 case ISD::SRA: case ISD::VP_SRA: 4380 case ISD::SRL: case ISD::VP_SRL: 4381 case ISD::FMINNUM: 4382 case ISD::FMINNUM_IEEE: 4383 case ISD::VP_FMINNUM: 4384 case ISD::FMAXNUM: 4385 case ISD::FMAXNUM_IEEE: 4386 case ISD::VP_FMAXNUM: 4387 case ISD::FMINIMUM: 4388 case ISD::VP_FMINIMUM: 4389 case ISD::FMAXIMUM: 4390 case ISD::VP_FMAXIMUM: 4391 case ISD::SMIN: case ISD::VP_SMIN: 4392 case ISD::SMAX: case ISD::VP_SMAX: 4393 case ISD::UMIN: case ISD::VP_UMIN: 4394 case ISD::UMAX: case 
ISD::VP_UMAX: 4395 case ISD::UADDSAT: case ISD::VP_UADDSAT: 4396 case ISD::SADDSAT: case ISD::VP_SADDSAT: 4397 case ISD::USUBSAT: case ISD::VP_USUBSAT: 4398 case ISD::SSUBSAT: case ISD::VP_SSUBSAT: 4399 case ISD::SSHLSAT: 4400 case ISD::USHLSAT: 4401 case ISD::ROTL: 4402 case ISD::ROTR: 4403 case ISD::AVGFLOORS: 4404 case ISD::AVGFLOORU: 4405 case ISD::AVGCEILS: 4406 case ISD::AVGCEILU: 4407 // Vector-predicated binary op widening. Note that -- unlike the 4408 // unpredicated versions -- we don't have to worry about trapping on 4409 // operations like UDIV, FADD, etc., as we pass on the original vector 4410 // length parameter. This means the widened elements containing garbage 4411 // aren't active. 4412 case ISD::VP_SDIV: 4413 case ISD::VP_UDIV: 4414 case ISD::VP_SREM: 4415 case ISD::VP_UREM: 4416 case ISD::VP_FADD: 4417 case ISD::VP_FSUB: 4418 case ISD::VP_FMUL: 4419 case ISD::VP_FDIV: 4420 case ISD::VP_FREM: 4421 case ISD::VP_FCOPYSIGN: 4422 Res = WidenVecRes_Binary(N); 4423 break; 4424 4425 case ISD::SCMP: 4426 case ISD::UCMP: 4427 Res = WidenVecRes_CMP(N); 4428 break; 4429 4430 case ISD::FPOW: 4431 case ISD::FREM: 4432 if (unrollExpandedOp()) 4433 break; 4434 // If the target has custom/legal support for the scalar FP intrinsic ops 4435 // (they are probably not destined to become libcalls), then widen those 4436 // like any other binary ops. 4437 [[fallthrough]]; 4438 4439 case ISD::FADD: 4440 case ISD::FMUL: 4441 case ISD::FSUB: 4442 case ISD::FDIV: 4443 case ISD::SDIV: 4444 case ISD::UDIV: 4445 case ISD::SREM: 4446 case ISD::UREM: 4447 Res = WidenVecRes_BinaryCanTrap(N); 4448 break; 4449 4450 case ISD::SMULFIX: 4451 case ISD::SMULFIXSAT: 4452 case ISD::UMULFIX: 4453 case ISD::UMULFIXSAT: 4454 // These are binary operations, but with an extra operand that shouldn't 4455 // be widened (the scale). 
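// (The scale is a scalar constant operand; it is passed through unchanged.)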
4456 Res = WidenVecRes_BinaryWithExtraScalarOp(N); 4457 break; 4458 4459 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 4460 case ISD::STRICT_##DAGN: 4461 #include "llvm/IR/ConstrainedOps.def" 4462 Res = WidenVecRes_StrictFP(N); 4463 break; 4464 4465 case ISD::UADDO: 4466 case ISD::SADDO: 4467 case ISD::USUBO: 4468 case ISD::SSUBO: 4469 case ISD::UMULO: 4470 case ISD::SMULO: 4471 Res = WidenVecRes_OverflowOp(N, ResNo); 4472 break; 4473 4474 case ISD::FCOPYSIGN: 4475 Res = WidenVecRes_FCOPYSIGN(N); 4476 break; 4477 4478 case ISD::IS_FPCLASS: 4479 case ISD::FPTRUNC_ROUND: 4480 Res = WidenVecRes_UnarySameEltsWithScalarArg(N); 4481 break; 4482 4483 case ISD::FLDEXP: 4484 case ISD::FPOWI: 4485 if (!unrollExpandedOp()) 4486 Res = WidenVecRes_ExpOp(N); 4487 break; 4488 4489 case ISD::ANY_EXTEND_VECTOR_INREG: 4490 case ISD::SIGN_EXTEND_VECTOR_INREG: 4491 case ISD::ZERO_EXTEND_VECTOR_INREG: 4492 Res = WidenVecRes_EXTEND_VECTOR_INREG(N); 4493 break; 4494 4495 case ISD::ANY_EXTEND: 4496 case ISD::FP_EXTEND: 4497 case ISD::VP_FP_EXTEND: 4498 case ISD::FP_ROUND: 4499 case ISD::VP_FP_ROUND: 4500 case ISD::FP_TO_SINT: 4501 case ISD::VP_FP_TO_SINT: 4502 case ISD::FP_TO_UINT: 4503 case ISD::VP_FP_TO_UINT: 4504 case ISD::SIGN_EXTEND: 4505 case ISD::VP_SIGN_EXTEND: 4506 case ISD::SINT_TO_FP: 4507 case ISD::VP_SINT_TO_FP: 4508 case ISD::VP_TRUNCATE: 4509 case ISD::TRUNCATE: 4510 case ISD::UINT_TO_FP: 4511 case ISD::VP_UINT_TO_FP: 4512 case ISD::ZERO_EXTEND: 4513 case ISD::VP_ZERO_EXTEND: 4514 Res = WidenVecRes_Convert(N); 4515 break; 4516 4517 case ISD::FP_TO_SINT_SAT: 4518 case ISD::FP_TO_UINT_SAT: 4519 Res = WidenVecRes_FP_TO_XINT_SAT(N); 4520 break; 4521 4522 case ISD::LRINT: 4523 case ISD::LLRINT: 4524 case ISD::VP_LRINT: 4525 case ISD::VP_LLRINT: 4526 Res = WidenVecRes_XRINT(N); 4527 break; 4528 4529 case ISD::FABS: 4530 case ISD::FACOS: 4531 case ISD::FASIN: 4532 case ISD::FATAN: 4533 case ISD::FCEIL: 4534 case ISD::FCOS: 4535 case ISD::FCOSH: 4536 case ISD::FEXP: 4537 case ISD::FEXP2: 4538 case ISD::FEXP10: 4539 case ISD::FFLOOR: 4540 case ISD::FLOG: 4541 case ISD::FLOG10: 4542 case ISD::FLOG2: 4543 case ISD::FNEARBYINT: 4544 case ISD::FRINT: 4545 case ISD::FROUND: 4546 case ISD::FROUNDEVEN: 4547 case ISD::FSIN: 4548 case ISD::FSINH: 4549 case ISD::FSQRT: 4550 case ISD::FTAN: 4551 case ISD::FTANH: 4552 case ISD::FTRUNC: 4553 if (unrollExpandedOp()) 4554 break; 4555 // If the target has custom/legal support for the scalar FP intrinsic ops 4556 // (they are probably not destined to become libcalls), then widen those 4557 // like any other unary ops. 
4558 [[fallthrough]]; 4559 4560 case ISD::ABS: 4561 case ISD::VP_ABS: 4562 case ISD::BITREVERSE: 4563 case ISD::VP_BITREVERSE: 4564 case ISD::BSWAP: 4565 case ISD::VP_BSWAP: 4566 case ISD::CTLZ: 4567 case ISD::VP_CTLZ: 4568 case ISD::CTLZ_ZERO_UNDEF: 4569 case ISD::VP_CTLZ_ZERO_UNDEF: 4570 case ISD::CTPOP: 4571 case ISD::VP_CTPOP: 4572 case ISD::CTTZ: 4573 case ISD::VP_CTTZ: 4574 case ISD::CTTZ_ZERO_UNDEF: 4575 case ISD::VP_CTTZ_ZERO_UNDEF: 4576 case ISD::FNEG: case ISD::VP_FNEG: 4577 case ISD::VP_FABS: 4578 case ISD::VP_SQRT: 4579 case ISD::VP_FCEIL: 4580 case ISD::VP_FFLOOR: 4581 case ISD::VP_FRINT: 4582 case ISD::VP_FNEARBYINT: 4583 case ISD::VP_FROUND: 4584 case ISD::VP_FROUNDEVEN: 4585 case ISD::VP_FROUNDTOZERO: 4586 case ISD::FREEZE: 4587 case ISD::ARITH_FENCE: 4588 case ISD::FCANONICALIZE: 4589 Res = WidenVecRes_Unary(N); 4590 break; 4591 case ISD::FMA: case ISD::VP_FMA: 4592 case ISD::FSHL: 4593 case ISD::VP_FSHL: 4594 case ISD::FSHR: 4595 case ISD::VP_FSHR: 4596 Res = WidenVecRes_Ternary(N); 4597 break; 4598 } 4599 4600 // If Res is null, the sub-method took care of registering the result. 4601 if (Res.getNode()) 4602 SetWidenedVector(SDValue(N, ResNo), Res); 4603 } 4604 4605 SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) { 4606 // Ternary op widening. 4607 SDLoc dl(N); 4608 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 4609 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 4610 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 4611 SDValue InOp3 = GetWidenedVector(N->getOperand(2)); 4612 if (N->getNumOperands() == 3) 4613 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3); 4614 4615 assert(N->getNumOperands() == 5 && "Unexpected number of operands!"); 4616 assert(N->isVPOpcode() && "Expected VP opcode"); 4617 4618 SDValue Mask = 4619 GetWidenedMask(N->getOperand(3), WidenVT.getVectorElementCount()); 4620 return DAG.getNode(N->getOpcode(), dl, WidenVT, 4621 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)}); 4622 } 4623 4624 SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) { 4625 // Binary op widening. 
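// For VP nodes the mask is widened to match and the original EVL is reused, so
// the extra lanes introduced by widening stay inactive.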
4626 SDLoc dl(N); 4627 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 4628 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 4629 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 4630 if (N->getNumOperands() == 2) 4631 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, 4632 N->getFlags()); 4633 4634 assert(N->getNumOperands() == 4 && "Unexpected number of operands!"); 4635 assert(N->isVPOpcode() && "Expected VP opcode"); 4636 4637 SDValue Mask = 4638 GetWidenedMask(N->getOperand(2), WidenVT.getVectorElementCount()); 4639 return DAG.getNode(N->getOpcode(), dl, WidenVT, 4640 {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags()); 4641 } 4642 4643 SDValue DAGTypeLegalizer::WidenVecRes_CMP(SDNode *N) { 4644 LLVMContext &Ctxt = *DAG.getContext(); 4645 SDLoc dl(N); 4646 4647 SDValue LHS = N->getOperand(0); 4648 SDValue RHS = N->getOperand(1); 4649 EVT OpVT = LHS.getValueType(); 4650 if (getTypeAction(OpVT) == TargetLowering::TypeWidenVector) { 4651 LHS = GetWidenedVector(LHS); 4652 RHS = GetWidenedVector(RHS); 4653 OpVT = LHS.getValueType(); 4654 } 4655 4656 EVT WidenResVT = TLI.getTypeToTransformTo(Ctxt, N->getValueType(0)); 4657 ElementCount WidenResEC = WidenResVT.getVectorElementCount(); 4658 if (WidenResEC == OpVT.getVectorElementCount()) { 4659 return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS); 4660 } 4661 4662 return DAG.UnrollVectorOp(N, WidenResVT.getVectorNumElements()); 4663 } 4664 4665 SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) { 4666 // Binary op widening, but with an extra operand that shouldn't be widened. 4667 SDLoc dl(N); 4668 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 4669 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 4670 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 4671 SDValue InOp3 = N->getOperand(2); 4672 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3, 4673 N->getFlags()); 4674 } 4675 4676 // Given a vector of operations that have been broken up to widen, see 4677 // if we can collect them together into the next widest legal VT. This 4678 // implementation is trap-safe. 4679 static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, 4680 SmallVectorImpl<SDValue> &ConcatOps, 4681 unsigned ConcatEnd, EVT VT, EVT MaxVT, 4682 EVT WidenVT) { 4683 // Check to see if we have a single operation with the widen type. 4684 if (ConcatEnd == 1) { 4685 VT = ConcatOps[0].getValueType(); 4686 if (VT == WidenVT) 4687 return ConcatOps[0]; 4688 } 4689 4690 SDLoc dl(ConcatOps[0]); 4691 EVT WidenEltVT = WidenVT.getVectorElementType(); 4692 4693 // while (Some element of ConcatOps is not of type MaxVT) { 4694 // From the end of ConcatOps, collect elements of the same type and put 4695 // them into an op of the next larger supported type 4696 // } 4697 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) { 4698 int Idx = ConcatEnd - 1; 4699 VT = ConcatOps[Idx--].getValueType(); 4700 while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT) 4701 Idx--; 4702 4703 int NextSize = VT.isVector() ? 
VT.getVectorNumElements() : 1; 4704 EVT NextVT; 4705 do { 4706 NextSize *= 2; 4707 NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize); 4708 } while (!TLI.isTypeLegal(NextVT)); 4709 4710 if (!VT.isVector()) { 4711 // Scalar type, create an INSERT_VECTOR_ELEMENT of type NextVT 4712 SDValue VecOp = DAG.getUNDEF(NextVT); 4713 unsigned NumToInsert = ConcatEnd - Idx - 1; 4714 for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) { 4715 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp, 4716 ConcatOps[OpIdx], DAG.getVectorIdxConstant(i, dl)); 4717 } 4718 ConcatOps[Idx+1] = VecOp; 4719 ConcatEnd = Idx + 2; 4720 } else { 4721 // Vector type, create a CONCAT_VECTORS of type NextVT 4722 SDValue undefVec = DAG.getUNDEF(VT); 4723 unsigned OpsToConcat = NextSize/VT.getVectorNumElements(); 4724 SmallVector<SDValue, 16> SubConcatOps(OpsToConcat); 4725 unsigned RealVals = ConcatEnd - Idx - 1; 4726 unsigned SubConcatEnd = 0; 4727 unsigned SubConcatIdx = Idx + 1; 4728 while (SubConcatEnd < RealVals) 4729 SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx]; 4730 while (SubConcatEnd < OpsToConcat) 4731 SubConcatOps[SubConcatEnd++] = undefVec; 4732 ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl, 4733 NextVT, SubConcatOps); 4734 ConcatEnd = SubConcatIdx + 1; 4735 } 4736 } 4737 4738 // Check to see if we have a single operation with the widen type. 4739 if (ConcatEnd == 1) { 4740 VT = ConcatOps[0].getValueType(); 4741 if (VT == WidenVT) 4742 return ConcatOps[0]; 4743 } 4744 4745 // add undefs of size MaxVT until ConcatOps grows to length of WidenVT 4746 unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements(); 4747 if (NumOps != ConcatEnd ) { 4748 SDValue UndefVal = DAG.getUNDEF(MaxVT); 4749 for (unsigned j = ConcatEnd; j < NumOps; ++j) 4750 ConcatOps[j] = UndefVal; 4751 } 4752 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, 4753 ArrayRef(ConcatOps.data(), NumOps)); 4754 } 4755 4756 SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) { 4757 // Binary op widening for operations that can trap. 4758 unsigned Opcode = N->getOpcode(); 4759 SDLoc dl(N); 4760 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 4761 EVT WidenEltVT = WidenVT.getVectorElementType(); 4762 EVT VT = WidenVT; 4763 unsigned NumElts = VT.getVectorMinNumElements(); 4764 const SDNodeFlags Flags = N->getFlags(); 4765 while (!TLI.isTypeLegal(VT) && NumElts != 1) { 4766 NumElts = NumElts / 2; 4767 VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts); 4768 } 4769 4770 if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) { 4771 // Operation doesn't trap so just widen as normal. 4772 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 4773 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 4774 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags); 4775 } 4776 4777 // FIXME: Improve support for scalable vectors. 4778 assert(!VT.isScalableVector() && "Scalable vectors not handled yet."); 4779 4780 // No legal vector version so unroll the vector operation and then widen. 4781 if (NumElts == 1) 4782 return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements()); 4783 4784 // Since the operation can trap, apply operation on the original vector. 
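// Widening first would introduce padding lanes with unspecified contents,
// and for a trapping operation (e.g. an integer divide whose padding lane
// happens to contain a zero divisor) those lanes could fault. So only the
// original lanes are processed, in the largest legal chunks available, and
// CollectOpsToWiden reassembles the pieces into the widened type afterwards.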
4785 EVT MaxVT = VT; 4786 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 4787 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 4788 unsigned CurNumElts = N->getValueType(0).getVectorNumElements(); 4789 4790 SmallVector<SDValue, 16> ConcatOps(CurNumElts); 4791 unsigned ConcatEnd = 0; // Current ConcatOps index. 4792 int Idx = 0; // Current Idx into input vectors. 4793 4794 // NumElts := greatest legal vector size (at most WidenVT) 4795 // while (orig. vector has unhandled elements) { 4796 // take munches of size NumElts from the beginning and add to ConcatOps 4797 // NumElts := next smaller supported vector size or 1 4798 // } 4799 while (CurNumElts != 0) { 4800 while (CurNumElts >= NumElts) { 4801 SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1, 4802 DAG.getVectorIdxConstant(Idx, dl)); 4803 SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2, 4804 DAG.getVectorIdxConstant(Idx, dl)); 4805 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags); 4806 Idx += NumElts; 4807 CurNumElts -= NumElts; 4808 } 4809 do { 4810 NumElts = NumElts / 2; 4811 VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts); 4812 } while (!TLI.isTypeLegal(VT) && NumElts != 1); 4813 4814 if (NumElts == 1) { 4815 for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) { 4816 SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, 4817 InOp1, DAG.getVectorIdxConstant(Idx, dl)); 4818 SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, 4819 InOp2, DAG.getVectorIdxConstant(Idx, dl)); 4820 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT, 4821 EOp1, EOp2, Flags); 4822 } 4823 CurNumElts = 0; 4824 } 4825 } 4826 4827 return CollectOpsToWiden(DAG, TLI, ConcatOps, ConcatEnd, VT, MaxVT, WidenVT); 4828 } 4829 4830 SDValue DAGTypeLegalizer::WidenVecRes_StrictFP(SDNode *N) { 4831 switch (N->getOpcode()) { 4832 case ISD::STRICT_FSETCC: 4833 case ISD::STRICT_FSETCCS: 4834 return WidenVecRes_STRICT_FSETCC(N); 4835 case ISD::STRICT_FP_EXTEND: 4836 case ISD::STRICT_FP_ROUND: 4837 case ISD::STRICT_FP_TO_SINT: 4838 case ISD::STRICT_FP_TO_UINT: 4839 case ISD::STRICT_SINT_TO_FP: 4840 case ISD::STRICT_UINT_TO_FP: 4841 return WidenVecRes_Convert_StrictFP(N); 4842 default: 4843 break; 4844 } 4845 4846 // StrictFP op widening for operations that can trap. 4847 unsigned NumOpers = N->getNumOperands(); 4848 unsigned Opcode = N->getOpcode(); 4849 SDLoc dl(N); 4850 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 4851 EVT WidenEltVT = WidenVT.getVectorElementType(); 4852 EVT VT = WidenVT; 4853 unsigned NumElts = VT.getVectorNumElements(); 4854 while (!TLI.isTypeLegal(VT) && NumElts != 1) { 4855 NumElts = NumElts / 2; 4856 VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts); 4857 } 4858 4859 // No legal vector version so unroll the vector operation and then widen. 4860 if (NumElts == 1) 4861 return UnrollVectorOp_StrictFP(N, WidenVT.getVectorNumElements()); 4862 4863 // Since the operation can trap, apply operation on the original vector. 4864 EVT MaxVT = VT; 4865 SmallVector<SDValue, 4> InOps; 4866 unsigned CurNumElts = N->getValueType(0).getVectorNumElements(); 4867 4868 SmallVector<SDValue, 16> ConcatOps(CurNumElts); 4869 SmallVector<SDValue, 16> Chains; 4870 unsigned ConcatEnd = 0; // Current ConcatOps index. 4871 int Idx = 0; // Current Idx into input vectors. 4872 4873 // The Chain is the first operand. 4874 InOps.push_back(N->getOperand(0)); 4875 4876 // Now process the remaining operands. 
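// Vector operands are widened here: directly if their type action is to
// widen, otherwise by inserting them into an undef vector of the wide
// operand type. Non-vector operands (and the chain pushed above) are passed
// through unchanged.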
4877 for (unsigned i = 1; i < NumOpers; ++i) { 4878 SDValue Oper = N->getOperand(i); 4879 4880 EVT OpVT = Oper.getValueType(); 4881 if (OpVT.isVector()) { 4882 if (getTypeAction(OpVT) == TargetLowering::TypeWidenVector) 4883 Oper = GetWidenedVector(Oper); 4884 else { 4885 EVT WideOpVT = 4886 EVT::getVectorVT(*DAG.getContext(), OpVT.getVectorElementType(), 4887 WidenVT.getVectorElementCount()); 4888 Oper = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, 4889 DAG.getUNDEF(WideOpVT), Oper, 4890 DAG.getVectorIdxConstant(0, dl)); 4891 } 4892 } 4893 4894 InOps.push_back(Oper); 4895 } 4896 4897 // NumElts := greatest legal vector size (at most WidenVT) 4898 // while (orig. vector has unhandled elements) { 4899 // take munches of size NumElts from the beginning and add to ConcatOps 4900 // NumElts := next smaller supported vector size or 1 4901 // } 4902 while (CurNumElts != 0) { 4903 while (CurNumElts >= NumElts) { 4904 SmallVector<SDValue, 4> EOps; 4905 4906 for (unsigned i = 0; i < NumOpers; ++i) { 4907 SDValue Op = InOps[i]; 4908 4909 EVT OpVT = Op.getValueType(); 4910 if (OpVT.isVector()) { 4911 EVT OpExtractVT = 4912 EVT::getVectorVT(*DAG.getContext(), OpVT.getVectorElementType(), 4913 VT.getVectorElementCount()); 4914 Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpExtractVT, Op, 4915 DAG.getVectorIdxConstant(Idx, dl)); 4916 } 4917 4918 EOps.push_back(Op); 4919 } 4920 4921 EVT OperVT[] = {VT, MVT::Other}; 4922 SDValue Oper = DAG.getNode(Opcode, dl, OperVT, EOps); 4923 ConcatOps[ConcatEnd++] = Oper; 4924 Chains.push_back(Oper.getValue(1)); 4925 Idx += NumElts; 4926 CurNumElts -= NumElts; 4927 } 4928 do { 4929 NumElts = NumElts / 2; 4930 VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts); 4931 } while (!TLI.isTypeLegal(VT) && NumElts != 1); 4932 4933 if (NumElts == 1) { 4934 for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) { 4935 SmallVector<SDValue, 4> EOps; 4936 4937 for (unsigned i = 0; i < NumOpers; ++i) { 4938 SDValue Op = InOps[i]; 4939 4940 EVT OpVT = Op.getValueType(); 4941 if (OpVT.isVector()) 4942 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 4943 OpVT.getVectorElementType(), Op, 4944 DAG.getVectorIdxConstant(Idx, dl)); 4945 4946 EOps.push_back(Op); 4947 } 4948 4949 EVT WidenVT[] = {WidenEltVT, MVT::Other}; 4950 SDValue Oper = DAG.getNode(Opcode, dl, WidenVT, EOps); 4951 ConcatOps[ConcatEnd++] = Oper; 4952 Chains.push_back(Oper.getValue(1)); 4953 } 4954 CurNumElts = 0; 4955 } 4956 } 4957 4958 // Build a factor node to remember all the Ops that have been created. 4959 SDValue NewChain; 4960 if (Chains.size() == 1) 4961 NewChain = Chains[0]; 4962 else 4963 NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 4964 ReplaceValueWith(SDValue(N, 1), NewChain); 4965 4966 return CollectOpsToWiden(DAG, TLI, ConcatOps, ConcatEnd, VT, MaxVT, WidenVT); 4967 } 4968 4969 SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) { 4970 SDLoc DL(N); 4971 EVT ResVT = N->getValueType(0); 4972 EVT OvVT = N->getValueType(1); 4973 EVT WideResVT, WideOvVT; 4974 SDValue WideLHS, WideRHS; 4975 4976 // TODO: This might result in a widen/split loop. 
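// The arithmetic result (value 0) and the overflow result (value 1) must end
// up with the same element count, so whichever result requested widening
// (ResNo) dictates the wide element count. The other result is then either
// registered as widened as well or shrunk back to its original type with an
// EXTRACT_SUBVECTOR below.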
4977 if (ResNo == 0) { 4978 WideResVT = TLI.getTypeToTransformTo(*DAG.getContext(), ResVT); 4979 WideOvVT = EVT::getVectorVT( 4980 *DAG.getContext(), OvVT.getVectorElementType(), 4981 WideResVT.getVectorNumElements()); 4982 4983 WideLHS = GetWidenedVector(N->getOperand(0)); 4984 WideRHS = GetWidenedVector(N->getOperand(1)); 4985 } else { 4986 WideOvVT = TLI.getTypeToTransformTo(*DAG.getContext(), OvVT); 4987 WideResVT = EVT::getVectorVT( 4988 *DAG.getContext(), ResVT.getVectorElementType(), 4989 WideOvVT.getVectorNumElements()); 4990 4991 SDValue Zero = DAG.getVectorIdxConstant(0, DL); 4992 WideLHS = DAG.getNode( 4993 ISD::INSERT_SUBVECTOR, DL, WideResVT, DAG.getUNDEF(WideResVT), 4994 N->getOperand(0), Zero); 4995 WideRHS = DAG.getNode( 4996 ISD::INSERT_SUBVECTOR, DL, WideResVT, DAG.getUNDEF(WideResVT), 4997 N->getOperand(1), Zero); 4998 } 4999 5000 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT); 5001 SDNode *WideNode = DAG.getNode( 5002 N->getOpcode(), DL, WideVTs, WideLHS, WideRHS).getNode(); 5003 5004 // Replace the other vector result not being explicitly widened here. 5005 unsigned OtherNo = 1 - ResNo; 5006 EVT OtherVT = N->getValueType(OtherNo); 5007 if (getTypeAction(OtherVT) == TargetLowering::TypeWidenVector) { 5008 SetWidenedVector(SDValue(N, OtherNo), SDValue(WideNode, OtherNo)); 5009 } else { 5010 SDValue Zero = DAG.getVectorIdxConstant(0, DL); 5011 SDValue OtherVal = DAG.getNode( 5012 ISD::EXTRACT_SUBVECTOR, DL, OtherVT, SDValue(WideNode, OtherNo), Zero); 5013 ReplaceValueWith(SDValue(N, OtherNo), OtherVal); 5014 } 5015 5016 return SDValue(WideNode, ResNo); 5017 } 5018 5019 SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) { 5020 LLVMContext &Ctx = *DAG.getContext(); 5021 SDValue InOp = N->getOperand(0); 5022 SDLoc DL(N); 5023 5024 EVT WidenVT = TLI.getTypeToTransformTo(Ctx, N->getValueType(0)); 5025 ElementCount WidenEC = WidenVT.getVectorElementCount(); 5026 5027 EVT InVT = InOp.getValueType(); 5028 5029 unsigned Opcode = N->getOpcode(); 5030 const SDNodeFlags Flags = N->getFlags(); 5031 5032 // Handle the case of ZERO_EXTEND where the promoted InVT element size does 5033 // not equal that of WidenVT. 
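// As a hypothetical example: zero-extending v2i8 to v2i16, where v2i8 is
// promoted to v2i32 and v2i16 widens to v4i16. The input is first
// re-zero-extended within its promoted i32 elements; since the widened
// result's i16 elements are then narrower than the promoted input's i32
// elements, the opcode is switched to TRUNCATE, which still produces the
// correctly zero-extended result.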
5034 if (N->getOpcode() == ISD::ZERO_EXTEND && 5035 getTypeAction(InVT) == TargetLowering::TypePromoteInteger && 5036 TLI.getTypeToTransformTo(Ctx, InVT).getScalarSizeInBits() != 5037 WidenVT.getScalarSizeInBits()) { 5038 InOp = ZExtPromotedInteger(InOp); 5039 InVT = InOp.getValueType(); 5040 if (WidenVT.getScalarSizeInBits() < InVT.getScalarSizeInBits()) 5041 Opcode = ISD::TRUNCATE; 5042 } 5043 5044 EVT InEltVT = InVT.getVectorElementType(); 5045 EVT InWidenVT = EVT::getVectorVT(Ctx, InEltVT, WidenEC); 5046 ElementCount InVTEC = InVT.getVectorElementCount(); 5047 5048 if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) { 5049 InOp = GetWidenedVector(N->getOperand(0)); 5050 InVT = InOp.getValueType(); 5051 InVTEC = InVT.getVectorElementCount(); 5052 if (InVTEC == WidenEC) { 5053 if (N->getNumOperands() == 1) 5054 return DAG.getNode(Opcode, DL, WidenVT, InOp); 5055 if (N->getNumOperands() == 3) { 5056 assert(N->isVPOpcode() && "Expected VP opcode"); 5057 SDValue Mask = 5058 GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount()); 5059 return DAG.getNode(Opcode, DL, WidenVT, InOp, Mask, N->getOperand(2)); 5060 } 5061 return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1), Flags); 5062 } 5063 if (WidenVT.getSizeInBits() == InVT.getSizeInBits()) { 5064 // If both input and result vector types are of same width, extend 5065 // operations should be done with SIGN/ZERO_EXTEND_VECTOR_INREG, which 5066 // accepts fewer elements in the result than in the input. 5067 if (Opcode == ISD::ANY_EXTEND) 5068 return DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, WidenVT, InOp); 5069 if (Opcode == ISD::SIGN_EXTEND) 5070 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, WidenVT, InOp); 5071 if (Opcode == ISD::ZERO_EXTEND) 5072 return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, WidenVT, InOp); 5073 } 5074 } 5075 5076 if (TLI.isTypeLegal(InWidenVT)) { 5077 // Because the result and the input are different vector types, widening 5078 // the result could create a legal type but widening the input might make 5079 // it an illegal type that might lead to repeatedly splitting the input 5080 // and then widening it. To avoid this, we widen the input only if 5081 // it results in a legal type. 5082 if (WidenEC.isKnownMultipleOf(InVTEC.getKnownMinValue())) { 5083 // Widen the input and call convert on the widened input vector. 5084 unsigned NumConcat = 5085 WidenEC.getKnownMinValue() / InVTEC.getKnownMinValue(); 5086 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT)); 5087 Ops[0] = InOp; 5088 SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT, Ops); 5089 if (N->getNumOperands() == 1) 5090 return DAG.getNode(Opcode, DL, WidenVT, InVec); 5091 return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1), Flags); 5092 } 5093 5094 if (InVTEC.isKnownMultipleOf(WidenEC.getKnownMinValue())) { 5095 SDValue InVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InWidenVT, InOp, 5096 DAG.getVectorIdxConstant(0, DL)); 5097 // Extract the input and convert the shortened input vector. 5098 if (N->getNumOperands() == 1) 5099 return DAG.getNode(Opcode, DL, WidenVT, InVal); 5100 return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1), Flags); 5101 } 5102 } 5103 5104 // Otherwise unroll into some nasty scalar code and rebuild the vector. 5105 EVT EltVT = WidenVT.getVectorElementType(); 5106 SmallVector<SDValue, 16> Ops(WidenEC.getFixedValue(), DAG.getUNDEF(EltVT)); 5107 // Use the original element count so we don't do more scalar opts than 5108 // necessary.
5109 unsigned MinElts = N->getValueType(0).getVectorNumElements(); 5110 for (unsigned i=0; i < MinElts; ++i) { 5111 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp, 5112 DAG.getVectorIdxConstant(i, DL)); 5113 if (N->getNumOperands() == 1) 5114 Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val); 5115 else 5116 Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1), Flags); 5117 } 5118 5119 return DAG.getBuildVector(WidenVT, DL, Ops); 5120 } 5121 5122 SDValue DAGTypeLegalizer::WidenVecRes_FP_TO_XINT_SAT(SDNode *N) { 5123 SDLoc dl(N); 5124 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5125 ElementCount WidenNumElts = WidenVT.getVectorElementCount(); 5126 5127 SDValue Src = N->getOperand(0); 5128 EVT SrcVT = Src.getValueType(); 5129 5130 // Also widen the input. 5131 if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) { 5132 Src = GetWidenedVector(Src); 5133 SrcVT = Src.getValueType(); 5134 } 5135 5136 // Input and output not widened to the same size, give up. 5137 if (WidenNumElts != SrcVT.getVectorElementCount()) 5138 return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue()); 5139 5140 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1)); 5141 } 5142 5143 SDValue DAGTypeLegalizer::WidenVecRes_XRINT(SDNode *N) { 5144 SDLoc dl(N); 5145 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5146 ElementCount WidenNumElts = WidenVT.getVectorElementCount(); 5147 5148 SDValue Src = N->getOperand(0); 5149 EVT SrcVT = Src.getValueType(); 5150 5151 // Also widen the input. 5152 if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) { 5153 Src = GetWidenedVector(Src); 5154 SrcVT = Src.getValueType(); 5155 } 5156 5157 // Input and output not widened to the same size, give up. 5158 if (WidenNumElts != SrcVT.getVectorElementCount()) 5159 return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue()); 5160 5161 if (N->getNumOperands() == 1) 5162 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src); 5163 5164 assert(N->getNumOperands() == 3 && "Unexpected number of operands!"); 5165 assert(N->isVPOpcode() && "Expected VP opcode"); 5166 5167 SDValue Mask = 5168 GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount()); 5169 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, Mask, N->getOperand(2)); 5170 } 5171 5172 SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(SDNode *N) { 5173 SDValue InOp = N->getOperand(1); 5174 SDLoc DL(N); 5175 SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end()); 5176 5177 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5178 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 5179 5180 EVT InVT = InOp.getValueType(); 5181 EVT InEltVT = InVT.getVectorElementType(); 5182 5183 unsigned Opcode = N->getOpcode(); 5184 5185 // FIXME: Optimizations need to be implemented here. 5186 5187 // Otherwise unroll into some nasty scalar code and rebuild the vector. 5188 EVT EltVT = WidenVT.getVectorElementType(); 5189 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}}; 5190 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT)); 5191 SmallVector<SDValue, 32> OpChains; 5192 // Use the original element count so we don't do more scalar opts than 5193 // necessary. 
5194 unsigned MinElts = N->getValueType(0).getVectorNumElements(); 5195 for (unsigned i=0; i < MinElts; ++i) { 5196 NewOps[1] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp, 5197 DAG.getVectorIdxConstant(i, DL)); 5198 Ops[i] = DAG.getNode(Opcode, DL, EltVTs, NewOps); 5199 OpChains.push_back(Ops[i].getValue(1)); 5200 } 5201 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OpChains); 5202 ReplaceValueWith(SDValue(N, 1), NewChain); 5203 5204 return DAG.getBuildVector(WidenVT, DL, Ops); 5205 } 5206 5207 SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(SDNode *N) { 5208 unsigned Opcode = N->getOpcode(); 5209 SDValue InOp = N->getOperand(0); 5210 SDLoc DL(N); 5211 5212 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5213 EVT WidenSVT = WidenVT.getVectorElementType(); 5214 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 5215 5216 EVT InVT = InOp.getValueType(); 5217 EVT InSVT = InVT.getVectorElementType(); 5218 unsigned InVTNumElts = InVT.getVectorNumElements(); 5219 5220 if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) { 5221 InOp = GetWidenedVector(InOp); 5222 InVT = InOp.getValueType(); 5223 if (InVT.getSizeInBits() == WidenVT.getSizeInBits()) { 5224 switch (Opcode) { 5225 case ISD::ANY_EXTEND_VECTOR_INREG: 5226 case ISD::SIGN_EXTEND_VECTOR_INREG: 5227 case ISD::ZERO_EXTEND_VECTOR_INREG: 5228 return DAG.getNode(Opcode, DL, WidenVT, InOp); 5229 } 5230 } 5231 } 5232 5233 // Unroll, extend the scalars and rebuild the vector. 5234 SmallVector<SDValue, 16> Ops; 5235 for (unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i != e; ++i) { 5236 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InSVT, InOp, 5237 DAG.getVectorIdxConstant(i, DL)); 5238 switch (Opcode) { 5239 case ISD::ANY_EXTEND_VECTOR_INREG: 5240 Val = DAG.getNode(ISD::ANY_EXTEND, DL, WidenSVT, Val); 5241 break; 5242 case ISD::SIGN_EXTEND_VECTOR_INREG: 5243 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, WidenSVT, Val); 5244 break; 5245 case ISD::ZERO_EXTEND_VECTOR_INREG: 5246 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenSVT, Val); 5247 break; 5248 default: 5249 llvm_unreachable("A *_EXTEND_VECTOR_INREG node was expected"); 5250 } 5251 Ops.push_back(Val); 5252 } 5253 5254 while (Ops.size() != WidenNumElts) 5255 Ops.push_back(DAG.getUNDEF(WidenSVT)); 5256 5257 return DAG.getBuildVector(WidenVT, DL, Ops); 5258 } 5259 5260 SDValue DAGTypeLegalizer::WidenVecRes_FCOPYSIGN(SDNode *N) { 5261 // If this is an FCOPYSIGN with same input types, we can treat it as a 5262 // normal (can trap) binary op. 5263 if (N->getOperand(0).getValueType() == N->getOperand(1).getValueType()) 5264 return WidenVecRes_BinaryCanTrap(N); 5265 5266 // If the types are different, fall back to unrolling. 5267 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5268 return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements()); 5269 } 5270 5271 /// Result and first source operand are different scalar types, but must have 5272 /// the same number of elements. There is an additional control argument which 5273 /// should be passed through unchanged. 
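/// This covers nodes such as IS_FPCLASS, where the result is a boolean
/// vector, the first operand is a floating-point vector with the same
/// element count, and the second operand is the scalar fp-class test mask,
/// which must not be widened.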
5274 SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(SDNode *N) { 5275 SDValue FpValue = N->getOperand(0); 5276 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5277 if (getTypeAction(FpValue.getValueType()) != TargetLowering::TypeWidenVector) 5278 return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements()); 5279 SDValue Arg = GetWidenedVector(FpValue); 5280 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, {Arg, N->getOperand(1)}, 5281 N->getFlags()); 5282 } 5283 5284 SDValue DAGTypeLegalizer::WidenVecRes_ExpOp(SDNode *N) { 5285 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5286 SDValue InOp = GetWidenedVector(N->getOperand(0)); 5287 SDValue RHS = N->getOperand(1); 5288 EVT ExpVT = RHS.getValueType(); 5289 SDValue ExpOp = RHS; 5290 if (ExpVT.isVector()) { 5291 EVT WideExpVT = 5292 WidenVT.changeVectorElementType(ExpVT.getVectorElementType()); 5293 ExpOp = ModifyToType(RHS, WideExpVT); 5294 } 5295 5296 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ExpOp); 5297 } 5298 5299 SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) { 5300 // Unary op widening. 5301 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5302 SDValue InOp = GetWidenedVector(N->getOperand(0)); 5303 if (N->getNumOperands() == 1) 5304 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, N->getFlags()); 5305 5306 assert(N->getNumOperands() == 3 && "Unexpected number of operands!"); 5307 assert(N->isVPOpcode() && "Expected VP opcode"); 5308 5309 SDValue Mask = 5310 GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount()); 5311 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, 5312 {InOp, Mask, N->getOperand(2)}); 5313 } 5314 5315 SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) { 5316 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5317 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(), 5318 cast<VTSDNode>(N->getOperand(1))->getVT() 5319 .getVectorElementType(), 5320 WidenVT.getVectorNumElements()); 5321 SDValue WidenLHS = GetWidenedVector(N->getOperand(0)); 5322 return DAG.getNode(N->getOpcode(), SDLoc(N), 5323 WidenVT, WidenLHS, DAG.getValueType(ExtVT)); 5324 } 5325 5326 SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) { 5327 SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo); 5328 return GetWidenedVector(WidenVec); 5329 } 5330 5331 SDValue DAGTypeLegalizer::WidenVecRes_ADDRSPACECAST(SDNode *N) { 5332 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5333 SDValue InOp = GetWidenedVector(N->getOperand(0)); 5334 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N); 5335 5336 return DAG.getAddrSpaceCast(SDLoc(N), WidenVT, InOp, 5337 AddrSpaceCastN->getSrcAddressSpace(), 5338 AddrSpaceCastN->getDestAddressSpace()); 5339 } 5340 5341 SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) { 5342 SDValue InOp = N->getOperand(0); 5343 EVT InVT = InOp.getValueType(); 5344 EVT VT = N->getValueType(0); 5345 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 5346 SDLoc dl(N); 5347 5348 switch (getTypeAction(InVT)) { 5349 case TargetLowering::TypeLegal: 5350 break; 5351 case TargetLowering::TypeScalarizeScalableVector: 5352 report_fatal_error("Scalarization of scalable vectors is not supported."); 5353 case TargetLowering::TypePromoteInteger: { 5354 // If the incoming type is a vector that is being promoted, then 5355 // we know that the elements are arranged differently and that we 
5356 // must perform the conversion using a stack slot. 5357 if (InVT.isVector()) 5358 break; 5359 5360 // If the InOp is promoted to the same size, convert it. Otherwise, 5361 // fall out of the switch and widen the promoted input. 5362 SDValue NInOp = GetPromotedInteger(InOp); 5363 EVT NInVT = NInOp.getValueType(); 5364 if (WidenVT.bitsEq(NInVT)) { 5365 // For big endian targets we need to shift the input integer or the 5366 // interesting bits will end up at the wrong place. 5367 if (DAG.getDataLayout().isBigEndian()) { 5368 unsigned ShiftAmt = NInVT.getSizeInBits() - InVT.getSizeInBits(); 5369 EVT ShiftAmtTy = TLI.getShiftAmountTy(NInVT, DAG.getDataLayout()); 5370 assert(ShiftAmt < WidenVT.getSizeInBits() && "Too large shift amount!"); 5371 NInOp = DAG.getNode(ISD::SHL, dl, NInVT, NInOp, 5372 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy)); 5373 } 5374 return DAG.getNode(ISD::BITCAST, dl, WidenVT, NInOp); 5375 } 5376 InOp = NInOp; 5377 InVT = NInVT; 5378 break; 5379 } 5380 case TargetLowering::TypeSoftenFloat: 5381 case TargetLowering::TypePromoteFloat: 5382 case TargetLowering::TypeSoftPromoteHalf: 5383 case TargetLowering::TypeExpandInteger: 5384 case TargetLowering::TypeExpandFloat: 5385 case TargetLowering::TypeScalarizeVector: 5386 case TargetLowering::TypeSplitVector: 5387 break; 5388 case TargetLowering::TypeWidenVector: 5389 // If the InOp is widened to the same size, convert it. Otherwise, fall 5390 // out of the switch and widen the widened input. 5391 InOp = GetWidenedVector(InOp); 5392 InVT = InOp.getValueType(); 5393 if (WidenVT.bitsEq(InVT)) 5394 // The input widens to the same size. Convert to the widen value. 5395 return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp); 5396 break; 5397 } 5398 5399 unsigned WidenSize = WidenVT.getSizeInBits(); 5400 unsigned InSize = InVT.getSizeInBits(); 5401 unsigned InScalarSize = InVT.getScalarSizeInBits(); 5402 // x86mmx is not an acceptable vector element type, so don't try. 5403 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) { 5404 // Determine new input vector type. The new input vector type will use 5405 // the same element type (if its a vector) or use the input type as a 5406 // vector. It is the same size as the type to widen to. 5407 EVT NewInVT; 5408 unsigned NewNumParts = WidenSize / InSize; 5409 if (InVT.isVector()) { 5410 EVT InEltVT = InVT.getVectorElementType(); 5411 NewInVT = EVT::getVectorVT(*DAG.getContext(), InEltVT, 5412 WidenSize / InEltVT.getSizeInBits()); 5413 } else { 5414 // For big endian systems, using the promoted input scalar type 5415 // to produce the scalar_to_vector would put the desired bits into 5416 // the least significant byte(s) of the wider element zero. This 5417 // will mean that the users of the result vector are using incorrect 5418 // bits. Use the original input type instead. Although either input 5419 // type can be used on little endian systems, for consistency we 5420 // use the original type there as well. 5421 EVT OrigInVT = N->getOperand(0).getValueType(); 5422 NewNumParts = WidenSize / OrigInVT.getSizeInBits(); 5423 NewInVT = EVT::getVectorVT(*DAG.getContext(), OrigInVT, NewNumParts); 5424 } 5425 5426 if (TLI.isTypeLegal(NewInVT)) { 5427 SDValue NewVec; 5428 if (InVT.isVector()) { 5429 // Because the result and the input are different vector types, widening 5430 // the result could create a legal type but widening the input might 5431 // make it an illegal type that might lead to repeatedly splitting the 5432 // input and then widening it. 
To avoid this, we widen the input only if 5433 // it results in a legal type. 5434 if (WidenSize % InSize == 0) { 5435 SmallVector<SDValue, 16> Ops(NewNumParts, DAG.getUNDEF(InVT)); 5436 Ops[0] = InOp; 5437 5438 NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewInVT, Ops); 5439 } else { 5440 SmallVector<SDValue, 16> Ops; 5441 DAG.ExtractVectorElements(InOp, Ops); 5442 Ops.append(WidenSize / InScalarSize - Ops.size(), 5443 DAG.getUNDEF(InVT.getVectorElementType())); 5444 5445 NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl, NewInVT, Ops); 5446 } 5447 } else { 5448 NewVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewInVT, InOp); 5449 } 5450 return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec); 5451 } 5452 } 5453 5454 return CreateStackStoreLoad(InOp, WidenVT); 5455 } 5456 5457 SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) { 5458 SDLoc dl(N); 5459 // Build a vector with undefined for the new nodes. 5460 EVT VT = N->getValueType(0); 5461 5462 // Integer BUILD_VECTOR operands may be larger than the node's vector element 5463 // type. The UNDEFs need to have the same type as the existing operands. 5464 EVT EltVT = N->getOperand(0).getValueType(); 5465 unsigned NumElts = VT.getVectorNumElements(); 5466 5467 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 5468 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 5469 5470 SmallVector<SDValue, 16> NewOps(N->op_begin(), N->op_end()); 5471 assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!"); 5472 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT)); 5473 5474 return DAG.getBuildVector(WidenVT, dl, NewOps); 5475 } 5476 5477 SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) { 5478 EVT InVT = N->getOperand(0).getValueType(); 5479 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5480 SDLoc dl(N); 5481 unsigned NumOperands = N->getNumOperands(); 5482 5483 bool InputWidened = false; // Indicates we need to widen the input. 5484 if (getTypeAction(InVT) != TargetLowering::TypeWidenVector) { 5485 unsigned WidenNumElts = WidenVT.getVectorMinNumElements(); 5486 unsigned NumInElts = InVT.getVectorMinNumElements(); 5487 if (WidenNumElts % NumInElts == 0) { 5488 // Add undef vectors to widen to correct length. 5489 unsigned NumConcat = WidenNumElts / NumInElts; 5490 SDValue UndefVal = DAG.getUNDEF(InVT); 5491 SmallVector<SDValue, 16> Ops(NumConcat); 5492 for (unsigned i=0; i < NumOperands; ++i) 5493 Ops[i] = N->getOperand(i); 5494 for (unsigned i = NumOperands; i != NumConcat; ++i) 5495 Ops[i] = UndefVal; 5496 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Ops); 5497 } 5498 } else { 5499 InputWidened = true; 5500 if (WidenVT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) { 5501 // The inputs and the result are widen to the same value. 5502 unsigned i; 5503 for (i=1; i < NumOperands; ++i) 5504 if (!N->getOperand(i).isUndef()) 5505 break; 5506 5507 if (i == NumOperands) 5508 // Everything but the first operand is an UNDEF so just return the 5509 // widened first operand. 5510 return GetWidenedVector(N->getOperand(0)); 5511 5512 if (NumOperands == 2) { 5513 assert(!WidenVT.isScalableVector() && 5514 "Cannot use vector shuffles to widen CONCAT_VECTOR result"); 5515 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 5516 unsigned NumInElts = InVT.getVectorNumElements(); 5517 5518 // Replace concat of two operands with a shuffle. 
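// The first NumInElts lanes of the mask select elements 0..NumInElts-1 of
// the first widened input; the next NumInElts lanes select the corresponding
// elements of the second input (indexed at WidenNumElts + i, since shuffle
// indices >= WidenNumElts refer to the second operand); the remaining lanes
// stay -1, i.e. undef.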
5519 SmallVector<int, 16> MaskOps(WidenNumElts, -1); 5520 for (unsigned i = 0; i < NumInElts; ++i) { 5521 MaskOps[i] = i; 5522 MaskOps[i + NumInElts] = i + WidenNumElts; 5523 } 5524 return DAG.getVectorShuffle(WidenVT, dl, 5525 GetWidenedVector(N->getOperand(0)), 5526 GetWidenedVector(N->getOperand(1)), 5527 MaskOps); 5528 } 5529 } 5530 } 5531 5532 assert(!WidenVT.isScalableVector() && 5533 "Cannot use build vectors to widen CONCAT_VECTOR result"); 5534 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 5535 unsigned NumInElts = InVT.getVectorNumElements(); 5536 5537 // Fall back to use extracts and build vector. 5538 EVT EltVT = WidenVT.getVectorElementType(); 5539 SmallVector<SDValue, 16> Ops(WidenNumElts); 5540 unsigned Idx = 0; 5541 for (unsigned i=0; i < NumOperands; ++i) { 5542 SDValue InOp = N->getOperand(i); 5543 if (InputWidened) 5544 InOp = GetWidenedVector(InOp); 5545 for (unsigned j = 0; j < NumInElts; ++j) 5546 Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp, 5547 DAG.getVectorIdxConstant(j, dl)); 5548 } 5549 SDValue UndefVal = DAG.getUNDEF(EltVT); 5550 for (; Idx < WidenNumElts; ++Idx) 5551 Ops[Idx] = UndefVal; 5552 return DAG.getBuildVector(WidenVT, dl, Ops); 5553 } 5554 5555 SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(SDNode *N) { 5556 EVT VT = N->getValueType(0); 5557 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 5558 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 5559 SDValue InOp2 = N->getOperand(1); 5560 SDValue Idx = N->getOperand(2); 5561 SDLoc dl(N); 5562 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WidenVT, InOp1, InOp2, Idx); 5563 } 5564 5565 SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) { 5566 EVT VT = N->getValueType(0); 5567 EVT EltVT = VT.getVectorElementType(); 5568 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 5569 SDValue InOp = N->getOperand(0); 5570 SDValue Idx = N->getOperand(1); 5571 SDLoc dl(N); 5572 5573 auto InOpTypeAction = getTypeAction(InOp.getValueType()); 5574 if (InOpTypeAction == TargetLowering::TypeWidenVector) 5575 InOp = GetWidenedVector(InOp); 5576 5577 EVT InVT = InOp.getValueType(); 5578 5579 // Check if we can just return the input vector after widening. 5580 uint64_t IdxVal = Idx->getAsZExtVal(); 5581 if (IdxVal == 0 && InVT == WidenVT) 5582 return InOp; 5583 5584 // Check if we can extract from the vector. 5585 unsigned WidenNumElts = WidenVT.getVectorMinNumElements(); 5586 unsigned InNumElts = InVT.getVectorMinNumElements(); 5587 unsigned VTNumElts = VT.getVectorMinNumElements(); 5588 assert(IdxVal % VTNumElts == 0 && 5589 "Expected Idx to be a multiple of subvector minimum vector length"); 5590 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts) 5591 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, WidenVT, InOp, Idx); 5592 5593 if (VT.isScalableVector()) { 5594 // Try to split the operation up into smaller extracts and concat the 5595 // results together, e.g. 5596 // nxv6i64 extract_subvector(nxv12i64, 6) 5597 // <-> 5598 // nxv8i64 concat( 5599 // nxv2i64 extract_subvector(nxv16i64, 6) 5600 // nxv2i64 extract_subvector(nxv16i64, 8) 5601 // nxv2i64 extract_subvector(nxv16i64, 10) 5602 // undef) 5603 unsigned GCD = std::gcd(VTNumElts, WidenNumElts); 5604 assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken " 5605 "down type's element count"); 5606 EVT PartVT = EVT::getVectorVT(*DAG.getContext(), EltVT, 5607 ElementCount::getScalable(GCD)); 5608 // Avoid recursion around e.g. nxv1i8. 
5609 if (getTypeAction(PartVT) != TargetLowering::TypeWidenVector) { 5610 SmallVector<SDValue> Parts; 5611 unsigned I = 0; 5612 for (; I < VTNumElts / GCD; ++I) 5613 Parts.push_back( 5614 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, PartVT, InOp, 5615 DAG.getVectorIdxConstant(IdxVal + I * GCD, dl))); 5616 for (; I < WidenNumElts / GCD; ++I) 5617 Parts.push_back(DAG.getUNDEF(PartVT)); 5618 5619 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Parts); 5620 } 5621 5622 report_fatal_error("Don't know how to widen the result of " 5623 "EXTRACT_SUBVECTOR for scalable vectors"); 5624 } 5625 5626 // We could try widening the input to the right length but for now, extract 5627 // the original elements, fill the rest with undefs and build a vector. 5628 SmallVector<SDValue, 16> Ops(WidenNumElts); 5629 unsigned i; 5630 for (i = 0; i < VTNumElts; ++i) 5631 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp, 5632 DAG.getVectorIdxConstant(IdxVal + i, dl)); 5633 5634 SDValue UndefVal = DAG.getUNDEF(EltVT); 5635 for (; i < WidenNumElts; ++i) 5636 Ops[i] = UndefVal; 5637 return DAG.getBuildVector(WidenVT, dl, Ops); 5638 } 5639 5640 SDValue DAGTypeLegalizer::WidenVecRes_AssertZext(SDNode *N) { 5641 SDValue InOp = ModifyToType( 5642 N->getOperand(0), 5643 TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)), true); 5644 return DAG.getNode(ISD::AssertZext, SDLoc(N), InOp.getValueType(), InOp, 5645 N->getOperand(1)); 5646 } 5647 5648 SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) { 5649 SDValue InOp = GetWidenedVector(N->getOperand(0)); 5650 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), 5651 InOp.getValueType(), InOp, 5652 N->getOperand(1), N->getOperand(2)); 5653 } 5654 5655 SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) { 5656 LoadSDNode *LD = cast<LoadSDNode>(N); 5657 ISD::LoadExtType ExtType = LD->getExtensionType(); 5658 5659 // A vector must always be stored in memory as-is, i.e. without any padding 5660 // between the elements, since various code depend on it, e.g. in the 5661 // handling of a bitcast of a vector type to int, which may be done with a 5662 // vector store followed by an integer load. A vector that does not have 5663 // elements that are byte-sized must therefore be stored as an integer 5664 // built out of the extracted vector elements. 5665 if (!LD->getMemoryVT().isByteSized()) { 5666 SDValue Value, NewChain; 5667 std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG); 5668 ReplaceValueWith(SDValue(LD, 0), Value); 5669 ReplaceValueWith(SDValue(LD, 1), NewChain); 5670 return SDValue(); 5671 } 5672 5673 // Generate a vector-predicated load if it is custom/legal on the target. To 5674 // avoid possible recursion, only do this if the widened mask type is legal. 5675 // FIXME: Not all targets may support EVL in VP_LOAD. These will have been 5676 // removed from the IR by the ExpandVectorPredication pass but we're 5677 // reintroducing them here. 
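// The widened load is emitted as a VP_LOAD with an all-ones mask and an
// explicit vector length equal to the original (narrow) element count, so
// the lanes that exist only because of widening are never actually read
// from memory.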
5678 EVT LdVT = LD->getMemoryVT(); 5679 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), LdVT); 5680 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 5681 WideVT.getVectorElementCount()); 5682 if (ExtType == ISD::NON_EXTLOAD && 5683 TLI.isOperationLegalOrCustom(ISD::VP_LOAD, WideVT) && 5684 TLI.isTypeLegal(WideMaskVT)) { 5685 SDLoc DL(N); 5686 SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT); 5687 SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(), 5688 LdVT.getVectorElementCount()); 5689 const auto *MMO = LD->getMemOperand(); 5690 SDValue NewLoad = 5691 DAG.getLoadVP(WideVT, DL, LD->getChain(), LD->getBasePtr(), Mask, EVL, 5692 MMO->getPointerInfo(), MMO->getAlign(), MMO->getFlags(), 5693 MMO->getAAInfo()); 5694 5695 // Modified the chain - switch anything that used the old chain to use 5696 // the new one. 5697 ReplaceValueWith(SDValue(N, 1), NewLoad.getValue(1)); 5698 5699 return NewLoad; 5700 } 5701 5702 SDValue Result; 5703 SmallVector<SDValue, 16> LdChain; // Chain for the series of load 5704 if (ExtType != ISD::NON_EXTLOAD) 5705 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType); 5706 else 5707 Result = GenWidenVectorLoads(LdChain, LD); 5708 5709 if (Result) { 5710 // If we generate a single load, we can use that for the chain. Otherwise, 5711 // build a factor node to remember the multiple loads are independent and 5712 // chain to that. 5713 SDValue NewChain; 5714 if (LdChain.size() == 1) 5715 NewChain = LdChain[0]; 5716 else 5717 NewChain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other, LdChain); 5718 5719 // Modified the chain - switch anything that used the old chain to use 5720 // the new one. 5721 ReplaceValueWith(SDValue(N, 1), NewChain); 5722 5723 return Result; 5724 } 5725 5726 report_fatal_error("Unable to widen vector load"); 5727 } 5728 5729 SDValue DAGTypeLegalizer::WidenVecRes_VP_LOAD(VPLoadSDNode *N) { 5730 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5731 SDValue Mask = N->getMask(); 5732 SDValue EVL = N->getVectorLength(); 5733 ISD::LoadExtType ExtType = N->getExtensionType(); 5734 SDLoc dl(N); 5735 5736 // The mask should be widened as well 5737 assert(getTypeAction(Mask.getValueType()) == 5738 TargetLowering::TypeWidenVector && 5739 "Unable to widen binary VP op"); 5740 Mask = GetWidenedVector(Mask); 5741 assert(Mask.getValueType().getVectorElementCount() == 5742 TLI.getTypeToTransformTo(*DAG.getContext(), Mask.getValueType()) 5743 .getVectorElementCount() && 5744 "Unable to widen vector load"); 5745 5746 SDValue Res = 5747 DAG.getLoadVP(N->getAddressingMode(), ExtType, WidenVT, dl, N->getChain(), 5748 N->getBasePtr(), N->getOffset(), Mask, EVL, 5749 N->getMemoryVT(), N->getMemOperand(), N->isExpandingLoad()); 5750 // Legalize the chain result - switch anything that used the old chain to 5751 // use the new one. 
5752 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 5753 return Res; 5754 } 5755 5756 SDValue DAGTypeLegalizer::WidenVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *N) { 5757 SDLoc DL(N); 5758 5759 // The mask should be widened as well 5760 SDValue Mask = N->getMask(); 5761 assert(getTypeAction(Mask.getValueType()) == 5762 TargetLowering::TypeWidenVector && 5763 "Unable to widen VP strided load"); 5764 Mask = GetWidenedVector(Mask); 5765 5766 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5767 assert(Mask.getValueType().getVectorElementCount() == 5768 WidenVT.getVectorElementCount() && 5769 "Data and mask vectors should have the same number of elements"); 5770 5771 SDValue Res = DAG.getStridedLoadVP( 5772 N->getAddressingMode(), N->getExtensionType(), WidenVT, DL, N->getChain(), 5773 N->getBasePtr(), N->getOffset(), N->getStride(), Mask, 5774 N->getVectorLength(), N->getMemoryVT(), N->getMemOperand(), 5775 N->isExpandingLoad()); 5776 5777 // Legalize the chain result - switch anything that used the old chain to 5778 // use the new one. 5779 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 5780 return Res; 5781 } 5782 5783 SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(SDNode *N) { 5784 SDValue Vec = N->getOperand(0); 5785 SDValue Mask = N->getOperand(1); 5786 SDValue Passthru = N->getOperand(2); 5787 EVT WideVecVT = 5788 TLI.getTypeToTransformTo(*DAG.getContext(), Vec.getValueType()); 5789 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), 5790 Mask.getValueType().getVectorElementType(), 5791 WideVecVT.getVectorNumElements()); 5792 5793 SDValue WideVec = ModifyToType(Vec, WideVecVT); 5794 SDValue WideMask = ModifyToType(Mask, WideMaskVT, /*FillWithZeroes=*/true); 5795 SDValue WidePassthru = ModifyToType(Passthru, WideVecVT); 5796 return DAG.getNode(ISD::VECTOR_COMPRESS, SDLoc(N), WideVecVT, WideVec, 5797 WideMask, WidePassthru); 5798 } 5799 5800 SDValue DAGTypeLegalizer::WidenVecRes_MLOAD(MaskedLoadSDNode *N) { 5801 5802 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),N->getValueType(0)); 5803 SDValue Mask = N->getMask(); 5804 EVT MaskVT = Mask.getValueType(); 5805 SDValue PassThru = GetWidenedVector(N->getPassThru()); 5806 ISD::LoadExtType ExtType = N->getExtensionType(); 5807 SDLoc dl(N); 5808 5809 // The mask should be widened as well 5810 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), 5811 MaskVT.getVectorElementType(), 5812 WidenVT.getVectorNumElements()); 5813 Mask = ModifyToType(Mask, WideMaskVT, true); 5814 5815 SDValue Res = DAG.getMaskedLoad( 5816 WidenVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, 5817 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(), 5818 ExtType, N->isExpandingLoad()); 5819 // Legalize the chain result - switch anything that used the old chain to 5820 // use the new one. 
5821 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 5822 return Res; 5823 } 5824 5825 SDValue DAGTypeLegalizer::WidenVecRes_MGATHER(MaskedGatherSDNode *N) { 5826 5827 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5828 SDValue Mask = N->getMask(); 5829 EVT MaskVT = Mask.getValueType(); 5830 SDValue PassThru = GetWidenedVector(N->getPassThru()); 5831 SDValue Scale = N->getScale(); 5832 unsigned NumElts = WideVT.getVectorNumElements(); 5833 SDLoc dl(N); 5834 5835 // The mask should be widened as well 5836 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), 5837 MaskVT.getVectorElementType(), 5838 WideVT.getVectorNumElements()); 5839 Mask = ModifyToType(Mask, WideMaskVT, true); 5840 5841 // Widen the Index operand 5842 SDValue Index = N->getIndex(); 5843 EVT WideIndexVT = EVT::getVectorVT(*DAG.getContext(), 5844 Index.getValueType().getScalarType(), 5845 NumElts); 5846 Index = ModifyToType(Index, WideIndexVT); 5847 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index, 5848 Scale }; 5849 5850 // Widen the MemoryType 5851 EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(), 5852 N->getMemoryVT().getScalarType(), NumElts); 5853 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other), 5854 WideMemVT, dl, Ops, N->getMemOperand(), 5855 N->getIndexType(), N->getExtensionType()); 5856 5857 // Legalize the chain result - switch anything that used the old chain to 5858 // use the new one. 5859 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 5860 return Res; 5861 } 5862 5863 SDValue DAGTypeLegalizer::WidenVecRes_VP_GATHER(VPGatherSDNode *N) { 5864 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5865 SDValue Mask = N->getMask(); 5866 SDValue Scale = N->getScale(); 5867 ElementCount WideEC = WideVT.getVectorElementCount(); 5868 SDLoc dl(N); 5869 5870 SDValue Index = GetWidenedVector(N->getIndex()); 5871 EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(), 5872 N->getMemoryVT().getScalarType(), WideEC); 5873 Mask = GetWidenedMask(Mask, WideEC); 5874 5875 SDValue Ops[] = {N->getChain(), N->getBasePtr(), Index, Scale, 5876 Mask, N->getVectorLength()}; 5877 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT, 5878 dl, Ops, N->getMemOperand(), N->getIndexType()); 5879 5880 // Legalize the chain result - switch anything that used the old chain to 5881 // use the new one. 5882 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 5883 return Res; 5884 } 5885 5886 SDValue DAGTypeLegalizer::WidenVecRes_ScalarOp(SDNode *N) { 5887 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 5888 if (N->isVPOpcode()) 5889 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0), 5890 N->getOperand(1), N->getOperand(2)); 5891 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0)); 5892 } 5893 5894 // Return true is this is a SETCC node or a strict version of it. 5895 static inline bool isSETCCOp(unsigned Opcode) { 5896 switch (Opcode) { 5897 case ISD::SETCC: 5898 case ISD::STRICT_FSETCC: 5899 case ISD::STRICT_FSETCCS: 5900 return true; 5901 } 5902 return false; 5903 } 5904 5905 // Return true if this is a node that could have two SETCCs as operands. 5906 static inline bool isLogicalMaskOp(unsigned Opcode) { 5907 switch (Opcode) { 5908 case ISD::AND: 5909 case ISD::OR: 5910 case ISD::XOR: 5911 return true; 5912 } 5913 return false; 5914 } 5915 5916 // If N is a SETCC or a strict variant of it, return the type 5917 // of the compare operands. 
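// For STRICT_FSETCC / STRICT_FSETCCS operand 0 is the chain, so the compare
// operands start at index 1; for a plain SETCC they start at index 0.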
5918 static inline EVT getSETCCOperandType(SDValue N) { 5919 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; 5920 return N->getOperand(OpNo).getValueType(); 5921 } 5922 5923 // This is used just for the assert in convertMask(). Check that this is either 5924 // a SETCC or a SETCC previously handled by convertMask(). 5925 #ifndef NDEBUG 5926 static inline bool isSETCCorConvertedSETCC(SDValue N) { 5927 if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR) 5928 N = N.getOperand(0); 5929 else if (N.getOpcode() == ISD::CONCAT_VECTORS) { 5930 for (unsigned i = 1; i < N->getNumOperands(); ++i) 5931 if (!N->getOperand(i)->isUndef()) 5932 return false; 5933 N = N.getOperand(0); 5934 } 5935 5936 if (N.getOpcode() == ISD::TRUNCATE) 5937 N = N.getOperand(0); 5938 else if (N.getOpcode() == ISD::SIGN_EXTEND) 5939 N = N.getOperand(0); 5940 5941 if (isLogicalMaskOp(N.getOpcode())) 5942 return isSETCCorConvertedSETCC(N.getOperand(0)) && 5943 isSETCCorConvertedSETCC(N.getOperand(1)); 5944 5945 return (isSETCCOp(N.getOpcode()) || 5946 ISD::isBuildVectorOfConstantSDNodes(N.getNode())); 5947 } 5948 #endif 5949 5950 // Return a mask of vector type MaskVT to replace InMask. Also adjust the mask 5951 // to ToMaskVT if needed with vector extension or truncation. 5952 SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT, 5953 EVT ToMaskVT) { 5954 // Currently a SETCC or an AND/OR/XOR with two SETCCs is handled. 5955 // FIXME: This code seems to be too restrictive; we might consider 5956 // generalizing it or dropping it. 5957 assert(isSETCCorConvertedSETCC(InMask) && "Unexpected mask argument."); 5958 5959 // Make a new Mask node, with a legal result VT. 5960 SDValue Mask; 5961 SmallVector<SDValue, 4> Ops; 5962 for (unsigned i = 0, e = InMask->getNumOperands(); i < e; ++i) 5963 Ops.push_back(InMask->getOperand(i)); 5964 if (InMask->isStrictFPOpcode()) { 5965 Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask), 5966 { MaskVT, MVT::Other }, Ops); 5967 ReplaceValueWith(InMask.getValue(1), Mask.getValue(1)); 5968 } 5969 else 5970 Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask), MaskVT, Ops); 5971 5972 // If MaskVT has smaller or bigger elements than ToMaskVT, a vector sign 5973 // extend or truncate is needed. 5974 LLVMContext &Ctx = *DAG.getContext(); 5975 unsigned MaskScalarBits = MaskVT.getScalarSizeInBits(); 5976 unsigned ToMaskScalBits = ToMaskVT.getScalarSizeInBits(); 5977 if (MaskScalarBits < ToMaskScalBits) { 5978 EVT ExtVT = EVT::getVectorVT(Ctx, ToMaskVT.getVectorElementType(), 5979 MaskVT.getVectorNumElements()); 5980 Mask = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Mask), ExtVT, Mask); 5981 } else if (MaskScalarBits > ToMaskScalBits) { 5982 EVT TruncVT = EVT::getVectorVT(Ctx, ToMaskVT.getVectorElementType(), 5983 MaskVT.getVectorNumElements()); 5984 Mask = DAG.getNode(ISD::TRUNCATE, SDLoc(Mask), TruncVT, Mask); 5985 } 5986 5987 assert(Mask->getValueType(0).getScalarSizeInBits() == 5988 ToMaskVT.getScalarSizeInBits() && 5989 "Mask should have the right element size by now."); 5990 5991 // Adjust Mask to the right number of elements.
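// If the mask has too many elements, keep only the leading ToMaskVT-sized
// subvector; if it has too few, pad it by concatenating undef subvectors of
// the current mask type.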
5992 unsigned CurrMaskNumEls = Mask->getValueType(0).getVectorNumElements(); 5993 if (CurrMaskNumEls > ToMaskVT.getVectorNumElements()) { 5994 SDValue ZeroIdx = DAG.getVectorIdxConstant(0, SDLoc(Mask)); 5995 Mask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Mask), ToMaskVT, Mask, 5996 ZeroIdx); 5997 } else if (CurrMaskNumEls < ToMaskVT.getVectorNumElements()) { 5998 unsigned NumSubVecs = (ToMaskVT.getVectorNumElements() / CurrMaskNumEls); 5999 EVT SubVT = Mask->getValueType(0); 6000 SmallVector<SDValue, 16> SubOps(NumSubVecs, DAG.getUNDEF(SubVT)); 6001 SubOps[0] = Mask; 6002 Mask = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Mask), ToMaskVT, SubOps); 6003 } 6004 6005 assert((Mask->getValueType(0) == ToMaskVT) && 6006 "A mask of ToMaskVT should have been produced by now."); 6007 6008 return Mask; 6009 } 6010 6011 // This method tries to handle some special cases for the vselect mask 6012 // and, if needed, adjusts the mask vector type to match that of the VSELECT. 6013 // Without it, many cases end up with scalarization of the SETCC, with many 6014 // unnecessary instructions. 6015 SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) { 6016 LLVMContext &Ctx = *DAG.getContext(); 6017 SDValue Cond = N->getOperand(0); 6018 6019 if (N->getOpcode() != ISD::VSELECT) 6020 return SDValue(); 6021 6022 if (!isSETCCOp(Cond->getOpcode()) && !isLogicalMaskOp(Cond->getOpcode())) 6023 return SDValue(); 6024 6025 // If this is a split VSELECT that was already handled, do 6026 // nothing. 6027 EVT CondVT = Cond->getValueType(0); 6028 if (CondVT.getScalarSizeInBits() != 1) 6029 return SDValue(); 6030 6031 EVT VSelVT = N->getValueType(0); 6032 6033 // This method can't handle scalable vector types. 6034 // FIXME: This support could be added in the future. 6035 if (VSelVT.isScalableVector()) 6036 return SDValue(); 6037 6038 // Only handle vector types whose size in bits is a power of 2. 6039 if (!isPowerOf2_64(VSelVT.getSizeInBits())) 6040 return SDValue(); 6041 6042 // Don't touch if this will be scalarized. 6043 EVT FinalVT = VSelVT; 6044 while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector) 6045 FinalVT = FinalVT.getHalfNumVectorElementsVT(Ctx); 6046 6047 if (FinalVT.getVectorNumElements() == 1) 6048 return SDValue(); 6049 6050 // If there is support for an i1 vector mask, don't touch. 6051 if (isSETCCOp(Cond.getOpcode())) { 6052 EVT SetCCOpVT = getSETCCOperandType(Cond); 6053 while (TLI.getTypeAction(Ctx, SetCCOpVT) != TargetLowering::TypeLegal) 6054 SetCCOpVT = TLI.getTypeToTransformTo(Ctx, SetCCOpVT); 6055 EVT SetCCResVT = getSetCCResultType(SetCCOpVT); 6056 if (SetCCResVT.getScalarSizeInBits() == 1) 6057 return SDValue(); 6058 } else if (CondVT.getScalarType() == MVT::i1) { 6059 // If there is support for an i1 vector mask (or only scalar i1 conditions), 6060 // don't touch. 6061 while (TLI.getTypeAction(Ctx, CondVT) != TargetLowering::TypeLegal) 6062 CondVT = TLI.getTypeToTransformTo(Ctx, CondVT); 6063 6064 if (CondVT.getScalarType() == MVT::i1) 6065 return SDValue(); 6066 } 6067 6068 // Widen the vselect result type if needed. 6069 if (getTypeAction(VSelVT) == TargetLowering::TypeWidenVector) 6070 VSelVT = TLI.getTypeToTransformTo(Ctx, VSelVT); 6071 6072 // The mask of the VSELECT should have integer elements.
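// e.g. for a VSELECT widened to v4f32 the mask is built as v4i32: the
// element count of the (possibly widened) result is kept while the element
// type is switched to integer.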
6073 EVT ToMaskVT = VSelVT; 6074 if (!ToMaskVT.getScalarType().isInteger()) 6075 ToMaskVT = ToMaskVT.changeVectorElementTypeToInteger(); 6076 6077 SDValue Mask; 6078 if (isSETCCOp(Cond->getOpcode())) { 6079 EVT MaskVT = getSetCCResultType(getSETCCOperandType(Cond)); 6080 Mask = convertMask(Cond, MaskVT, ToMaskVT); 6081 } else if (isLogicalMaskOp(Cond->getOpcode()) && 6082 isSETCCOp(Cond->getOperand(0).getOpcode()) && 6083 isSETCCOp(Cond->getOperand(1).getOpcode())) { 6084 // Cond is (AND/OR/XOR (SETCC, SETCC)) 6085 SDValue SETCC0 = Cond->getOperand(0); 6086 SDValue SETCC1 = Cond->getOperand(1); 6087 EVT VT0 = getSetCCResultType(getSETCCOperandType(SETCC0)); 6088 EVT VT1 = getSetCCResultType(getSETCCOperandType(SETCC1)); 6089 unsigned ScalarBits0 = VT0.getScalarSizeInBits(); 6090 unsigned ScalarBits1 = VT1.getScalarSizeInBits(); 6091 unsigned ScalarBits_ToMask = ToMaskVT.getScalarSizeInBits(); 6092 EVT MaskVT; 6093 // If the two SETCCs have different VTs, either extend/truncate one of 6094 // them to the other "towards" ToMaskVT, or truncate one and extend the 6095 // other to ToMaskVT. 6096 if (ScalarBits0 != ScalarBits1) { 6097 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1); 6098 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0); 6099 if (ScalarBits_ToMask >= WideVT.getScalarSizeInBits()) 6100 MaskVT = WideVT; 6101 else if (ScalarBits_ToMask <= NarrowVT.getScalarSizeInBits()) 6102 MaskVT = NarrowVT; 6103 else 6104 MaskVT = ToMaskVT; 6105 } else 6106 // If the two SETCCs have the same VT, don't change it. 6107 MaskVT = VT0; 6108 6109 // Make new SETCCs and logical nodes. 6110 SETCC0 = convertMask(SETCC0, VT0, MaskVT); 6111 SETCC1 = convertMask(SETCC1, VT1, MaskVT); 6112 Cond = DAG.getNode(Cond->getOpcode(), SDLoc(Cond), MaskVT, SETCC0, SETCC1); 6113 6114 // Convert the logical op for VSELECT if needed. 6115 Mask = convertMask(Cond, MaskVT, ToMaskVT); 6116 } else 6117 return SDValue(); 6118 6119 return Mask; 6120 } 6121 6122 SDValue DAGTypeLegalizer::WidenVecRes_Select(SDNode *N) { 6123 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 6124 ElementCount WidenEC = WidenVT.getVectorElementCount(); 6125 6126 SDValue Cond1 = N->getOperand(0); 6127 EVT CondVT = Cond1.getValueType(); 6128 unsigned Opcode = N->getOpcode(); 6129 if (CondVT.isVector()) { 6130 if (SDValue WideCond = WidenVSELECTMask(N)) { 6131 SDValue InOp1 = GetWidenedVector(N->getOperand(1)); 6132 SDValue InOp2 = GetWidenedVector(N->getOperand(2)); 6133 assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT); 6134 return DAG.getNode(Opcode, SDLoc(N), WidenVT, WideCond, InOp1, InOp2); 6135 } 6136 6137 EVT CondEltVT = CondVT.getVectorElementType(); 6138 EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(), CondEltVT, WidenEC); 6139 if (getTypeAction(CondVT) == TargetLowering::TypeWidenVector) 6140 Cond1 = GetWidenedVector(Cond1); 6141 6142 // If we have to split the condition there is no point in widening the 6143 // select. This would result in an cycle of widening the select -> 6144 // widening the condition operand -> splitting the condition operand -> 6145 // splitting the select -> widening the select. Instead split this select 6146 // further and widen the resulting type. 
6147 if (getTypeAction(CondVT) == TargetLowering::TypeSplitVector) { 6148 SDValue SplitSelect = SplitVecOp_VSELECT(N, 0); 6149 SDValue Res = ModifyToType(SplitSelect, WidenVT); 6150 return Res; 6151 } 6152 6153 if (Cond1.getValueType() != CondWidenVT) 6154 Cond1 = ModifyToType(Cond1, CondWidenVT); 6155 } 6156 6157 SDValue InOp1 = GetWidenedVector(N->getOperand(1)); 6158 SDValue InOp2 = GetWidenedVector(N->getOperand(2)); 6159 assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT); 6160 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE) 6161 return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2, 6162 N->getOperand(3)); 6163 return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2); 6164 } 6165 6166 SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) { 6167 SDValue InOp1 = GetWidenedVector(N->getOperand(2)); 6168 SDValue InOp2 = GetWidenedVector(N->getOperand(3)); 6169 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), 6170 InOp1.getValueType(), N->getOperand(0), 6171 N->getOperand(1), InOp1, InOp2, N->getOperand(4)); 6172 } 6173 6174 SDValue DAGTypeLegalizer::WidenVecRes_UNDEF(SDNode *N) { 6175 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 6176 return DAG.getUNDEF(WidenVT); 6177 } 6178 6179 SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) { 6180 EVT VT = N->getValueType(0); 6181 SDLoc dl(N); 6182 6183 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 6184 unsigned NumElts = VT.getVectorNumElements(); 6185 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 6186 6187 SDValue InOp1 = GetWidenedVector(N->getOperand(0)); 6188 SDValue InOp2 = GetWidenedVector(N->getOperand(1)); 6189 6190 // Adjust mask based on new input vector length. 
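  // For example, a v2i32 shuffle with mask <1,3> whose inputs widen to v4i32
  // becomes a v4i32 shuffle with mask <1,5,-1,-1>: the element taken from the
  // second input moves from index 3 to index 3 - NumElts + WidenNumElts = 5.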
6191 SmallVector<int, 16> NewMask; 6192 for (unsigned i = 0; i != NumElts; ++i) { 6193 int Idx = N->getMaskElt(i); 6194 if (Idx < (int)NumElts) 6195 NewMask.push_back(Idx); 6196 else 6197 NewMask.push_back(Idx - NumElts + WidenNumElts); 6198 } 6199 for (unsigned i = NumElts; i != WidenNumElts; ++i) 6200 NewMask.push_back(-1); 6201 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask); 6202 } 6203 6204 SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_REVERSE(SDNode *N) { 6205 EVT VT = N->getValueType(0); 6206 EVT EltVT = VT.getVectorElementType(); 6207 SDLoc dl(N); 6208 6209 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 6210 SDValue OpValue = GetWidenedVector(N->getOperand(0)); 6211 assert(WidenVT == OpValue.getValueType() && "Unexpected widened vector type"); 6212 6213 SDValue ReverseVal = DAG.getNode(ISD::VECTOR_REVERSE, dl, WidenVT, OpValue); 6214 unsigned WidenNumElts = WidenVT.getVectorMinNumElements(); 6215 unsigned VTNumElts = VT.getVectorMinNumElements(); 6216 unsigned IdxVal = WidenNumElts - VTNumElts; 6217 6218 if (VT.isScalableVector()) { 6219 // Try to split the 'Widen ReverseVal' into smaller extracts and concat the 6220 // results together, e.g.(nxv6i64 -> nxv8i64) 6221 // nxv8i64 vector_reverse 6222 // <-> 6223 // nxv8i64 concat( 6224 // nxv2i64 extract_subvector(nxv8i64, 2) 6225 // nxv2i64 extract_subvector(nxv8i64, 4) 6226 // nxv2i64 extract_subvector(nxv8i64, 6) 6227 // nxv2i64 undef) 6228 6229 unsigned GCD = std::gcd(VTNumElts, WidenNumElts); 6230 EVT PartVT = EVT::getVectorVT(*DAG.getContext(), EltVT, 6231 ElementCount::getScalable(GCD)); 6232 assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken " 6233 "down type's element count"); 6234 SmallVector<SDValue> Parts; 6235 unsigned i = 0; 6236 for (; i < VTNumElts / GCD; ++i) 6237 Parts.push_back( 6238 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, PartVT, ReverseVal, 6239 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl))); 6240 for (; i < WidenNumElts / GCD; ++i) 6241 Parts.push_back(DAG.getUNDEF(PartVT)); 6242 6243 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Parts); 6244 } 6245 6246 // Use VECTOR_SHUFFLE to combine new vector from 'ReverseVal' for 6247 // fixed-vectors. 6248 SmallVector<int, 16> Mask; 6249 for (unsigned i = 0; i != VTNumElts; ++i) { 6250 Mask.push_back(IdxVal + i); 6251 } 6252 for (unsigned i = VTNumElts; i != WidenNumElts; ++i) 6253 Mask.push_back(-1); 6254 6255 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT), 6256 Mask); 6257 } 6258 6259 SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) { 6260 assert(N->getValueType(0).isVector() && 6261 N->getOperand(0).getValueType().isVector() && 6262 "Operands must be vectors"); 6263 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); 6264 ElementCount WidenEC = WidenVT.getVectorElementCount(); 6265 6266 SDValue InOp1 = N->getOperand(0); 6267 EVT InVT = InOp1.getValueType(); 6268 assert(InVT.isVector() && "can not widen non-vector type"); 6269 EVT WidenInVT = 6270 EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), WidenEC); 6271 6272 // The input and output types often differ here, and it could be that while 6273 // we'd prefer to widen the result type, the input operands have been split. 6274 // In this case, we also need to split the result of this node as well. 
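  // SplitVecOp_VSETCC produces the result in the original, narrower type;
  // ModifyToType below then pads it back out to WidenVT.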
6275 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) { 6276 SDValue SplitVSetCC = SplitVecOp_VSETCC(N); 6277 SDValue Res = ModifyToType(SplitVSetCC, WidenVT); 6278 return Res; 6279 } 6280 6281 // If the inputs also widen, handle them directly. Otherwise widen by hand. 6282 SDValue InOp2 = N->getOperand(1); 6283 if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) { 6284 InOp1 = GetWidenedVector(InOp1); 6285 InOp2 = GetWidenedVector(InOp2); 6286 } else { 6287 InOp1 = DAG.WidenVector(InOp1, SDLoc(N)); 6288 InOp2 = DAG.WidenVector(InOp2, SDLoc(N)); 6289 } 6290 6291 // Assume that the input and output will be widen appropriately. If not, 6292 // we will have to unroll it at some point. 6293 assert(InOp1.getValueType() == WidenInVT && 6294 InOp2.getValueType() == WidenInVT && 6295 "Input not widened to expected type!"); 6296 (void)WidenInVT; 6297 if (N->getOpcode() == ISD::VP_SETCC) { 6298 SDValue Mask = 6299 GetWidenedMask(N->getOperand(3), WidenVT.getVectorElementCount()); 6300 return DAG.getNode(ISD::VP_SETCC, SDLoc(N), WidenVT, InOp1, InOp2, 6301 N->getOperand(2), Mask, N->getOperand(4)); 6302 } 6303 return DAG.getNode(ISD::SETCC, SDLoc(N), WidenVT, InOp1, InOp2, 6304 N->getOperand(2)); 6305 } 6306 6307 SDValue DAGTypeLegalizer::WidenVecRes_STRICT_FSETCC(SDNode *N) { 6308 assert(N->getValueType(0).isVector() && 6309 N->getOperand(1).getValueType().isVector() && 6310 "Operands must be vectors"); 6311 EVT VT = N->getValueType(0); 6312 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 6313 unsigned WidenNumElts = WidenVT.getVectorNumElements(); 6314 unsigned NumElts = VT.getVectorNumElements(); 6315 EVT EltVT = VT.getVectorElementType(); 6316 6317 SDLoc dl(N); 6318 SDValue Chain = N->getOperand(0); 6319 SDValue LHS = N->getOperand(1); 6320 SDValue RHS = N->getOperand(2); 6321 SDValue CC = N->getOperand(3); 6322 EVT TmpEltVT = LHS.getValueType().getVectorElementType(); 6323 6324 // Fully unroll and reassemble. 6325 SmallVector<SDValue, 8> Scalars(WidenNumElts, DAG.getUNDEF(EltVT)); 6326 SmallVector<SDValue, 8> Chains(NumElts); 6327 for (unsigned i = 0; i != NumElts; ++i) { 6328 SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS, 6329 DAG.getVectorIdxConstant(i, dl)); 6330 SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS, 6331 DAG.getVectorIdxConstant(i, dl)); 6332 6333 Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other}, 6334 {Chain, LHSElem, RHSElem, CC}); 6335 Chains[i] = Scalars[i].getValue(1); 6336 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i], 6337 DAG.getBoolConstant(true, dl, EltVT, VT), 6338 DAG.getBoolConstant(false, dl, EltVT, VT)); 6339 } 6340 6341 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 6342 ReplaceValueWith(SDValue(N, 1), NewChain); 6343 6344 return DAG.getBuildVector(WidenVT, dl, Scalars); 6345 } 6346 6347 //===----------------------------------------------------------------------===// 6348 // Widen Vector Operand 6349 //===----------------------------------------------------------------------===// 6350 bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) { 6351 LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG)); 6352 SDValue Res = SDValue(); 6353 6354 // See if the target wants to custom widen this node. 
6355 if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false)) 6356 return false; 6357 6358 switch (N->getOpcode()) { 6359 default: 6360 #ifndef NDEBUG 6361 dbgs() << "WidenVectorOperand op #" << OpNo << ": "; 6362 N->dump(&DAG); 6363 dbgs() << "\n"; 6364 #endif 6365 report_fatal_error("Do not know how to widen this operator's operand!"); 6366 6367 case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break; 6368 case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break; 6369 case ISD::INSERT_SUBVECTOR: Res = WidenVecOp_INSERT_SUBVECTOR(N); break; 6370 case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break; 6371 case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break; 6372 case ISD::STORE: Res = WidenVecOp_STORE(N); break; 6373 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(N, OpNo); break; 6374 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: 6375 Res = WidenVecOp_VP_STRIDED_STORE(N, OpNo); 6376 break; 6377 case ISD::ANY_EXTEND_VECTOR_INREG: 6378 case ISD::SIGN_EXTEND_VECTOR_INREG: 6379 case ISD::ZERO_EXTEND_VECTOR_INREG: 6380 Res = WidenVecOp_EXTEND_VECTOR_INREG(N); 6381 break; 6382 case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break; 6383 case ISD::MGATHER: Res = WidenVecOp_MGATHER(N, OpNo); break; 6384 case ISD::MSCATTER: Res = WidenVecOp_MSCATTER(N, OpNo); break; 6385 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(N, OpNo); break; 6386 case ISD::SETCC: Res = WidenVecOp_SETCC(N); break; 6387 case ISD::STRICT_FSETCC: 6388 case ISD::STRICT_FSETCCS: Res = WidenVecOp_STRICT_FSETCC(N); break; 6389 case ISD::VSELECT: Res = WidenVecOp_VSELECT(N); break; 6390 case ISD::FLDEXP: 6391 case ISD::FCOPYSIGN: 6392 case ISD::LRINT: 6393 case ISD::LLRINT: 6394 Res = WidenVecOp_UnrollVectorOp(N); 6395 break; 6396 case ISD::IS_FPCLASS: Res = WidenVecOp_IS_FPCLASS(N); break; 6397 6398 case ISD::ANY_EXTEND: 6399 case ISD::SIGN_EXTEND: 6400 case ISD::ZERO_EXTEND: 6401 Res = WidenVecOp_EXTEND(N); 6402 break; 6403 6404 case ISD::SCMP: 6405 case ISD::UCMP: 6406 Res = WidenVecOp_CMP(N); 6407 break; 6408 6409 case ISD::FP_EXTEND: 6410 case ISD::STRICT_FP_EXTEND: 6411 case ISD::FP_ROUND: 6412 case ISD::STRICT_FP_ROUND: 6413 case ISD::FP_TO_SINT: 6414 case ISD::STRICT_FP_TO_SINT: 6415 case ISD::FP_TO_UINT: 6416 case ISD::STRICT_FP_TO_UINT: 6417 case ISD::SINT_TO_FP: 6418 case ISD::STRICT_SINT_TO_FP: 6419 case ISD::UINT_TO_FP: 6420 case ISD::STRICT_UINT_TO_FP: 6421 case ISD::TRUNCATE: 6422 Res = WidenVecOp_Convert(N); 6423 break; 6424 6425 case ISD::FP_TO_SINT_SAT: 6426 case ISD::FP_TO_UINT_SAT: 6427 Res = WidenVecOp_FP_TO_XINT_SAT(N); 6428 break; 6429 6430 case ISD::EXPERIMENTAL_VP_SPLAT: 6431 Res = WidenVecOp_VP_SPLAT(N, OpNo); 6432 break; 6433 6434 case ISD::VECREDUCE_FADD: 6435 case ISD::VECREDUCE_FMUL: 6436 case ISD::VECREDUCE_ADD: 6437 case ISD::VECREDUCE_MUL: 6438 case ISD::VECREDUCE_AND: 6439 case ISD::VECREDUCE_OR: 6440 case ISD::VECREDUCE_XOR: 6441 case ISD::VECREDUCE_SMAX: 6442 case ISD::VECREDUCE_SMIN: 6443 case ISD::VECREDUCE_UMAX: 6444 case ISD::VECREDUCE_UMIN: 6445 case ISD::VECREDUCE_FMAX: 6446 case ISD::VECREDUCE_FMIN: 6447 case ISD::VECREDUCE_FMAXIMUM: 6448 case ISD::VECREDUCE_FMINIMUM: 6449 Res = WidenVecOp_VECREDUCE(N); 6450 break; 6451 case ISD::VECREDUCE_SEQ_FADD: 6452 case ISD::VECREDUCE_SEQ_FMUL: 6453 Res = WidenVecOp_VECREDUCE_SEQ(N); 6454 break; 6455 case ISD::VP_REDUCE_FADD: 6456 case ISD::VP_REDUCE_SEQ_FADD: 6457 case ISD::VP_REDUCE_FMUL: 6458 case ISD::VP_REDUCE_SEQ_FMUL: 6459 case ISD::VP_REDUCE_ADD: 6460 case ISD::VP_REDUCE_MUL: 6461 
case ISD::VP_REDUCE_AND: 6462 case ISD::VP_REDUCE_OR: 6463 case ISD::VP_REDUCE_XOR: 6464 case ISD::VP_REDUCE_SMAX: 6465 case ISD::VP_REDUCE_SMIN: 6466 case ISD::VP_REDUCE_UMAX: 6467 case ISD::VP_REDUCE_UMIN: 6468 case ISD::VP_REDUCE_FMAX: 6469 case ISD::VP_REDUCE_FMIN: 6470 case ISD::VP_REDUCE_FMAXIMUM: 6471 case ISD::VP_REDUCE_FMINIMUM: 6472 Res = WidenVecOp_VP_REDUCE(N); 6473 break; 6474 case ISD::VP_CTTZ_ELTS: 6475 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF: 6476 Res = WidenVecOp_VP_CttzElements(N); 6477 break; 6478 } 6479 6480 // If Res is null, the sub-method took care of registering the result. 6481 if (!Res.getNode()) return false; 6482 6483 // If the result is N, the sub-method updated N in place. Tell the legalizer 6484 // core about this. 6485 if (Res.getNode() == N) 6486 return true; 6487 6488 6489 if (N->isStrictFPOpcode()) 6490 assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 2 && 6491 "Invalid operand expansion"); 6492 else 6493 assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && 6494 "Invalid operand expansion"); 6495 6496 ReplaceValueWith(SDValue(N, 0), Res); 6497 return false; 6498 } 6499 6500 SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) { 6501 SDLoc DL(N); 6502 EVT VT = N->getValueType(0); 6503 6504 SDValue InOp = N->getOperand(0); 6505 assert(getTypeAction(InOp.getValueType()) == 6506 TargetLowering::TypeWidenVector && 6507 "Unexpected type action"); 6508 InOp = GetWidenedVector(InOp); 6509 assert(VT.getVectorNumElements() < 6510 InOp.getValueType().getVectorNumElements() && 6511 "Input wasn't widened!"); 6512 6513 // We may need to further widen the operand until it has the same total 6514 // vector size as the result. 6515 EVT InVT = InOp.getValueType(); 6516 if (InVT.getSizeInBits() != VT.getSizeInBits()) { 6517 EVT InEltVT = InVT.getVectorElementType(); 6518 for (EVT FixedVT : MVT::vector_valuetypes()) { 6519 EVT FixedEltVT = FixedVT.getVectorElementType(); 6520 if (TLI.isTypeLegal(FixedVT) && 6521 FixedVT.getSizeInBits() == VT.getSizeInBits() && 6522 FixedEltVT == InEltVT) { 6523 assert(FixedVT.getVectorNumElements() >= VT.getVectorNumElements() && 6524 "Not enough elements in the fixed type for the operand!"); 6525 assert(FixedVT.getVectorNumElements() != InVT.getVectorNumElements() && 6526 "We can't have the same type as we started with!"); 6527 if (FixedVT.getVectorNumElements() > InVT.getVectorNumElements()) 6528 InOp = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, FixedVT, 6529 DAG.getUNDEF(FixedVT), InOp, 6530 DAG.getVectorIdxConstant(0, DL)); 6531 else 6532 InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, FixedVT, InOp, 6533 DAG.getVectorIdxConstant(0, DL)); 6534 break; 6535 } 6536 } 6537 InVT = InOp.getValueType(); 6538 if (InVT.getSizeInBits() != VT.getSizeInBits()) 6539 // We couldn't find a legal vector type that was a widening of the input 6540 // and could be extended in-register to the result type, so we have to 6541 // scalarize. 6542 return WidenVecOp_Convert(N); 6543 } 6544 6545 // Use special DAG nodes to represent the operation of extending the 6546 // low lanes. 
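  // For example, sign-extending v4i8 to v4i32 with the operand widened to
  // v16i8 becomes a SIGN_EXTEND_VECTOR_INREG of the v16i8 value, which
  // extends only the low four lanes.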
6547 switch (N->getOpcode()) { 6548 default: 6549 llvm_unreachable("Extend legalization on extend operation!"); 6550 case ISD::ANY_EXTEND: 6551 return DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, InOp); 6552 case ISD::SIGN_EXTEND: 6553 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, InOp); 6554 case ISD::ZERO_EXTEND: 6555 return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, InOp); 6556 } 6557 } 6558 6559 SDValue DAGTypeLegalizer::WidenVecOp_CMP(SDNode *N) { 6560 SDLoc dl(N); 6561 6562 EVT OpVT = N->getOperand(0).getValueType(); 6563 EVT ResVT = N->getValueType(0); 6564 SDValue LHS = GetWidenedVector(N->getOperand(0)); 6565 SDValue RHS = GetWidenedVector(N->getOperand(1)); 6566 6567 // 1. EXTRACT_SUBVECTOR 6568 // 2. SIGN_EXTEND/ZERO_EXTEND 6569 // 3. CMP 6570 LHS = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, LHS, 6571 DAG.getVectorIdxConstant(0, dl)); 6572 RHS = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, RHS, 6573 DAG.getVectorIdxConstant(0, dl)); 6574 6575 // At this point the result type is guaranteed to be valid, so we can use it 6576 // as the operand type by extending it appropriately 6577 ISD::NodeType ExtendOpcode = 6578 N->getOpcode() == ISD::SCMP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6579 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS); 6580 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS); 6581 6582 return DAG.getNode(N->getOpcode(), dl, ResVT, LHS, RHS); 6583 } 6584 6585 SDValue DAGTypeLegalizer::WidenVecOp_UnrollVectorOp(SDNode *N) { 6586 // The result (and first input) is legal, but the second input is illegal. 6587 // We can't do much to fix that, so just unroll and let the extracts off of 6588 // the second input be widened as needed later. 6589 return DAG.UnrollVectorOp(N); 6590 } 6591 6592 SDValue DAGTypeLegalizer::WidenVecOp_IS_FPCLASS(SDNode *N) { 6593 SDLoc DL(N); 6594 EVT ResultVT = N->getValueType(0); 6595 SDValue Test = N->getOperand(1); 6596 SDValue WideArg = GetWidenedVector(N->getOperand(0)); 6597 6598 // Process this node similarly to SETCC. 6599 EVT WideResultVT = getSetCCResultType(WideArg.getValueType()); 6600 if (ResultVT.getScalarType() == MVT::i1) 6601 WideResultVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 6602 WideResultVT.getVectorNumElements()); 6603 6604 SDValue WideNode = DAG.getNode(ISD::IS_FPCLASS, DL, WideResultVT, 6605 {WideArg, Test}, N->getFlags()); 6606 6607 // Extract the needed results from the result vector. 6608 EVT ResVT = 6609 EVT::getVectorVT(*DAG.getContext(), WideResultVT.getVectorElementType(), 6610 ResultVT.getVectorNumElements()); 6611 SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, WideNode, 6612 DAG.getVectorIdxConstant(0, DL)); 6613 6614 EVT OpVT = N->getOperand(0).getValueType(); 6615 ISD::NodeType ExtendCode = 6616 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT)); 6617 return DAG.getNode(ExtendCode, DL, ResultVT, CC); 6618 } 6619 6620 SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) { 6621 // Since the result is legal and the input is illegal. 6622 EVT VT = N->getValueType(0); 6623 EVT EltVT = VT.getVectorElementType(); 6624 SDLoc dl(N); 6625 SDValue InOp = N->getOperand(N->isStrictFPOpcode() ? 1 : 0); 6626 assert(getTypeAction(InOp.getValueType()) == 6627 TargetLowering::TypeWidenVector && 6628 "Unexpected type action"); 6629 InOp = GetWidenedVector(InOp); 6630 EVT InVT = InOp.getValueType(); 6631 unsigned Opcode = N->getOpcode(); 6632 6633 // See if a widened result type would be legal, if so widen the node. 6634 // FIXME: This isn't safe for StrictFP. 
Other optimization here is needed. 6635 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, 6636 InVT.getVectorElementCount()); 6637 if (TLI.isTypeLegal(WideVT) && !N->isStrictFPOpcode()) { 6638 SDValue Res; 6639 if (N->isStrictFPOpcode()) { 6640 if (Opcode == ISD::STRICT_FP_ROUND) 6641 Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other }, 6642 { N->getOperand(0), InOp, N->getOperand(2) }); 6643 else 6644 Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other }, 6645 { N->getOperand(0), InOp }); 6646 // Legalize the chain result - switch anything that used the old chain to 6647 // use the new one. 6648 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 6649 } else { 6650 if (Opcode == ISD::FP_ROUND) 6651 Res = DAG.getNode(Opcode, dl, WideVT, InOp, N->getOperand(1)); 6652 else 6653 Res = DAG.getNode(Opcode, dl, WideVT, InOp); 6654 } 6655 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res, 6656 DAG.getVectorIdxConstant(0, dl)); 6657 } 6658 6659 EVT InEltVT = InVT.getVectorElementType(); 6660 6661 // Unroll the convert into some scalar code and create a nasty build vector. 6662 unsigned NumElts = VT.getVectorNumElements(); 6663 SmallVector<SDValue, 16> Ops(NumElts); 6664 if (N->isStrictFPOpcode()) { 6665 SmallVector<SDValue, 4> NewOps(N->op_begin(), N->op_end()); 6666 SmallVector<SDValue, 32> OpChains; 6667 for (unsigned i=0; i < NumElts; ++i) { 6668 NewOps[1] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp, 6669 DAG.getVectorIdxConstant(i, dl)); 6670 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps); 6671 OpChains.push_back(Ops[i].getValue(1)); 6672 } 6673 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains); 6674 ReplaceValueWith(SDValue(N, 1), NewChain); 6675 } else { 6676 for (unsigned i = 0; i < NumElts; ++i) 6677 Ops[i] = DAG.getNode(Opcode, dl, EltVT, 6678 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, 6679 InOp, DAG.getVectorIdxConstant(i, dl))); 6680 } 6681 6682 return DAG.getBuildVector(VT, dl, Ops); 6683 } 6684 6685 SDValue DAGTypeLegalizer::WidenVecOp_FP_TO_XINT_SAT(SDNode *N) { 6686 EVT DstVT = N->getValueType(0); 6687 SDValue Src = GetWidenedVector(N->getOperand(0)); 6688 EVT SrcVT = Src.getValueType(); 6689 ElementCount WideNumElts = SrcVT.getVectorElementCount(); 6690 SDLoc dl(N); 6691 6692 // See if a widened result type would be legal, if so widen the node. 6693 EVT WideDstVT = EVT::getVectorVT(*DAG.getContext(), 6694 DstVT.getVectorElementType(), WideNumElts); 6695 if (TLI.isTypeLegal(WideDstVT)) { 6696 SDValue Res = 6697 DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1)); 6698 return DAG.getNode( 6699 ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res, 6700 DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))); 6701 } 6702 6703 // Give up and unroll. 6704 return DAG.UnrollVectorOp(N); 6705 } 6706 6707 SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) { 6708 EVT VT = N->getValueType(0); 6709 SDValue InOp = GetWidenedVector(N->getOperand(0)); 6710 EVT InWidenVT = InOp.getValueType(); 6711 SDLoc dl(N); 6712 6713 // Check if we can convert between two legal vector types and extract. 6714 TypeSize InWidenSize = InWidenVT.getSizeInBits(); 6715 TypeSize Size = VT.getSizeInBits(); 6716 // x86mmx is not an acceptable vector element type, so don't try. 
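  // For example, a bitcast of v2f32 to i64 whose operand widened to v4f32 can
  // be rewritten (when v2i64 is legal) as a bitcast to v2i64 followed by an
  // extract of element 0.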
6717 if (!VT.isVector() && VT != MVT::x86mmx && 6718 InWidenSize.hasKnownScalarFactor(Size)) { 6719 unsigned NewNumElts = InWidenSize.getKnownScalarFactor(Size); 6720 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts); 6721 if (TLI.isTypeLegal(NewVT)) { 6722 SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp); 6723 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp, 6724 DAG.getVectorIdxConstant(0, dl)); 6725 } 6726 } 6727 6728 // Handle a case like bitcast v12i8 -> v3i32. Normally that would get widened 6729 // to v16i8 -> v4i32, but for a target where v3i32 is legal but v12i8 is not, 6730 // we end up here. Handling the case here with EXTRACT_SUBVECTOR avoids 6731 // having to copy via memory. 6732 if (VT.isVector()) { 6733 EVT EltVT = VT.getVectorElementType(); 6734 unsigned EltSize = EltVT.getFixedSizeInBits(); 6735 if (InWidenSize.isKnownMultipleOf(EltSize)) { 6736 ElementCount NewNumElts = 6737 (InWidenVT.getVectorElementCount() * InWidenVT.getScalarSizeInBits()) 6738 .divideCoefficientBy(EltSize); 6739 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NewNumElts); 6740 if (TLI.isTypeLegal(NewVT)) { 6741 SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp); 6742 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, BitOp, 6743 DAG.getVectorIdxConstant(0, dl)); 6744 } 6745 } 6746 } 6747 6748 return CreateStackStoreLoad(InOp, VT); 6749 } 6750 6751 SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) { 6752 EVT VT = N->getValueType(0); 6753 EVT EltVT = VT.getVectorElementType(); 6754 EVT InVT = N->getOperand(0).getValueType(); 6755 SDLoc dl(N); 6756 6757 // If the widen width for this operand is the same as the width of the concat 6758 // and all but the first operand is undef, just use the widened operand. 6759 unsigned NumOperands = N->getNumOperands(); 6760 if (VT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) { 6761 unsigned i; 6762 for (i = 1; i < NumOperands; ++i) 6763 if (!N->getOperand(i).isUndef()) 6764 break; 6765 6766 if (i == NumOperands) 6767 return GetWidenedVector(N->getOperand(0)); 6768 } 6769 6770 // Otherwise, fall back to a nasty build vector. 6771 unsigned NumElts = VT.getVectorNumElements(); 6772 SmallVector<SDValue, 16> Ops(NumElts); 6773 6774 unsigned NumInElts = InVT.getVectorNumElements(); 6775 6776 unsigned Idx = 0; 6777 for (unsigned i=0; i < NumOperands; ++i) { 6778 SDValue InOp = N->getOperand(i); 6779 assert(getTypeAction(InOp.getValueType()) == 6780 TargetLowering::TypeWidenVector && 6781 "Unexpected type action"); 6782 InOp = GetWidenedVector(InOp); 6783 for (unsigned j = 0; j < NumInElts; ++j) 6784 Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp, 6785 DAG.getVectorIdxConstant(j, dl)); 6786 } 6787 return DAG.getBuildVector(VT, dl, Ops); 6788 } 6789 6790 SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) { 6791 EVT VT = N->getValueType(0); 6792 SDValue SubVec = N->getOperand(1); 6793 SDValue InVec = N->getOperand(0); 6794 6795 if (getTypeAction(SubVec.getValueType()) == TargetLowering::TypeWidenVector) 6796 SubVec = GetWidenedVector(SubVec); 6797 6798 EVT SubVT = SubVec.getValueType(); 6799 6800 // Whether or not all the elements of the widened SubVec will be inserted into 6801 // valid indices of VT. 6802 bool IndicesValid = false; 6803 // If we statically know that VT can fit SubVT, the indices are valid. 
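  // For example, a SubVec widened to v8i32 always fits into a v16i32 result.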
6804 if (VT.knownBitsGE(SubVT)) 6805 IndicesValid = true; 6806 else if (VT.isScalableVector() && SubVT.isFixedLengthVector()) { 6807 // Otherwise, if we're inserting a fixed vector into a scalable vector and 6808 // we know the minimum vscale we can work out if it's valid ourselves. 6809 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute( 6810 Attribute::VScaleRange); 6811 if (Attr.isValid()) { 6812 unsigned VScaleMin = Attr.getVScaleRangeMin(); 6813 if (VT.getSizeInBits().getKnownMinValue() * VScaleMin >= 6814 SubVT.getFixedSizeInBits()) 6815 IndicesValid = true; 6816 } 6817 } 6818 6819 // We need to make sure that the indices are still valid, otherwise we might 6820 // widen what was previously well-defined to something undefined. 6821 if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0) 6822 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, InVec, SubVec, 6823 N->getOperand(2)); 6824 6825 report_fatal_error("Don't know how to widen the operands for " 6826 "INSERT_SUBVECTOR"); 6827 } 6828 6829 SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) { 6830 SDValue InOp = GetWidenedVector(N->getOperand(0)); 6831 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), 6832 N->getValueType(0), InOp, N->getOperand(1)); 6833 } 6834 6835 SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { 6836 SDValue InOp = GetWidenedVector(N->getOperand(0)); 6837 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), 6838 N->getValueType(0), InOp, N->getOperand(1)); 6839 } 6840 6841 SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(SDNode *N) { 6842 SDValue InOp = GetWidenedVector(N->getOperand(0)); 6843 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), InOp); 6844 } 6845 6846 SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) { 6847 // We have to widen the value, but we want only to store the original 6848 // vector type. 6849 StoreSDNode *ST = cast<StoreSDNode>(N); 6850 6851 if (!ST->getMemoryVT().getScalarType().isByteSized()) 6852 return TLI.scalarizeVectorStore(ST, DAG); 6853 6854 if (ST->isTruncatingStore()) 6855 return TLI.scalarizeVectorStore(ST, DAG); 6856 6857 // Generate a vector-predicated store if it is custom/legal on the target. 6858 // To avoid possible recursion, only do this if the widened mask type is 6859 // legal. 6860 // FIXME: Not all targets may support EVL in VP_STORE. These will have been 6861 // removed from the IR by the ExpandVectorPredication pass but we're 6862 // reintroducing them here. 6863 SDValue StVal = ST->getValue(); 6864 EVT StVT = StVal.getValueType(); 6865 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StVT); 6866 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 6867 WideVT.getVectorElementCount()); 6868 6869 if (TLI.isOperationLegalOrCustom(ISD::VP_STORE, WideVT) && 6870 TLI.isTypeLegal(WideMaskVT)) { 6871 // Widen the value. 
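    // For example, a v3i32 store becomes a VP_STORE of the widened v4i32
    // value with an all-ones v4i1 mask and EVL = 3, so only the original
    // three lanes are written.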
6872 SDLoc DL(N); 6873 StVal = GetWidenedVector(StVal); 6874 SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT); 6875 SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(), 6876 StVT.getVectorElementCount()); 6877 return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(), 6878 DAG.getUNDEF(ST->getBasePtr().getValueType()), Mask, 6879 EVL, StVT, ST->getMemOperand(), 6880 ST->getAddressingMode()); 6881 } 6882 6883 SmallVector<SDValue, 16> StChain; 6884 if (GenWidenVectorStores(StChain, ST)) { 6885 if (StChain.size() == 1) 6886 return StChain[0]; 6887 6888 return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain); 6889 } 6890 6891 report_fatal_error("Unable to widen vector store"); 6892 } 6893 6894 SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(SDNode *N, unsigned OpNo) { 6895 assert(OpNo == 1 && "Can widen only mask operand of vp_splat"); 6896 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), 6897 N->getOperand(0), GetWidenedVector(N->getOperand(1)), 6898 N->getOperand(2)); 6899 } 6900 6901 SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo) { 6902 assert((OpNo == 1 || OpNo == 3) && 6903 "Can widen only data or mask operand of vp_store"); 6904 VPStoreSDNode *ST = cast<VPStoreSDNode>(N); 6905 SDValue Mask = ST->getMask(); 6906 SDValue StVal = ST->getValue(); 6907 SDLoc dl(N); 6908 6909 if (OpNo == 1) { 6910 // Widen the value. 6911 StVal = GetWidenedVector(StVal); 6912 6913 // We only handle the case where the mask needs widening to an 6914 // identically-sized type as the vector inputs. 6915 assert(getTypeAction(Mask.getValueType()) == 6916 TargetLowering::TypeWidenVector && 6917 "Unable to widen VP store"); 6918 Mask = GetWidenedVector(Mask); 6919 } else { 6920 Mask = GetWidenedVector(Mask); 6921 6922 // We only handle the case where the stored value needs widening to an 6923 // identically-sized type as the mask. 
6924 assert(getTypeAction(StVal.getValueType()) == 6925 TargetLowering::TypeWidenVector && 6926 "Unable to widen VP store"); 6927 StVal = GetWidenedVector(StVal); 6928 } 6929 6930 assert(Mask.getValueType().getVectorElementCount() == 6931 StVal.getValueType().getVectorElementCount() && 6932 "Mask and data vectors should have the same number of elements"); 6933 return DAG.getStoreVP(ST->getChain(), dl, StVal, ST->getBasePtr(), 6934 ST->getOffset(), Mask, ST->getVectorLength(), 6935 ST->getMemoryVT(), ST->getMemOperand(), 6936 ST->getAddressingMode(), ST->isTruncatingStore(), 6937 ST->isCompressingStore()); 6938 } 6939 6940 SDValue DAGTypeLegalizer::WidenVecOp_VP_STRIDED_STORE(SDNode *N, 6941 unsigned OpNo) { 6942 assert((OpNo == 1 || OpNo == 4) && 6943 "Can widen only data or mask operand of vp_strided_store"); 6944 VPStridedStoreSDNode *SST = cast<VPStridedStoreSDNode>(N); 6945 SDValue Mask = SST->getMask(); 6946 SDValue StVal = SST->getValue(); 6947 SDLoc DL(N); 6948 6949 if (OpNo == 1) 6950 assert(getTypeAction(Mask.getValueType()) == 6951 TargetLowering::TypeWidenVector && 6952 "Unable to widen VP strided store"); 6953 else 6954 assert(getTypeAction(StVal.getValueType()) == 6955 TargetLowering::TypeWidenVector && 6956 "Unable to widen VP strided store"); 6957 6958 StVal = GetWidenedVector(StVal); 6959 Mask = GetWidenedVector(Mask); 6960 6961 assert(StVal.getValueType().getVectorElementCount() == 6962 Mask.getValueType().getVectorElementCount() && 6963 "Data and mask vectors should have the same number of elements"); 6964 6965 return DAG.getStridedStoreVP( 6966 SST->getChain(), DL, StVal, SST->getBasePtr(), SST->getOffset(), 6967 SST->getStride(), Mask, SST->getVectorLength(), SST->getMemoryVT(), 6968 SST->getMemOperand(), SST->getAddressingMode(), SST->isTruncatingStore(), 6969 SST->isCompressingStore()); 6970 } 6971 6972 SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) { 6973 assert((OpNo == 1 || OpNo == 4) && 6974 "Can widen only data or mask operand of mstore"); 6975 MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); 6976 SDValue Mask = MST->getMask(); 6977 EVT MaskVT = Mask.getValueType(); 6978 SDValue StVal = MST->getValue(); 6979 SDLoc dl(N); 6980 6981 if (OpNo == 1) { 6982 // Widen the value. 6983 StVal = GetWidenedVector(StVal); 6984 6985 // The mask should be widened as well. 6986 EVT WideVT = StVal.getValueType(); 6987 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), 6988 MaskVT.getVectorElementType(), 6989 WideVT.getVectorNumElements()); 6990 Mask = ModifyToType(Mask, WideMaskVT, true); 6991 } else { 6992 // Widen the mask. 
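    // For example, a v3i1 mask widens to v4i1 with the extra lane cleared to
    // zero, and the v3i32 data is padded out to v4i32 so the element counts
    // still match.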
6993 EVT WideMaskVT = TLI.getTypeToTransformTo(*DAG.getContext(), MaskVT); 6994 Mask = ModifyToType(Mask, WideMaskVT, true); 6995 6996 EVT ValueVT = StVal.getValueType(); 6997 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), 6998 ValueVT.getVectorElementType(), 6999 WideMaskVT.getVectorNumElements()); 7000 StVal = ModifyToType(StVal, WideVT); 7001 } 7002 7003 assert(Mask.getValueType().getVectorNumElements() == 7004 StVal.getValueType().getVectorNumElements() && 7005 "Mask and data vectors should have the same number of elements"); 7006 return DAG.getMaskedStore(MST->getChain(), dl, StVal, MST->getBasePtr(), 7007 MST->getOffset(), Mask, MST->getMemoryVT(), 7008 MST->getMemOperand(), MST->getAddressingMode(), 7009 false, MST->isCompressingStore()); 7010 } 7011 7012 SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(SDNode *N, unsigned OpNo) { 7013 assert(OpNo == 4 && "Can widen only the index of mgather"); 7014 auto *MG = cast<MaskedGatherSDNode>(N); 7015 SDValue DataOp = MG->getPassThru(); 7016 SDValue Mask = MG->getMask(); 7017 SDValue Scale = MG->getScale(); 7018 7019 // Just widen the index. It's allowed to have extra elements. 7020 SDValue Index = GetWidenedVector(MG->getIndex()); 7021 7022 SDLoc dl(N); 7023 SDValue Ops[] = {MG->getChain(), DataOp, Mask, MG->getBasePtr(), Index, 7024 Scale}; 7025 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops, 7026 MG->getMemOperand(), MG->getIndexType(), 7027 MG->getExtensionType()); 7028 ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); 7029 ReplaceValueWith(SDValue(N, 0), Res.getValue(0)); 7030 return SDValue(); 7031 } 7032 7033 SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) { 7034 MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N); 7035 SDValue DataOp = MSC->getValue(); 7036 SDValue Mask = MSC->getMask(); 7037 SDValue Index = MSC->getIndex(); 7038 SDValue Scale = MSC->getScale(); 7039 EVT WideMemVT = MSC->getMemoryVT(); 7040 7041 if (OpNo == 1) { 7042 DataOp = GetWidenedVector(DataOp); 7043 unsigned NumElts = DataOp.getValueType().getVectorNumElements(); 7044 7045 // Widen index. 7046 EVT IndexVT = Index.getValueType(); 7047 EVT WideIndexVT = EVT::getVectorVT(*DAG.getContext(), 7048 IndexVT.getVectorElementType(), NumElts); 7049 Index = ModifyToType(Index, WideIndexVT); 7050 7051 // The mask should be widened as well. 7052 EVT MaskVT = Mask.getValueType(); 7053 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), 7054 MaskVT.getVectorElementType(), NumElts); 7055 Mask = ModifyToType(Mask, WideMaskVT, true); 7056 7057 // Widen the MemoryType 7058 WideMemVT = EVT::getVectorVT(*DAG.getContext(), 7059 MSC->getMemoryVT().getScalarType(), NumElts); 7060 } else if (OpNo == 4) { 7061 // Just widen the index. It's allowed to have extra elements. 
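    // The extra index lanes are harmless: the data, mask and memory VT keep
    // their original element count, so those lanes are never accessed.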
7062 Index = GetWidenedVector(Index); 7063 } else 7064 llvm_unreachable("Can't widen this operand of mscatter"); 7065 7066 SDValue Ops[] = {MSC->getChain(), DataOp, Mask, MSC->getBasePtr(), Index, 7067 Scale}; 7068 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), 7069 Ops, MSC->getMemOperand(), MSC->getIndexType(), 7070 MSC->isTruncatingStore()); 7071 } 7072 7073 SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(SDNode *N, unsigned OpNo) { 7074 VPScatterSDNode *VPSC = cast<VPScatterSDNode>(N); 7075 SDValue DataOp = VPSC->getValue(); 7076 SDValue Mask = VPSC->getMask(); 7077 SDValue Index = VPSC->getIndex(); 7078 SDValue Scale = VPSC->getScale(); 7079 EVT WideMemVT = VPSC->getMemoryVT(); 7080 7081 if (OpNo == 1) { 7082 DataOp = GetWidenedVector(DataOp); 7083 Index = GetWidenedVector(Index); 7084 const auto WideEC = DataOp.getValueType().getVectorElementCount(); 7085 Mask = GetWidenedMask(Mask, WideEC); 7086 WideMemVT = EVT::getVectorVT(*DAG.getContext(), 7087 VPSC->getMemoryVT().getScalarType(), WideEC); 7088 } else if (OpNo == 3) { 7089 // Just widen the index. It's allowed to have extra elements. 7090 Index = GetWidenedVector(Index); 7091 } else 7092 llvm_unreachable("Can't widen this operand of VP_SCATTER"); 7093 7094 SDValue Ops[] = { 7095 VPSC->getChain(), DataOp, VPSC->getBasePtr(), Index, Scale, Mask, 7096 VPSC->getVectorLength()}; 7097 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), Ops, 7098 VPSC->getMemOperand(), VPSC->getIndexType()); 7099 } 7100 7101 SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) { 7102 SDValue InOp0 = GetWidenedVector(N->getOperand(0)); 7103 SDValue InOp1 = GetWidenedVector(N->getOperand(1)); 7104 SDLoc dl(N); 7105 EVT VT = N->getValueType(0); 7106 7107 // WARNING: In this code we widen the compare instruction with garbage. 7108 // This garbage may contain denormal floats which may be slow. Is this a real 7109 // concern ? Should we zero the unused lanes if this is a float compare ? 7110 7111 // Get a new SETCC node to compare the newly widened operands. 7112 // Only some of the compared elements are legal. 7113 EVT SVT = getSetCCResultType(InOp0.getValueType()); 7114 // The result type is legal, if its vXi1, keep vXi1 for the new SETCC. 7115 if (VT.getScalarType() == MVT::i1) 7116 SVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 7117 SVT.getVectorElementCount()); 7118 7119 SDValue WideSETCC = DAG.getNode(ISD::SETCC, SDLoc(N), 7120 SVT, InOp0, InOp1, N->getOperand(2)); 7121 7122 // Extract the needed results from the result vector. 7123 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), 7124 SVT.getVectorElementType(), 7125 VT.getVectorElementCount()); 7126 SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, WideSETCC, 7127 DAG.getVectorIdxConstant(0, dl)); 7128 7129 EVT OpVT = N->getOperand(0).getValueType(); 7130 ISD::NodeType ExtendCode = 7131 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT)); 7132 return DAG.getNode(ExtendCode, dl, VT, CC); 7133 } 7134 7135 SDValue DAGTypeLegalizer::WidenVecOp_STRICT_FSETCC(SDNode *N) { 7136 SDValue Chain = N->getOperand(0); 7137 SDValue LHS = GetWidenedVector(N->getOperand(1)); 7138 SDValue RHS = GetWidenedVector(N->getOperand(2)); 7139 SDValue CC = N->getOperand(3); 7140 SDLoc dl(N); 7141 7142 EVT VT = N->getValueType(0); 7143 EVT EltVT = VT.getVectorElementType(); 7144 EVT TmpEltVT = LHS.getValueType().getVectorElementType(); 7145 unsigned NumElts = VT.getVectorNumElements(); 7146 7147 // Unroll into a build vector. 
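  // Each lane becomes a scalar STRICT_FSETCC that also produces a chain; the
  // i1 results are converted to EltVT booleans with selects and the chains
  // are joined with a TokenFactor below.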
7148 SmallVector<SDValue, 8> Scalars(NumElts); 7149 SmallVector<SDValue, 8> Chains(NumElts); 7150 7151 for (unsigned i = 0; i != NumElts; ++i) { 7152 SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS, 7153 DAG.getVectorIdxConstant(i, dl)); 7154 SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS, 7155 DAG.getVectorIdxConstant(i, dl)); 7156 7157 Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other}, 7158 {Chain, LHSElem, RHSElem, CC}); 7159 Chains[i] = Scalars[i].getValue(1); 7160 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i], 7161 DAG.getBoolConstant(true, dl, EltVT, VT), 7162 DAG.getBoolConstant(false, dl, EltVT, VT)); 7163 } 7164 7165 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 7166 ReplaceValueWith(SDValue(N, 1), NewChain); 7167 7168 return DAG.getBuildVector(VT, dl, Scalars); 7169 } 7170 7171 SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) { 7172 SDLoc dl(N); 7173 SDValue Op = GetWidenedVector(N->getOperand(0)); 7174 EVT OrigVT = N->getOperand(0).getValueType(); 7175 EVT WideVT = Op.getValueType(); 7176 EVT ElemVT = OrigVT.getVectorElementType(); 7177 SDNodeFlags Flags = N->getFlags(); 7178 7179 unsigned Opc = N->getOpcode(); 7180 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc); 7181 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags); 7182 assert(NeutralElem && "Neutral element must exist"); 7183 7184 // Pad the vector with the neutral element. 7185 unsigned OrigElts = OrigVT.getVectorMinNumElements(); 7186 unsigned WideElts = WideVT.getVectorMinNumElements(); 7187 7188 if (WideVT.isScalableVector()) { 7189 unsigned GCD = std::gcd(OrigElts, WideElts); 7190 EVT SplatVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 7191 ElementCount::getScalable(GCD)); 7192 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem); 7193 for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD) 7194 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Op, SplatNeutral, 7195 DAG.getVectorIdxConstant(Idx, dl)); 7196 return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags); 7197 } 7198 7199 for (unsigned Idx = OrigElts; Idx < WideElts; Idx++) 7200 Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem, 7201 DAG.getVectorIdxConstant(Idx, dl)); 7202 7203 return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags); 7204 } 7205 7206 SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) { 7207 SDLoc dl(N); 7208 SDValue AccOp = N->getOperand(0); 7209 SDValue VecOp = N->getOperand(1); 7210 SDValue Op = GetWidenedVector(VecOp); 7211 7212 EVT OrigVT = VecOp.getValueType(); 7213 EVT WideVT = Op.getValueType(); 7214 EVT ElemVT = OrigVT.getVectorElementType(); 7215 SDNodeFlags Flags = N->getFlags(); 7216 7217 unsigned Opc = N->getOpcode(); 7218 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc); 7219 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags); 7220 7221 // Pad the vector with the neutral element. 
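  // For example, a VECREDUCE_SEQ_FADD of v3f32 widened to v4f32 fills the
  // extra lane with the FADD neutral element so it cannot change the result.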
  unsigned OrigElts = OrigVT.getVectorMinNumElements();
  unsigned WideElts = WideVT.getVectorMinNumElements();

  if (WideVT.isScalableVector()) {
    unsigned GCD = std::gcd(OrigElts, WideElts);
    EVT SplatVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
                                   ElementCount::getScalable(GCD));
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
      Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Op, SplatNeutral,
                       DAG.getVectorIdxConstant(Idx, dl));
    return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
  }

  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
    Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
                     DAG.getVectorIdxConstant(Idx, dl));

  return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
}

SDValue DAGTypeLegalizer::WidenVecOp_VP_REDUCE(SDNode *N) {
  assert(N->isVPOpcode() && "Expected VP opcode");

  SDLoc dl(N);
  SDValue Op = GetWidenedVector(N->getOperand(1));
  SDValue Mask = GetWidenedMask(N->getOperand(2),
                                Op.getValueType().getVectorElementCount());

  return DAG.getNode(N->getOpcode(), dl, N->getValueType(0),
                     {N->getOperand(0), Op, Mask, N->getOperand(3)},
                     N->getFlags());
}

SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
  // This only gets called in the case that the left and right inputs and
  // result are of a legal odd vector type, and the condition is an illegal i1
  // vector of the same odd width that needs widening.
  EVT VT = N->getValueType(0);
  assert(VT.isVector() && !VT.isPow2VectorType() && isTypeLegal(VT));

  SDValue Cond = GetWidenedVector(N->getOperand(0));
  SDValue LeftIn = DAG.WidenVector(N->getOperand(1), SDLoc(N));
  SDValue RightIn = DAG.WidenVector(N->getOperand(2), SDLoc(N));
  SDLoc DL(N);

  SDValue Select = DAG.getNode(N->getOpcode(), DL, LeftIn.getValueType(), Cond,
                               LeftIn, RightIn);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Select,
                     DAG.getVectorIdxConstant(0, DL));
}

SDValue DAGTypeLegalizer::WidenVecOp_VP_CttzElements(SDNode *N) {
  SDLoc DL(N);
  SDValue Source = GetWidenedVector(N->getOperand(0));
  EVT SrcVT = Source.getValueType();
  SDValue Mask =
      GetWidenedMask(N->getOperand(1), SrcVT.getVectorElementCount());

  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0),
                     {Source, Mask, N->getOperand(2)}, N->getFlags());
}

//===----------------------------------------------------------------------===//
// Vector Widening Utilities
//===----------------------------------------------------------------------===//

// Utility function to find the type to chop up a widened vector for
// load/store.
//  TLI: Target lowering used to determine legal types.
//  Width: Width left to load/store.
//  WidenVT: The widened vector type to load to/store from.
//  Align: If 0, don't allow use of a wider type.
//  WidenEx: If Align is not 0, the additional amount we can load/store from.
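// For example, when widening a v3i32 load to v4i32, the 96 bits might be
// covered by a 64-bit piece followed by a 32-bit piece, depending on which
// integer and vector types are legal on the target.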
7295 7296 static std::optional<EVT> findMemType(SelectionDAG &DAG, 7297 const TargetLowering &TLI, unsigned Width, 7298 EVT WidenVT, unsigned Align = 0, 7299 unsigned WidenEx = 0) { 7300 EVT WidenEltVT = WidenVT.getVectorElementType(); 7301 const bool Scalable = WidenVT.isScalableVector(); 7302 unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinValue(); 7303 unsigned WidenEltWidth = WidenEltVT.getSizeInBits(); 7304 unsigned AlignInBits = Align*8; 7305 7306 // If we have one element to load/store, return it. 7307 EVT RetVT = WidenEltVT; 7308 if (!Scalable && Width == WidenEltWidth) 7309 return RetVT; 7310 7311 // Don't bother looking for an integer type if the vector is scalable, skip 7312 // to vector types. 7313 if (!Scalable) { 7314 // See if there is larger legal integer than the element type to load/store. 7315 for (EVT MemVT : reverse(MVT::integer_valuetypes())) { 7316 unsigned MemVTWidth = MemVT.getSizeInBits(); 7317 if (MemVT.getSizeInBits() <= WidenEltWidth) 7318 break; 7319 auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT); 7320 if ((Action == TargetLowering::TypeLegal || 7321 Action == TargetLowering::TypePromoteInteger) && 7322 (WidenWidth % MemVTWidth) == 0 && 7323 isPowerOf2_32(WidenWidth / MemVTWidth) && 7324 (MemVTWidth <= Width || 7325 (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) { 7326 if (MemVTWidth == WidenWidth) 7327 return MemVT; 7328 RetVT = MemVT; 7329 break; 7330 } 7331 } 7332 } 7333 7334 // See if there is a larger vector type to load/store that has the same vector 7335 // element type and is evenly divisible with the WidenVT. 7336 for (EVT MemVT : reverse(MVT::vector_valuetypes())) { 7337 // Skip vector MVTs which don't match the scalable property of WidenVT. 7338 if (Scalable != MemVT.isScalableVector()) 7339 continue; 7340 unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinValue(); 7341 auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT); 7342 if ((Action == TargetLowering::TypeLegal || 7343 Action == TargetLowering::TypePromoteInteger) && 7344 WidenEltVT == MemVT.getVectorElementType() && 7345 (WidenWidth % MemVTWidth) == 0 && 7346 isPowerOf2_32(WidenWidth / MemVTWidth) && 7347 (MemVTWidth <= Width || 7348 (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) { 7349 if (RetVT.getFixedSizeInBits() < MemVTWidth || MemVT == WidenVT) 7350 return MemVT; 7351 } 7352 } 7353 7354 // Using element-wise loads and stores for widening operations is not 7355 // supported for scalable vectors 7356 if (Scalable) 7357 return std::nullopt; 7358 7359 return RetVT; 7360 } 7361 7362 // Builds a vector type from scalar loads 7363 // VecTy: Resulting Vector type 7364 // LDOps: Load operators to build a vector type 7365 // [Start,End) the list of loads to use. 
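// The loads may have different scalar types; when the type changes, the
// partially built vector is bitcast to a vector of the new element type
// before the remaining elements are inserted.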
7366 static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy, 7367 SmallVectorImpl<SDValue> &LdOps, 7368 unsigned Start, unsigned End) { 7369 SDLoc dl(LdOps[Start]); 7370 EVT LdTy = LdOps[Start].getValueType(); 7371 unsigned Width = VecTy.getSizeInBits(); 7372 unsigned NumElts = Width / LdTy.getSizeInBits(); 7373 EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), LdTy, NumElts); 7374 7375 unsigned Idx = 1; 7376 SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT,LdOps[Start]); 7377 7378 for (unsigned i = Start + 1; i != End; ++i) { 7379 EVT NewLdTy = LdOps[i].getValueType(); 7380 if (NewLdTy != LdTy) { 7381 NumElts = Width / NewLdTy.getSizeInBits(); 7382 NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts); 7383 VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp); 7384 // Readjust position and vector position based on new load type. 7385 Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits(); 7386 LdTy = NewLdTy; 7387 } 7388 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i], 7389 DAG.getVectorIdxConstant(Idx++, dl)); 7390 } 7391 return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp); 7392 } 7393 7394 SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain, 7395 LoadSDNode *LD) { 7396 // The strategy assumes that we can efficiently load power-of-two widths. 7397 // The routine chops the vector into the largest vector loads with the same 7398 // element type or scalar loads and then recombines it to the widen vector 7399 // type. 7400 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0)); 7401 EVT LdVT = LD->getMemoryVT(); 7402 SDLoc dl(LD); 7403 assert(LdVT.isVector() && WidenVT.isVector()); 7404 assert(LdVT.isScalableVector() == WidenVT.isScalableVector()); 7405 assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType()); 7406 7407 // Load information 7408 SDValue Chain = LD->getChain(); 7409 SDValue BasePtr = LD->getBasePtr(); 7410 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); 7411 AAMDNodes AAInfo = LD->getAAInfo(); 7412 7413 TypeSize LdWidth = LdVT.getSizeInBits(); 7414 TypeSize WidenWidth = WidenVT.getSizeInBits(); 7415 TypeSize WidthDiff = WidenWidth - LdWidth; 7416 // Allow wider loads if they are sufficiently aligned to avoid memory faults 7417 // and if the original load is simple. 7418 unsigned LdAlign = 7419 (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value(); 7420 7421 // Find the vector type that can load from. 7422 std::optional<EVT> FirstVT = 7423 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign, 7424 WidthDiff.getKnownMinValue()); 7425 7426 if (!FirstVT) 7427 return SDValue(); 7428 7429 SmallVector<EVT, 8> MemVTs; 7430 TypeSize FirstVTWidth = FirstVT->getSizeInBits(); 7431 7432 // Unless we're able to load in one instruction we must work out how to load 7433 // the remainder. 7434 if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) { 7435 std::optional<EVT> NewVT = FirstVT; 7436 TypeSize RemainingWidth = LdWidth; 7437 TypeSize NewVTWidth = FirstVTWidth; 7438 do { 7439 RemainingWidth -= NewVTWidth; 7440 if (TypeSize::isKnownLT(RemainingWidth, NewVTWidth)) { 7441 // The current type we are using is too large. Find a better size. 
7442 NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinValue(), 7443 WidenVT, LdAlign, WidthDiff.getKnownMinValue()); 7444 if (!NewVT) 7445 return SDValue(); 7446 NewVTWidth = NewVT->getSizeInBits(); 7447 } 7448 MemVTs.push_back(*NewVT); 7449 } while (TypeSize::isKnownGT(RemainingWidth, NewVTWidth)); 7450 } 7451 7452 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr, LD->getPointerInfo(), 7453 LD->getOriginalAlign(), MMOFlags, AAInfo); 7454 LdChain.push_back(LdOp.getValue(1)); 7455 7456 // Check if we can load the element with one instruction. 7457 if (MemVTs.empty()) { 7458 assert(TypeSize::isKnownLE(LdWidth, FirstVTWidth)); 7459 if (!FirstVT->isVector()) { 7460 unsigned NumElts = 7461 WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue(); 7462 EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), *FirstVT, NumElts); 7463 SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp); 7464 return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp); 7465 } 7466 if (FirstVT == WidenVT) 7467 return LdOp; 7468 7469 // TODO: We don't currently have any tests that exercise this code path. 7470 assert(WidenWidth.getFixedValue() % FirstVTWidth.getFixedValue() == 0); 7471 unsigned NumConcat = 7472 WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue(); 7473 SmallVector<SDValue, 16> ConcatOps(NumConcat); 7474 SDValue UndefVal = DAG.getUNDEF(*FirstVT); 7475 ConcatOps[0] = LdOp; 7476 for (unsigned i = 1; i != NumConcat; ++i) 7477 ConcatOps[i] = UndefVal; 7478 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps); 7479 } 7480 7481 // Load vector by using multiple loads from largest vector to scalar. 7482 SmallVector<SDValue, 16> LdOps; 7483 LdOps.push_back(LdOp); 7484 7485 uint64_t ScaledOffset = 0; 7486 MachinePointerInfo MPI = LD->getPointerInfo(); 7487 7488 // First incremement past the first load. 7489 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr, 7490 &ScaledOffset); 7491 7492 for (EVT MemVT : MemVTs) { 7493 Align NewAlign = ScaledOffset == 0 7494 ? LD->getOriginalAlign() 7495 : commonAlignment(LD->getAlign(), ScaledOffset); 7496 SDValue L = 7497 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo); 7498 7499 LdOps.push_back(L); 7500 LdChain.push_back(L.getValue(1)); 7501 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset); 7502 } 7503 7504 // Build the vector from the load operations. 7505 unsigned End = LdOps.size(); 7506 if (!LdOps[0].getValueType().isVector()) 7507 // All the loads are scalar loads. 7508 return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End); 7509 7510 // If the load contains vectors, build the vector using concat vector. 7511 // All of the vectors used to load are power-of-2, and the scalar loads can be 7512 // combined to make a power-of-2 vector. 7513 SmallVector<SDValue, 16> ConcatOps(End); 7514 int i = End - 1; 7515 int Idx = End; 7516 EVT LdTy = LdOps[i].getValueType(); 7517 // First, combine the scalar loads to a vector. 7518 if (!LdTy.isVector()) { 7519 for (--i; i >= 0; --i) { 7520 LdTy = LdOps[i].getValueType(); 7521 if (LdTy.isVector()) 7522 break; 7523 } 7524 ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i + 1, End); 7525 } 7526 7527 ConcatOps[--Idx] = LdOps[i]; 7528 for (--i; i >= 0; --i) { 7529 EVT NewLdTy = LdOps[i].getValueType(); 7530 if (NewLdTy != LdTy) { 7531 // Create a larger vector. 
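      // The pieces gathered so far (of type LdTy) are concatenated, padded
      // with undef, into a single value of the wider NewLdTy before
      // continuing with the next load.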
      TypeSize LdTySize = LdTy.getSizeInBits();
      TypeSize NewLdTySize = NewLdTy.getSizeInBits();
      assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
             NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinValue()));
      unsigned NumOps =
          NewLdTySize.getKnownMinValue() / LdTySize.getKnownMinValue();
      SmallVector<SDValue, 16> WidenOps(NumOps);
      unsigned j = 0;
      for (; j != End - Idx; ++j)
        WidenOps[j] = ConcatOps[Idx + j];
      for (; j != NumOps; ++j)
        WidenOps[j] = DAG.getUNDEF(LdTy);

      ConcatOps[End - 1] =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy, WidenOps);
      Idx = End - 1;
      LdTy = NewLdTy;
    }
    ConcatOps[--Idx] = LdOps[i];
  }

  if (WidenWidth == LdTy.getSizeInBits() * (End - Idx))
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
                       ArrayRef(&ConcatOps[Idx], End - Idx));

  // We need to fill the rest with undefs to build the vector.
  unsigned NumOps =
      WidenWidth.getKnownMinValue() / LdTy.getSizeInBits().getKnownMinValue();
  SmallVector<SDValue, 16> WidenOps(NumOps);
  SDValue UndefVal = DAG.getUNDEF(LdTy);
  {
    unsigned i = 0;
    for (; i != End - Idx; ++i)
      WidenOps[i] = ConcatOps[Idx + i];
    for (; i != NumOps; ++i)
      WidenOps[i] = UndefVal;
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, WidenOps);
}

SDValue
DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
                                         LoadSDNode *LD,
                                         ISD::LoadExtType ExtType) {
  // For extension loads, it may not be more efficient to chop up the vector
  // and then extend it. Instead, we unroll the load and build a new vector.
  EVT WidenVT =
      TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
  EVT LdVT = LD->getMemoryVT();
  SDLoc dl(LD);
  assert(LdVT.isVector() && WidenVT.isVector());
  assert(LdVT.isScalableVector() == WidenVT.isScalableVector());

  // Load information
  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  if (LdVT.isScalableVector())
    report_fatal_error("Generating widen scalable extending vector loads is "
                       "not yet supported");

  EVT EltVT = WidenVT.getVectorElementType();
  EVT LdEltVT = LdVT.getVectorElementType();
  unsigned NumElts = LdVT.getVectorNumElements();

  // Load each element and widen.
  unsigned WidenNumElts = WidenVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(WidenNumElts);
  unsigned Increment = LdEltVT.getSizeInBits() / 8;
  Ops[0] =
      DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, LD->getPointerInfo(),
                     LdEltVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
  LdChain.push_back(Ops[0].getValue(1));
  unsigned i = 0, Offset = Increment;
  for (i = 1; i < NumElts; ++i, Offset += Increment) {
    SDValue NewBasePtr =
        DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::getFixed(Offset));
    Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
                            LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
                            LD->getOriginalAlign(), MMOFlags, AAInfo);
    LdChain.push_back(Ops[i].getValue(1));
  }

  // Fill the rest with undefs.
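  // For example (illustrative; the types are an assumption): an extending
  // load from v3i8 widened to v4i32 produces three per-element extloads
  // (i8 -> i32) above, and the one remaining lane is filled with undef before
  // building the result vector.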
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i != WidenNumElts; ++i)
    Ops[i] = UndefVal;

  return DAG.getBuildVector(WidenVT, dl, Ops);
}

bool DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
                                            StoreSDNode *ST) {
  // The strategy assumes that we can efficiently store power-of-two widths.
  // The routine chops the vector into the largest vector stores with the same
  // element type, or scalar stores.
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();
  SDValue ValOp = GetWidenedVector(ST->getValue());
  SDLoc dl(ST);

  EVT StVT = ST->getMemoryVT();
  TypeSize StWidth = StVT.getSizeInBits();
  EVT ValVT = ValOp.getValueType();
  TypeSize ValWidth = ValVT.getSizeInBits();
  EVT ValEltVT = ValVT.getVectorElementType();
  unsigned ValEltWidth = ValEltVT.getFixedSizeInBits();
  assert(StVT.getVectorElementType() == ValEltVT);
  assert(StVT.isScalableVector() == ValVT.isScalableVector() &&
         "Mismatch between store and value types");

  int Idx = 0; // Current index to store.

  MachinePointerInfo MPI = ST->getPointerInfo();
  uint64_t ScaledOffset = 0;

  // A breakdown of how to widen this vector store. Each element of the vector
  // is a memory VT paired with the number of times it is to be stored,
  // e.g., v5i32 -> {{v2i32,2},{i32,1}}.
  SmallVector<std::pair<EVT, unsigned>, 4> MemVTs;

  while (StWidth.isNonZero()) {
    // Find the largest vector type we can store with.
    std::optional<EVT> NewVT =
        findMemType(DAG, TLI, StWidth.getKnownMinValue(), ValVT);
    if (!NewVT)
      return false;
    MemVTs.push_back({*NewVT, 0});
    TypeSize NewVTWidth = NewVT->getSizeInBits();

    do {
      StWidth -= NewVTWidth;
      MemVTs.back().second++;
    } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
  }

  for (const auto &Pair : MemVTs) {
    EVT NewVT = Pair.first;
    unsigned Count = Pair.second;
    TypeSize NewVTWidth = NewVT.getSizeInBits();

    if (NewVT.isVector()) {
      unsigned NumVTElts = NewVT.getVectorMinNumElements();
      do {
        Align NewAlign = ScaledOffset == 0
                             ? ST->getOriginalAlign()
                             : commonAlignment(ST->getAlign(), ScaledOffset);
        SDValue EOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
                                  DAG.getVectorIdxConstant(Idx, dl));
        SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
                                         MMOFlags, AAInfo);
        StChain.push_back(PartStore);

        Idx += NumVTElts;
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
                         &ScaledOffset);
      } while (--Count);
    } else {
      // Cast the vector to the scalar type we can store.
      unsigned NumElts = ValWidth.getFixedValue() / NewVTWidth.getFixedValue();
      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
      SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
      // Readjust the index based on the new vector type.
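      // Illustrative arithmetic (the breakdown is an assumption): for a v10i16
      // store broken down as {{v8i16,1},{i32,1}}, Idx == 8 after the v8i16
      // subvector store; the widened value is bitcast to a vector of i32 and
      // Idx is rescaled to 8 * 16 / 32 == 4 so the correct 32-bit chunk is
      // extracted below.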
      Idx = Idx * ValEltWidth / NewVTWidth.getFixedValue();
      do {
        SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
                                  DAG.getVectorIdxConstant(Idx++, dl));
        SDValue PartStore =
            DAG.getStore(Chain, dl, EOp, BasePtr, MPI, ST->getOriginalAlign(),
                         MMOFlags, AAInfo);
        StChain.push_back(PartStore);

        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
      } while (--Count);
      // Restore the index to be relative to the original widened element type.
      Idx = Idx * NewVTWidth.getFixedValue() / ValEltWidth;
    }
  }

  return true;
}

/// Modifies a vector input (widens or narrows) to a vector of NVT. The
/// input vector must have the same element type as NVT.
/// FillWithZeroes specifies that the vector should be widened with zeroes.
SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT,
                                       bool FillWithZeroes) {
  // Note that InOp might have been widened so it might already have
  // the right width or it might need to be narrowed.
  EVT InVT = InOp.getValueType();
  assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
         "input and widen element type must match");
  assert(InVT.isScalableVector() == NVT.isScalableVector() &&
         "cannot modify scalable vectors in this way");
  SDLoc dl(InOp);

  // Check if InOp already has the right width.
  if (InVT == NVT)
    return InOp;

  ElementCount InEC = InVT.getVectorElementCount();
  ElementCount WidenEC = NVT.getVectorElementCount();
  if (WidenEC.hasKnownScalarFactor(InEC)) {
    unsigned NumConcat = WidenEC.getKnownScalarFactor(InEC);
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue FillVal =
        FillWithZeroes ? DAG.getConstant(0, dl, InVT) : DAG.getUNDEF(InVT);
    Ops[0] = InOp;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = FillVal;

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, Ops);
  }

  if (InEC.hasKnownScalarFactor(WidenEC))
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT, InOp,
                       DAG.getVectorIdxConstant(0, dl));

  assert(!InVT.isScalableVector() && !NVT.isScalableVector() &&
         "Scalable vectors should have been handled already.");

  unsigned InNumElts = InEC.getFixedValue();
  unsigned WidenNumElts = WidenEC.getFixedValue();

  // Fall back to extract and build (+ mask, if padding with zeros).
  SmallVector<SDValue, 16> Ops(WidenNumElts);
  EVT EltVT = NVT.getVectorElementType();
  unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
  unsigned Idx;
  for (Idx = 0; Idx < MinNumElts; ++Idx)
    Ops[Idx] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
                           DAG.getVectorIdxConstant(Idx, dl));

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;

  SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
  if (!FillWithZeroes)
    return Widened;

  assert(NVT.isInteger() &&
         "We expect to never want to FillWithZeroes for non-integral types.");

  SmallVector<SDValue, 16> MaskOps;
  MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
  MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));

  return DAG.getNode(ISD::AND, dl, NVT, Widened,
                     DAG.getBuildVector(NVT, dl, MaskOps));
}
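
// Illustrative example of ModifyToType (hypothetical types, not from the
// original source): converting a v2i32 input to v3i32 takes the extract-and-
// build fall-back above, since neither element count is a known multiple of
// the other; with FillWithZeroes the two live lanes are preserved by ANDing
// with the {-1, -1, 0} mask built at the end.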