1 //===------- LegalizeVectorTypes.cpp - Legalization of vector types -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file performs vector type splitting and scalarization for LegalizeTypes.
10 // Scalarization is the act of changing a computation in an illegal one-element
11 // vector type to be a computation in its scalar element type. For example,
12 // implementing <1 x f32> arithmetic in a scalar f32 register. This is needed
13 // as a base case when scalarizing vector arithmetic like <4 x f32>, which
14 // eventually decomposes to scalars if the target doesn't support v4f32 or v2f32
15 // types.
16 // Splitting is the act of changing a computation in an invalid vector type to
17 // be a computation in two vectors of half the size. For example, implementing
18 // <128 x f32> operations in terms of two <64 x f32> operations.
19 //
20 //===----------------------------------------------------------------------===//
21
22 #include "LegalizeTypes.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/Analysis/MemoryLocation.h"
25 #include "llvm/Analysis/VectorUtils.h"
26 #include "llvm/CodeGen/ISDOpcodes.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/TypeSize.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include <numeric>
32
33 using namespace llvm;
34
35 #define DEBUG_TYPE "legalize-types"
36
37 //===----------------------------------------------------------------------===//
38 // Result Vector Scalarization: <1 x ty> -> ty.
39 //===----------------------------------------------------------------------===//
40
/// Dispatch scalarization of one illegal <1 x ty> result of N. Each case
/// delegates to a ScalarizeVecRes_* helper; a helper either returns the
/// scalar replacement (registered below) or returns null after registering
/// results itself (e.g. multi-result nodes).
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Scalarize node result " << ResNo << ": ";
             N->dump(&DAG));
  SDValue R = SDValue();

  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "ScalarizeVectorResult #" << ResNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to scalarize the result of this "
                       "operator!\n");

  case ISD::MERGE_VALUES: R = ScalarizeVecRes_MERGE_VALUES(N, ResNo);break;
  case ISD::BITCAST:           R = ScalarizeVecRes_BITCAST(N); break;
  case ISD::BUILD_VECTOR:      R = ScalarizeVecRes_BUILD_VECTOR(N); break;
  case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
  case ISD::FP_ROUND:          R = ScalarizeVecRes_FP_ROUND(N); break;
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::FPOWI:
  case ISD::AssertNoFPClass:
    // Unary in the data operand, plus one extra non-vector operand.
    R = ScalarizeVecRes_UnaryOpWithExtraInput(N);
    break;
  case ISD::INSERT_VECTOR_ELT: R = ScalarizeVecRes_INSERT_VECTOR_ELT(N); break;
  case ISD::LOAD:           R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N));break;
  case ISD::SCALAR_TO_VECTOR:  R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;
  case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_InregOp(N); break;
  case ISD::VSELECT:           R = ScalarizeVecRes_VSELECT(N); break;
  case ISD::SELECT:            R = ScalarizeVecRes_SELECT(N); break;
  case ISD::SELECT_CC:         R = ScalarizeVecRes_SELECT_CC(N); break;
  case ISD::SETCC:             R = ScalarizeVecRes_SETCC(N); break;
  case ISD::POISON:
  case ISD::UNDEF:             R = ScalarizeVecRes_UNDEF(N); break;
  case ISD::VECTOR_SHUFFLE:    R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
  case ISD::IS_FPCLASS:        R = ScalarizeVecRes_IS_FPCLASS(N); break;
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    R = ScalarizeVecRes_VecInregOp(N);
    break;
  // Plain unary operations: one vector operand, one vector result.
  case ISD::ABS:
  case ISD::ANY_EXTEND:
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::FABS:
  case ISD::FACOS:
  case ISD::FASIN:
  case ISD::FATAN:
  case ISD::FCEIL:
  case ISD::FCOS:
  case ISD::FCOSH:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FEXP10:
  case ISD::FFLOOR:
  case ISD::FLOG:
  case ISD::FLOG10:
  case ISD::FLOG2:
  case ISD::FNEARBYINT:
  case ISD::FNEG:
  case ISD::FREEZE:
  case ISD::ARITH_FENCE:
  case ISD::FP_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FRINT:
  case ISD::LRINT:
  case ISD::LLRINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::FSIN:
  case ISD::FSINH:
  case ISD::FSQRT:
  case ISD::FTAN:
  case ISD::FTANH:
  case ISD::FTRUNC:
  case ISD::SIGN_EXTEND:
  case ISD::SINT_TO_FP:
  case ISD::TRUNCATE:
  case ISD::UINT_TO_FP:
  case ISD::ZERO_EXTEND:
  case ISD::FCANONICALIZE:
    R = ScalarizeVecRes_UnaryOp(N);
    break;
  case ISD::ADDRSPACECAST:
    R = ScalarizeVecRes_ADDRSPACECAST(N);
    break;
  // Unary ops producing two results (e.g. sin+cos, fraction+exponent).
  case ISD::FMODF:
  case ISD::FFREXP:
  case ISD::FSINCOS:
  case ISD::FSINCOSPI:
    R = ScalarizeVecRes_UnaryOpWithTwoResults(N, ResNo);
    break;
  // Plain binary operations: two vector operands, one vector result.
  case ISD::ADD:
  case ISD::AND:
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
  case ISD::FADD:
  case ISD::FCOPYSIGN:
  case ISD::FDIV:
  case ISD::FMUL:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::FMINIMUMNUM:
  case ISD::FMAXIMUMNUM:
  case ISD::FLDEXP:
  case ISD::ABDS:
  case ISD::ABDU:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:

  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
  case ISD::SSHLSAT:
  case ISD::USHLSAT:

  case ISD::FPOW:
  case ISD::FATAN2:
  case ISD::FREM:
  case ISD::FSUB:
  case ISD::MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::OR:
  case ISD::SDIV:
  case ISD::SREM:
  case ISD::SUB:
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    R = ScalarizeVecRes_BinOp(N);
    break;

  case ISD::SCMP:
  case ISD::UCMP:
    R = ScalarizeVecRes_CMP(N);
    break;

  case ISD::FMA:
  case ISD::FSHL:
  case ISD::FSHR:
    R = ScalarizeVecRes_TernaryOp(N);
    break;

// Constrained (strict) FP ops, expanded from ConstrainedOps.def.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    R = ScalarizeVecRes_StrictFPOp(N);
    break;

  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_TO_SINT_SAT:
    R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
    break;

  // Arithmetic-with-overflow ops: value result plus overflow-flag result.
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::USUBO:
  case ISD::SSUBO:
  case ISD::UMULO:
  case ISD::SMULO:
    R = ScalarizeVecRes_OverflowOp(N, ResNo);
    break;
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT:
    R = ScalarizeVecRes_FIX(N);
    break;
  }

  // If R is null, the sub-method took care of registering the result.
  if (R.getNode())
    SetScalarizedVector(SDValue(N, ResNo), R);
}
245
ScalarizeVecRes_BinOp(SDNode * N)246 SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
247 SDValue LHS = GetScalarizedVector(N->getOperand(0));
248 SDValue RHS = GetScalarizedVector(N->getOperand(1));
249 return DAG.getNode(N->getOpcode(), SDLoc(N),
250 LHS.getValueType(), LHS, RHS, N->getFlags());
251 }
252
ScalarizeVecRes_CMP(SDNode * N)253 SDValue DAGTypeLegalizer::ScalarizeVecRes_CMP(SDNode *N) {
254 SDLoc DL(N);
255
256 SDValue LHS = N->getOperand(0);
257 SDValue RHS = N->getOperand(1);
258 if (getTypeAction(LHS.getValueType()) ==
259 TargetLowering::TypeScalarizeVector) {
260 LHS = GetScalarizedVector(LHS);
261 RHS = GetScalarizedVector(RHS);
262 } else {
263 EVT VT = LHS.getValueType().getVectorElementType();
264 LHS = DAG.getExtractVectorElt(DL, VT, LHS, 0);
265 RHS = DAG.getExtractVectorElt(DL, VT, RHS, 0);
266 }
267
268 return DAG.getNode(N->getOpcode(), SDLoc(N),
269 N->getValueType(0).getVectorElementType(), LHS, RHS);
270 }
271
ScalarizeVecRes_TernaryOp(SDNode * N)272 SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) {
273 SDValue Op0 = GetScalarizedVector(N->getOperand(0));
274 SDValue Op1 = GetScalarizedVector(N->getOperand(1));
275 SDValue Op2 = GetScalarizedVector(N->getOperand(2));
276 return DAG.getNode(N->getOpcode(), SDLoc(N), Op0.getValueType(), Op0, Op1,
277 Op2, N->getFlags());
278 }
279
ScalarizeVecRes_FIX(SDNode * N)280 SDValue DAGTypeLegalizer::ScalarizeVecRes_FIX(SDNode *N) {
281 SDValue Op0 = GetScalarizedVector(N->getOperand(0));
282 SDValue Op1 = GetScalarizedVector(N->getOperand(1));
283 SDValue Op2 = N->getOperand(2);
284 return DAG.getNode(N->getOpcode(), SDLoc(N), Op0.getValueType(), Op0, Op1,
285 Op2, N->getFlags());
286 }
287
/// Scalarize a unary node that produces two results (FMODF/FFREXP/FSINCOS/
/// FSINCOSPI). This call is asked for result ResNo, but the sibling result
/// must be dealt with here too so the two-result node is replaced coherently.
SDValue
DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithTwoResults(SDNode *N,
                                                        unsigned ResNo) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  SDValue Elt = GetScalarizedVector(N->getOperand(0));

  EVT VT0 = N->getValueType(0);
  EVT VT1 = N->getValueType(1);
  SDLoc dl(N);

  // Re-emit the operation on the scalar element types of both results.
  SDNode *ScalarNode =
      DAG.getNode(N->getOpcode(), dl,
                  {VT0.getScalarType(), VT1.getScalarType()}, Elt)
          .getNode();

  // Replace the other vector result not being explicitly scalarized here.
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  if (getTypeAction(OtherVT) == TargetLowering::TypeScalarizeVector) {
    // Sibling result is also being scalarized: just register its mapping.
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
  } else {
    // Sibling result stays a vector type: rebuild it via SCALAR_TO_VECTOR
    // and replace all of its uses now.
    SDValue OtherVal = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, OtherVT,
                                   SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  }

  return SDValue(ScalarNode, ResNo);
}
317
ScalarizeVecRes_StrictFPOp(SDNode * N)318 SDValue DAGTypeLegalizer::ScalarizeVecRes_StrictFPOp(SDNode *N) {
319 EVT VT = N->getValueType(0).getVectorElementType();
320 unsigned NumOpers = N->getNumOperands();
321 SDValue Chain = N->getOperand(0);
322 EVT ValueVTs[] = {VT, MVT::Other};
323 SDLoc dl(N);
324
325 SmallVector<SDValue, 4> Opers(NumOpers);
326
327 // The Chain is the first operand.
328 Opers[0] = Chain;
329
330 // Now process the remaining operands.
331 for (unsigned i = 1; i < NumOpers; ++i) {
332 SDValue Oper = N->getOperand(i);
333 EVT OperVT = Oper.getValueType();
334
335 if (OperVT.isVector()) {
336 if (getTypeAction(OperVT) == TargetLowering::TypeScalarizeVector)
337 Oper = GetScalarizedVector(Oper);
338 else
339 Oper =
340 DAG.getExtractVectorElt(dl, OperVT.getVectorElementType(), Oper, 0);
341 }
342
343 Opers[i] = Oper;
344 }
345
346 SDValue Result = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(ValueVTs),
347 Opers, N->getFlags());
348
349 // Legalize the chain result - switch anything that used the old chain to
350 // use the new one.
351 ReplaceValueWith(SDValue(N, 1), Result.getValue(1));
352 return Result;
353 }
354
/// Scalarize an arithmetic-with-overflow node (UADDO etc.). The node has two
/// results - the value and the overflow flag - and both are handled here so
/// the node is replaced exactly once, whichever result triggered the call.
SDValue DAGTypeLegalizer::ScalarizeVecRes_OverflowOp(SDNode *N,
                                                     unsigned ResNo) {
  SDLoc DL(N);
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);

  SDValue ScalarLHS, ScalarRHS;
  if (getTypeAction(ResVT) == TargetLowering::TypeScalarizeVector) {
    // Operands follow the result and are being scalarized too.
    ScalarLHS = GetScalarizedVector(N->getOperand(0));
    ScalarRHS = GetScalarizedVector(N->getOperand(1));
  } else {
    // Result 0 stays a vector; extract element 0 of each operand instead.
    SmallVector<SDValue, 1> ElemsLHS, ElemsRHS;
    DAG.ExtractVectorElements(N->getOperand(0), ElemsLHS);
    DAG.ExtractVectorElements(N->getOperand(1), ElemsRHS);
    ScalarLHS = ElemsLHS[0];
    ScalarRHS = ElemsRHS[0];
  }

  // Re-emit the op on the scalar element types of both results.
  SDVTList ScalarVTs = DAG.getVTList(
      ResVT.getVectorElementType(), OvVT.getVectorElementType());
  SDNode *ScalarNode = DAG.getNode(
      N->getOpcode(), DL, ScalarVTs, ScalarLHS, ScalarRHS).getNode();
  ScalarNode->setFlags(N->getFlags());

  // Replace the other vector result not being explicitly scalarized here.
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  if (getTypeAction(OtherVT) == TargetLowering::TypeScalarizeVector) {
    SetScalarizedVector(SDValue(N, OtherNo), SDValue(ScalarNode, OtherNo));
  } else {
    // Sibling result keeps its vector type: wrap the scalar back up and
    // replace its uses immediately.
    SDValue OtherVal = DAG.getNode(
        ISD::SCALAR_TO_VECTOR, DL, OtherVT, SDValue(ScalarNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  }

  return SDValue(ScalarNode, ResNo);
}
392
ScalarizeVecRes_MERGE_VALUES(SDNode * N,unsigned ResNo)393 SDValue DAGTypeLegalizer::ScalarizeVecRes_MERGE_VALUES(SDNode *N,
394 unsigned ResNo) {
395 SDValue Op = DisintegrateMERGE_VALUES(N, ResNo);
396 return GetScalarizedVector(Op);
397 }
398
ScalarizeVecRes_BITCAST(SDNode * N)399 SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
400 SDValue Op = N->getOperand(0);
401 if (getTypeAction(Op.getValueType()) == TargetLowering::TypeScalarizeVector)
402 Op = GetScalarizedVector(Op);
403 EVT NewVT = N->getValueType(0).getVectorElementType();
404 return DAG.getNode(ISD::BITCAST, SDLoc(N),
405 NewVT, Op);
406 }
407
ScalarizeVecRes_BUILD_VECTOR(SDNode * N)408 SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(SDNode *N) {
409 EVT EltVT = N->getValueType(0).getVectorElementType();
410 SDValue InOp = N->getOperand(0);
411 // The BUILD_VECTOR operands may be of wider element types and
412 // we may need to truncate them back to the requested return type.
413 if (EltVT.isInteger())
414 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp);
415 return InOp;
416 }
417
ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode * N)418 SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
419 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
420 N->getValueType(0).getVectorElementType(),
421 N->getOperand(0), N->getOperand(1));
422 }
423
ScalarizeVecRes_FP_ROUND(SDNode * N)424 SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_ROUND(SDNode *N) {
425 SDLoc DL(N);
426 SDValue Op = N->getOperand(0);
427 EVT OpVT = Op.getValueType();
428 // The result needs scalarizing, but it's not a given that the source does.
429 // See similar logic in ScalarizeVecRes_UnaryOp.
430 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
431 Op = GetScalarizedVector(Op);
432 } else {
433 EVT VT = OpVT.getVectorElementType();
434 Op = DAG.getExtractVectorElt(DL, VT, Op, 0);
435 }
436 return DAG.getNode(ISD::FP_ROUND, DL,
437 N->getValueType(0).getVectorElementType(), Op,
438 N->getOperand(1));
439 }
440
ScalarizeVecRes_UnaryOpWithExtraInput(SDNode * N)441 SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOpWithExtraInput(SDNode *N) {
442 SDValue Op = GetScalarizedVector(N->getOperand(0));
443 return DAG.getNode(N->getOpcode(), SDLoc(N), Op.getValueType(), Op,
444 N->getOperand(1));
445 }
446
ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode * N)447 SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
448 // The value to insert may have a wider type than the vector element type,
449 // so be sure to truncate it to the element type if necessary.
450 SDValue Op = N->getOperand(1);
451 EVT EltVT = N->getValueType(0).getVectorElementType();
452 if (Op.getValueType() != EltVT)
453 // FIXME: Can this happen for floating point types?
454 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, Op);
455 return Op;
456 }
457
ScalarizeVecRes_LOAD(LoadSDNode * N)458 SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
459 assert(N->isUnindexed() && "Indexed vector load?");
460
461 SDValue Result = DAG.getLoad(
462 ISD::UNINDEXED, N->getExtensionType(),
463 N->getValueType(0).getVectorElementType(), SDLoc(N), N->getChain(),
464 N->getBasePtr(), DAG.getUNDEF(N->getBasePtr().getValueType()),
465 N->getPointerInfo(), N->getMemoryVT().getVectorElementType(),
466 N->getBaseAlign(), N->getMemOperand()->getFlags(), N->getAAInfo());
467
468 // Legalize the chain result - switch anything that used the old chain to
469 // use the new one.
470 ReplaceValueWith(SDValue(N, 1), Result.getValue(1));
471 return Result;
472 }
473
ScalarizeVecRes_UnaryOp(SDNode * N)474 SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
475 // Get the dest type - it doesn't always match the input type, e.g. int_to_fp.
476 EVT DestVT = N->getValueType(0).getVectorElementType();
477 SDValue Op = N->getOperand(0);
478 EVT OpVT = Op.getValueType();
479 SDLoc DL(N);
480 // The result needs scalarizing, but it's not a given that the source does.
481 // This is a workaround for targets where it's impossible to scalarize the
482 // result of a conversion, because the source type is legal.
483 // For instance, this happens on AArch64: v1i1 is illegal but v1i{8,16,32}
484 // are widened to v8i8, v4i16, and v2i32, which is legal, because v1i64 is
485 // legal and was not scalarized.
486 // See the similar logic in ScalarizeVecRes_SETCC
487 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
488 Op = GetScalarizedVector(Op);
489 } else {
490 EVT VT = OpVT.getVectorElementType();
491 Op = DAG.getExtractVectorElt(DL, VT, Op, 0);
492 }
493 return DAG.getNode(N->getOpcode(), SDLoc(N), DestVT, Op, N->getFlags());
494 }
495
ScalarizeVecRes_InregOp(SDNode * N)496 SDValue DAGTypeLegalizer::ScalarizeVecRes_InregOp(SDNode *N) {
497 EVT EltVT = N->getValueType(0).getVectorElementType();
498 EVT ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType();
499 SDValue LHS = GetScalarizedVector(N->getOperand(0));
500 return DAG.getNode(N->getOpcode(), SDLoc(N), EltVT,
501 LHS, DAG.getValueType(ExtVT));
502 }
503
ScalarizeVecRes_VecInregOp(SDNode * N)504 SDValue DAGTypeLegalizer::ScalarizeVecRes_VecInregOp(SDNode *N) {
505 SDLoc DL(N);
506 SDValue Op = N->getOperand(0);
507
508 EVT OpVT = Op.getValueType();
509 EVT OpEltVT = OpVT.getVectorElementType();
510 EVT EltVT = N->getValueType(0).getVectorElementType();
511
512 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
513 Op = GetScalarizedVector(Op);
514 } else {
515 Op = DAG.getExtractVectorElt(DL, OpEltVT, Op, 0);
516 }
517
518 switch (N->getOpcode()) {
519 case ISD::ANY_EXTEND_VECTOR_INREG:
520 return DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Op);
521 case ISD::SIGN_EXTEND_VECTOR_INREG:
522 return DAG.getNode(ISD::SIGN_EXTEND, DL, EltVT, Op);
523 case ISD::ZERO_EXTEND_VECTOR_INREG:
524 return DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Op);
525 }
526
527 llvm_unreachable("Illegal extend_vector_inreg opcode");
528 }
529
ScalarizeVecRes_ADDRSPACECAST(SDNode * N)530 SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(SDNode *N) {
531 EVT DestVT = N->getValueType(0).getVectorElementType();
532 SDValue Op = N->getOperand(0);
533 EVT OpVT = Op.getValueType();
534 SDLoc DL(N);
535 // The result needs scalarizing, but it's not a given that the source does.
536 // This is a workaround for targets where it's impossible to scalarize the
537 // result of a conversion, because the source type is legal.
538 // For instance, this happens on AArch64: v1i1 is illegal but v1i{8,16,32}
539 // are widened to v8i8, v4i16, and v2i32, which is legal, because v1i64 is
540 // legal and was not scalarized.
541 // See the similar logic in ScalarizeVecRes_SETCC
542 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
543 Op = GetScalarizedVector(Op);
544 } else {
545 EVT VT = OpVT.getVectorElementType();
546 Op = DAG.getExtractVectorElt(DL, VT, Op, 0);
547 }
548 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
549 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
550 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
551 return DAG.getAddrSpaceCast(DL, DestVT, Op, SrcAS, DestAS);
552 }
553
ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode * N)554 SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
555 // If the operand is wider than the vector element type then it is implicitly
556 // truncated. Make that explicit here.
557 EVT EltVT = N->getValueType(0).getVectorElementType();
558 SDValue InOp = N->getOperand(0);
559 if (InOp.getValueType() != EltVT)
560 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, InOp);
561 return InOp;
562 }
563
/// Scalarize a VSELECT. The tricky part is the condition: vector and scalar
/// booleans may use different representations (0/1 vs. 0/-1), so the
/// condition value has to be converted to the scalar convention before it can
/// drive a scalar select.
SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
  SDValue Cond = N->getOperand(0);
  EVT OpVT = Cond.getValueType();
  SDLoc DL(N);
  // The vselect result and true/value operands needs scalarizing, but it's
  // not a given that the Cond does. For instance, in AVX512 v1i1 is legal.
  // See the similar logic in ScalarizeVecRes_SETCC
  if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
    Cond = GetScalarizedVector(Cond);
  } else {
    EVT VT = OpVT.getVectorElementType();
    Cond = DAG.getExtractVectorElt(DL, VT, Cond, 0);
  }

  SDValue LHS = GetScalarizedVector(N->getOperand(1));
  TargetLowering::BooleanContent ScalarBool =
      TLI.getBooleanContents(false, false);
  TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true, false);

  // If integer and float booleans have different contents then we can't
  // reliably optimize in all cases. There is a full explanation for this in
  // DAGCombiner::visitSELECT() where the same issue affects folding
  // (select C, 0, 1) to (xor C, 1).
  if (TLI.getBooleanContents(false, false) !=
      TLI.getBooleanContents(false, true)) {
    // At least try the common case where the boolean is generated by a
    // comparison.
    if (Cond->getOpcode() == ISD::SETCC) {
      EVT OpVT = Cond->getOperand(0).getValueType();
      ScalarBool = TLI.getBooleanContents(OpVT.getScalarType());
      VecBool = TLI.getBooleanContents(OpVT);
    } else
      ScalarBool = TargetLowering::UndefinedBooleanContent;
  }

  EVT CondVT = Cond.getValueType();
  // Reconcile differing boolean contents between the vector condition we
  // read from and the scalar condition the select expects.
  if (ScalarBool != VecBool) {
    switch (ScalarBool) {
    case TargetLowering::UndefinedBooleanContent:
      break;
    case TargetLowering::ZeroOrOneBooleanContent:
      assert(VecBool == TargetLowering::UndefinedBooleanContent ||
             VecBool == TargetLowering::ZeroOrNegativeOneBooleanContent);
      // Vector read from all ones, scalar expects a single 1 so mask.
      Cond = DAG.getNode(ISD::AND, SDLoc(N), CondVT,
                         Cond, DAG.getConstant(1, SDLoc(N), CondVT));
      break;
    case TargetLowering::ZeroOrNegativeOneBooleanContent:
      assert(VecBool == TargetLowering::UndefinedBooleanContent ||
             VecBool == TargetLowering::ZeroOrOneBooleanContent);
      // Vector reads from a one, scalar from all ones so sign extend.
      Cond = DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), CondVT,
                         Cond, DAG.getValueType(MVT::i1));
      break;
    }
  }

  // Truncate the condition if needed
  auto BoolVT = getSetCCResultType(CondVT);
  if (BoolVT.bitsLT(CondVT))
    Cond = DAG.getNode(ISD::TRUNCATE, SDLoc(N), BoolVT, Cond);

  return DAG.getSelect(SDLoc(N),
                       LHS.getValueType(), Cond, LHS,
                       GetScalarizedVector(N->getOperand(2)));
}
630
ScalarizeVecRes_SELECT(SDNode * N)631 SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
632 SDValue LHS = GetScalarizedVector(N->getOperand(1));
633 return DAG.getSelect(SDLoc(N),
634 LHS.getValueType(), N->getOperand(0), LHS,
635 GetScalarizedVector(N->getOperand(2)));
636 }
637
ScalarizeVecRes_SELECT_CC(SDNode * N)638 SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) {
639 SDValue LHS = GetScalarizedVector(N->getOperand(2));
640 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), LHS.getValueType(),
641 N->getOperand(0), N->getOperand(1),
642 LHS, GetScalarizedVector(N->getOperand(3)),
643 N->getOperand(4));
644 }
645
ScalarizeVecRes_UNDEF(SDNode * N)646 SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
647 return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
648 }
649
ScalarizeVecRes_VECTOR_SHUFFLE(SDNode * N)650 SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
651 // Figure out if the scalar is the LHS or RHS and return it.
652 SDValue Arg = N->getOperand(2).getOperand(0);
653 if (Arg.isUndef())
654 return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
655 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
656 return GetScalarizedVector(N->getOperand(Op));
657 }
658
ScalarizeVecRes_FP_TO_XINT_SAT(SDNode * N)659 SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
660 SDValue Src = N->getOperand(0);
661 EVT SrcVT = Src.getValueType();
662 SDLoc dl(N);
663
664 // Handle case where result is scalarized but operand is not
665 if (getTypeAction(SrcVT) == TargetLowering::TypeScalarizeVector)
666 Src = GetScalarizedVector(Src);
667 else
668 Src = DAG.getNode(
669 ISD::EXTRACT_VECTOR_ELT, dl, SrcVT.getVectorElementType(), Src,
670 DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
671
672 EVT DstVT = N->getValueType(0).getVectorElementType();
673 return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));
674 }
675
ScalarizeVecRes_SETCC(SDNode * N)676 SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
677 assert(N->getValueType(0).isVector() &&
678 N->getOperand(0).getValueType().isVector() &&
679 "Operand types must be vectors");
680 SDValue LHS = N->getOperand(0);
681 SDValue RHS = N->getOperand(1);
682 EVT OpVT = LHS.getValueType();
683 EVT NVT = N->getValueType(0).getVectorElementType();
684 SDLoc DL(N);
685
686 // The result needs scalarizing, but it's not a given that the source does.
687 if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
688 LHS = GetScalarizedVector(LHS);
689 RHS = GetScalarizedVector(RHS);
690 } else {
691 EVT VT = OpVT.getVectorElementType();
692 LHS = DAG.getExtractVectorElt(DL, VT, LHS, 0);
693 RHS = DAG.getExtractVectorElt(DL, VT, RHS, 0);
694 }
695
696 // Turn it into a scalar SETCC.
697 SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS,
698 N->getOperand(2));
699 // Vectors may have a different boolean contents to scalars. Promote the
700 // value appropriately.
701 ISD::NodeType ExtendCode =
702 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
703 return DAG.getNode(ExtendCode, DL, NVT, Res);
704 }
705
ScalarizeVecRes_IS_FPCLASS(SDNode * N)706 SDValue DAGTypeLegalizer::ScalarizeVecRes_IS_FPCLASS(SDNode *N) {
707 SDLoc DL(N);
708 SDValue Arg = N->getOperand(0);
709 SDValue Test = N->getOperand(1);
710 EVT ArgVT = Arg.getValueType();
711 EVT ResultVT = N->getValueType(0).getVectorElementType();
712
713 if (getTypeAction(ArgVT) == TargetLowering::TypeScalarizeVector) {
714 Arg = GetScalarizedVector(Arg);
715 } else {
716 EVT VT = ArgVT.getVectorElementType();
717 Arg = DAG.getExtractVectorElt(DL, VT, Arg, 0);
718 }
719
720 SDValue Res =
721 DAG.getNode(ISD::IS_FPCLASS, DL, MVT::i1, {Arg, Test}, N->getFlags());
722 // Vectors may have a different boolean contents to scalars. Promote the
723 // value appropriately.
724 ISD::NodeType ExtendCode =
725 TargetLowering::getExtendForContent(TLI.getBooleanContents(ArgVT));
726 return DAG.getNode(ExtendCode, DL, ResultVT, Res);
727 }
728
729 //===----------------------------------------------------------------------===//
730 // Operand Vector Scalarization <1 x ty> -> ty.
731 //===----------------------------------------------------------------------===//
732
/// Dispatch scalarization of an illegal <1 x ty> *operand* of N. Returns true
/// when the helper updated N in place; returns false after replacing N's
/// value (or when the helper registered results itself).
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": ";
             N->dump(&DAG));
  SDValue Res = SDValue();

  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to scalarize this operator's "
                       "operand!\n");
  case ISD::BITCAST:
    Res = ScalarizeVecOp_BITCAST(N);
    break;
  case ISD::FAKE_USE:
    Res = ScalarizeVecOp_FAKE_USE(N);
    break;
  // Unary conversions: vector operand, scalar-convertible result.
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::TRUNCATE:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::LRINT:
  case ISD::LLRINT:
    Res = ScalarizeVecOp_UnaryOp(N);
    break;
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    Res = ScalarizeVecOp_UnaryOpWithExtraInput(N);
    break;
  // Strict (chained) conversions keep their chain operand/result.
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
    Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
    break;
  case ISD::CONCAT_VECTORS:
    Res = ScalarizeVecOp_CONCAT_VECTORS(N);
    break;
  case ISD::INSERT_SUBVECTOR:
    Res = ScalarizeVecOp_INSERT_SUBVECTOR(N, OpNo);
    break;
  case ISD::EXTRACT_VECTOR_ELT:
    Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
    break;
  case ISD::VSELECT:
    Res = ScalarizeVecOp_VSELECT(N);
    break;
  case ISD::SETCC:
    Res = ScalarizeVecOp_VSETCC(N);
    break;
  case ISD::STORE:
    Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    break;
  case ISD::STRICT_FP_ROUND:
    Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
    break;
  case ISD::FP_ROUND:
    Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
    break;
  case ISD::STRICT_FP_EXTEND:
    Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
    break;
  case ISD::FP_EXTEND:
    Res = ScalarizeVecOp_FP_EXTEND(N);
    break;
  // Reductions over a <1 x ty> operand collapse to the element itself.
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VECREDUCE_FMINIMUM:
    Res = ScalarizeVecOp_VECREDUCE(N);
    break;
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_SEQ_FMUL:
    Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
    break;
  case ISD::SCMP:
  case ISD::UCMP:
    Res = ScalarizeVecOp_CMP(N);
    break;
  }

  // If the result is null, the sub-method took care of registering results etc.
  if (!Res.getNode()) return false;

  // If the result is N, the sub-method updated N in place. Tell the legalizer
  // core about this.
  if (Res.getNode() == N)
    return true;

  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
         "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  return false;
}
848
849 /// If the value to convert is a vector that needs to be scalarized, it must be
850 /// <1 x ty>. Convert the element instead.
ScalarizeVecOp_BITCAST(SDNode * N)851 SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
852 SDValue Elt = GetScalarizedVector(N->getOperand(0));
853 return DAG.getNode(ISD::BITCAST, SDLoc(N),
854 N->getValueType(0), Elt);
855 }
856
857 // Need to legalize vector operands of fake uses. Must be <1 x ty>.
ScalarizeVecOp_FAKE_USE(SDNode * N)858 SDValue DAGTypeLegalizer::ScalarizeVecOp_FAKE_USE(SDNode *N) {
859 assert(N->getOperand(1).getValueType().getVectorNumElements() == 1 &&
860 "Fake Use: Unexpected vector type!");
861 SDValue Elt = GetScalarizedVector(N->getOperand(1));
862 return DAG.getNode(ISD::FAKE_USE, SDLoc(), MVT::Other, N->getOperand(0), Elt);
863 }
864
865 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>.
866 /// Do the operation on the element instead.
ScalarizeVecOp_UnaryOp(SDNode * N)867 SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp(SDNode *N) {
868 assert(N->getValueType(0).getVectorNumElements() == 1 &&
869 "Unexpected vector type!");
870 SDValue Elt = GetScalarizedVector(N->getOperand(0));
871 SDValue Op = DAG.getNode(N->getOpcode(), SDLoc(N),
872 N->getValueType(0).getScalarType(), Elt);
873 // Revectorize the result so the types line up with what the uses of this
874 // expression expect.
875 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op);
876 }
877
878 /// Same as ScalarizeVecOp_UnaryOp with an extra operand (for example a
879 /// typesize).
ScalarizeVecOp_UnaryOpWithExtraInput(SDNode * N)880 SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOpWithExtraInput(SDNode *N) {
881 assert(N->getValueType(0).getVectorNumElements() == 1 &&
882 "Unexpected vector type!");
883 SDValue Elt = GetScalarizedVector(N->getOperand(0));
884 SDValue Op =
885 DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0).getScalarType(),
886 Elt, N->getOperand(1));
887 // Revectorize the result so the types line up with what the uses of this
888 // expression expect.
889 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Op);
890 }
891
892 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>.
893 /// Do the strict FP operation on the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(SDNode *N) {
  assert(N->getValueType(0).getVectorNumElements() == 1 &&
         "Unexpected vector type!");
  // Operand 0 is the incoming chain; operand 1 is the <1 x ty> value.
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  // Rebuild the strict operation on the scalar element; it produces both the
  // scalar result and an output chain.
  SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N),
                            { N->getValueType(0).getScalarType(), MVT::Other },
                            { N->getOperand(0), Elt });
  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  // Revectorize the result so the types line up with what the uses of this
  // expression expect.
  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);

  // Do our own replacement and return SDValue() to tell the caller that we
  // handled all replacements since caller can only handle a single result.
  ReplaceValueWith(SDValue(N, 0), Res);
  return SDValue();
}
913
914 /// The vectors to concatenate have length one - use a BUILD_VECTOR instead.
ScalarizeVecOp_CONCAT_VECTORS(SDNode * N)915 SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
916 SmallVector<SDValue, 8> Ops(N->getNumOperands());
917 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
918 Ops[i] = GetScalarizedVector(N->getOperand(i));
919 return DAG.getBuildVector(N->getValueType(0), SDLoc(N), Ops);
920 }
921
922 /// The inserted subvector is to be scalarized - use insert vector element
923 /// instead.
ScalarizeVecOp_INSERT_SUBVECTOR(SDNode * N,unsigned OpNo)924 SDValue DAGTypeLegalizer::ScalarizeVecOp_INSERT_SUBVECTOR(SDNode *N,
925 unsigned OpNo) {
926 // We should not be attempting to scalarize the containing vector
927 assert(OpNo == 1);
928 SDValue Elt = GetScalarizedVector(N->getOperand(1));
929 SDValue ContainingVec = N->getOperand(0);
930 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N),
931 ContainingVec.getValueType(), ContainingVec, Elt,
932 N->getOperand(2));
933 }
934
935 /// If the input is a vector that needs to be scalarized, it must be <1 x ty>,
936 /// so just return the element, ignoring the index.
ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode * N)937 SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
938 EVT VT = N->getValueType(0);
939 SDValue Res = GetScalarizedVector(N->getOperand(0));
940 if (Res.getValueType() != VT)
941 Res = VT.isFloatingPoint()
942 ? DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, Res)
943 : DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Res);
944 return Res;
945 }
946
947 /// If the input condition is a vector that needs to be scalarized, it must be
948 /// <1 x i1>, so just convert to a normal ISD::SELECT
949 /// (still with vector output type since that was acceptable if we got here).
ScalarizeVecOp_VSELECT(SDNode * N)950 SDValue DAGTypeLegalizer::ScalarizeVecOp_VSELECT(SDNode *N) {
951 SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
952 EVT VT = N->getValueType(0);
953
954 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, ScalarCond, N->getOperand(1),
955 N->getOperand(2));
956 }
957
958 /// If the operand is a vector that needs to be scalarized then the
959 /// result must be v1i1, so just convert to a scalar SETCC and wrap
960 /// with a scalar_to_vector since the res type is legal if we got here
ScalarizeVecOp_VSETCC(SDNode * N)961 SDValue DAGTypeLegalizer::ScalarizeVecOp_VSETCC(SDNode *N) {
962 assert(N->getValueType(0).isVector() &&
963 N->getOperand(0).getValueType().isVector() &&
964 "Operand types must be vectors");
965 assert(N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type");
966
967 EVT VT = N->getValueType(0);
968 SDValue LHS = GetScalarizedVector(N->getOperand(0));
969 SDValue RHS = GetScalarizedVector(N->getOperand(1));
970
971 EVT OpVT = N->getOperand(0).getValueType();
972 EVT NVT = VT.getVectorElementType();
973 SDLoc DL(N);
974 // Turn it into a scalar SETCC.
975 SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS,
976 N->getOperand(2));
977
978 // Vectors may have a different boolean contents to scalars. Promote the
979 // value appropriately.
980 ISD::NodeType ExtendCode =
981 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
982
983 Res = DAG.getNode(ExtendCode, DL, NVT, Res);
984
985 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Res);
986 }
987
988 /// If the value to store is a vector that needs to be scalarized, it must be
989 /// <1 x ty>. Just store the element.
SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
  assert(N->isUnindexed() && "Indexed store of one-element vector?");
  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
  SDLoc dl(N);

  // A truncating store truncates to the memory element type; carry over all
  // memory-operand properties (alignment, MMO flags, AA info) unchanged.
  if (N->isTruncatingStore())
    return DAG.getTruncStore(
        N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
        N->getBasePtr(), N->getPointerInfo(),
        N->getMemoryVT().getVectorElementType(), N->getBaseAlign(),
        N->getMemOperand()->getFlags(), N->getAAInfo());

  // Otherwise a plain scalar store of the single element suffices.
  return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
                      N->getBasePtr(), N->getPointerInfo(), N->getBaseAlign(),
                      N->getMemOperand()->getFlags(), N->getAAInfo());
}
1006
1007 /// If the value to round is a vector that needs to be scalarized, it must be
1008 /// <1 x ty>. Convert the element instead.
ScalarizeVecOp_FP_ROUND(SDNode * N,unsigned OpNo)1009 SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
1010 assert(OpNo == 0 && "Wrong operand for scalarization!");
1011 SDValue Elt = GetScalarizedVector(N->getOperand(0));
1012 SDValue Res = DAG.getNode(ISD::FP_ROUND, SDLoc(N),
1013 N->getValueType(0).getVectorElementType(), Elt,
1014 N->getOperand(1));
1015 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
1016 }
1017
SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N,
                                                         unsigned OpNo) {
  assert(OpNo == 1 && "Wrong operand for scalarization!");
  // Operand 0 is the chain, operand 1 the <1 x ty> value, operand 2 the
  // truncation flag; rebuild the strict round on the scalar element.
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  SDValue Res = DAG.getNode(ISD::STRICT_FP_ROUND, SDLoc(N),
                            { N->getValueType(0).getVectorElementType(),
                              MVT::Other },
                            { N->getOperand(0), Elt, N->getOperand(2) });
  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));

  // Revectorize the scalar result for the uses of this node.
  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);

  // Do our own replacement and return SDValue() to tell the caller that we
  // handled all replacements since caller can only handle a single result.
  ReplaceValueWith(SDValue(N, 0), Res);
  return SDValue();
}
1037
1038 /// If the value to extend is a vector that needs to be scalarized, it must be
1039 /// <1 x ty>. Convert the element instead.
ScalarizeVecOp_FP_EXTEND(SDNode * N)1040 SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_EXTEND(SDNode *N) {
1041 SDValue Elt = GetScalarizedVector(N->getOperand(0));
1042 SDValue Res = DAG.getNode(ISD::FP_EXTEND, SDLoc(N),
1043 N->getValueType(0).getVectorElementType(), Elt);
1044 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
1045 }
1046
1047 /// If the value to extend is a vector that needs to be scalarized, it must be
1048 /// <1 x ty>. Convert the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
  // Operand 0 is the chain; operand 1 is the <1 x ty> value to extend.
  SDValue Elt = GetScalarizedVector(N->getOperand(1));
  SDValue Res =
      DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(N),
                  {N->getValueType(0).getVectorElementType(), MVT::Other},
                  {N->getOperand(0), Elt});
  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));

  // Revectorize the scalar result for the uses of this node.
  Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);

  // Do our own replacement and return SDValue() to tell the caller that we
  // handled all replacements since caller can only handle a single result.
  ReplaceValueWith(SDValue(N, 0), Res);
  return SDValue();
}
1066
ScalarizeVecOp_VECREDUCE(SDNode * N)1067 SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE(SDNode *N) {
1068 SDValue Res = GetScalarizedVector(N->getOperand(0));
1069 // Result type may be wider than element type.
1070 if (Res.getValueType() != N->getValueType(0))
1071 Res = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), N->getValueType(0), Res);
1072 return Res;
1073 }
1074
ScalarizeVecOp_VECREDUCE_SEQ(SDNode * N)1075 SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
1076 SDValue AccOp = N->getOperand(0);
1077 SDValue VecOp = N->getOperand(1);
1078
1079 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
1080
1081 SDValue Op = GetScalarizedVector(VecOp);
1082 return DAG.getNode(BaseOpc, SDLoc(N), N->getValueType(0),
1083 AccOp, Op, N->getFlags());
1084 }
1085
ScalarizeVecOp_CMP(SDNode * N)1086 SDValue DAGTypeLegalizer::ScalarizeVecOp_CMP(SDNode *N) {
1087 SDValue LHS = GetScalarizedVector(N->getOperand(0));
1088 SDValue RHS = GetScalarizedVector(N->getOperand(1));
1089
1090 EVT ResVT = N->getValueType(0).getVectorElementType();
1091 SDValue Cmp = DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, LHS, RHS);
1092 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Cmp);
1093 }
1094
1095 //===----------------------------------------------------------------------===//
1096 // Result Vector Splitting
1097 //===----------------------------------------------------------------------===//
1098
1099 /// This method is called when the specified result of the specified node is
1100 /// found to need vector splitting. At this point, the node may also have
1101 /// invalid operands or may have other results that need legalization, we just
1102 /// know that (at least) one result needs vector splitting.
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Split node result: "; N->dump(&DAG));
  SDValue Lo, Hi;

  // See if the target wants to custom expand this node.
  if (CustomLowerNode(N, N->getValueType(ResNo), true))
    return;

  // Dispatch on opcode. Each handler either fills in Lo/Hi (registered below)
  // or performs its own result registration and leaves Lo/Hi null.
  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "SplitVectorResult #" << ResNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to split the result of this "
                       "operator!\n");

  case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
  case ISD::AssertZext:   SplitVecRes_AssertZext(N, Lo, Hi); break;
  case ISD::VSELECT:
  case ISD::SELECT:
  case ISD::VP_MERGE:
  case ISD::VP_SELECT:    SplitRes_Select(N, Lo, Hi); break;
  case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
  case ISD::POISON:
  case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
  case ISD::BITCAST:      SplitVecRes_BITCAST(N, Lo, Hi); break;
  case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
  case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
  case ISD::EXTRACT_SUBVECTOR: SplitVecRes_EXTRACT_SUBVECTOR(N, Lo, Hi); break;
  case ISD::INSERT_SUBVECTOR: SplitVecRes_INSERT_SUBVECTOR(N, Lo, Hi); break;
  // FP ops whose second operand has a different type than the result.
  case ISD::FPOWI:
  case ISD::FLDEXP:
  case ISD::FCOPYSIGN: SplitVecRes_FPOp_MultiType(N, Lo, Hi); break;
  case ISD::IS_FPCLASS: SplitVecRes_IS_FPCLASS(N, Lo, Hi); break;
  case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
  case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(N, Lo, Hi); break;
  case ISD::SPLAT_VECTOR:
  case ISD::SCALAR_TO_VECTOR:
    SplitVecRes_ScalarOp(N, Lo, Hi);
    break;
  case ISD::STEP_VECTOR:
    SplitVecRes_STEP_VECTOR(N, Lo, Hi);
    break;
  case ISD::SIGN_EXTEND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
  // Memory operations.
  case ISD::LOAD:
    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
    break;
  case ISD::VP_LOAD:
    SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi);
    break;
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N), Lo, Hi);
    break;
  case ISD::MLOAD:
    SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
    break;
  case ISD::MGATHER:
  case ISD::VP_GATHER:
    SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, /*SplitSETCC*/ true);
    break;
  case ISD::VECTOR_COMPRESS:
    SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
    break;
  case ISD::SETCC:
  case ISD::VP_SETCC:
    SplitVecRes_SETCC(N, Lo, Hi);
    break;
  // Vector shuffles and permutations.
  case ISD::VECTOR_REVERSE:
    SplitVecRes_VECTOR_REVERSE(N, Lo, Hi);
    break;
  case ISD::VECTOR_SHUFFLE:
    SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
    break;
  case ISD::VECTOR_SPLICE:
    SplitVecRes_VECTOR_SPLICE(N, Lo, Hi);
    break;
  // These two register all their results themselves, hence the early return.
  case ISD::VECTOR_DEINTERLEAVE:
    SplitVecRes_VECTOR_DEINTERLEAVE(N);
    return;
  case ISD::VECTOR_INTERLEAVE:
    SplitVecRes_VECTOR_INTERLEAVE(N);
    return;
  case ISD::VAARG:
    SplitVecRes_VAARG(N, Lo, Hi);
    break;

  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
    break;

  // Unary operations (including their VP forms).
  case ISD::ABS:
  case ISD::VP_ABS:
  case ISD::BITREVERSE:
  case ISD::VP_BITREVERSE:
  case ISD::BSWAP:
  case ISD::VP_BSWAP:
  case ISD::CTLZ:
  case ISD::VP_CTLZ:
  case ISD::CTTZ:
  case ISD::VP_CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::VP_CTPOP:
  case ISD::FABS: case ISD::VP_FABS:
  case ISD::FACOS:
  case ISD::FASIN:
  case ISD::FATAN:
  case ISD::FCEIL:
  case ISD::VP_FCEIL:
  case ISD::FCOS:
  case ISD::FCOSH:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FEXP10:
  case ISD::FFLOOR:
  case ISD::VP_FFLOOR:
  case ISD::FLOG:
  case ISD::FLOG10:
  case ISD::FLOG2:
  case ISD::FNEARBYINT:
  case ISD::VP_FNEARBYINT:
  case ISD::FNEG: case ISD::VP_FNEG:
  case ISD::FREEZE:
  case ISD::ARITH_FENCE:
  case ISD::FP_EXTEND:
  case ISD::VP_FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::VP_FP_ROUND:
  case ISD::FP_TO_SINT:
  case ISD::VP_FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::FRINT:
  case ISD::VP_FRINT:
  case ISD::LRINT:
  case ISD::VP_LRINT:
  case ISD::LLRINT:
  case ISD::VP_LLRINT:
  case ISD::FROUND:
  case ISD::VP_FROUND:
  case ISD::FROUNDEVEN:
  case ISD::VP_FROUNDEVEN:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::FSIN:
  case ISD::FSINH:
  case ISD::FSQRT: case ISD::VP_SQRT:
  case ISD::FTAN:
  case ISD::FTANH:
  case ISD::FTRUNC:
  case ISD::VP_FROUNDTOZERO:
  case ISD::SINT_TO_FP:
  case ISD::VP_SINT_TO_FP:
  case ISD::TRUNCATE:
  case ISD::VP_TRUNCATE:
  case ISD::UINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
  case ISD::FCANONICALIZE:
  case ISD::AssertNoFPClass:
    SplitVecRes_UnaryOp(N, Lo, Hi);
    break;
  case ISD::ADDRSPACECAST:
    SplitVecRes_ADDRSPACECAST(N, Lo, Hi);
    break;
  // Unary operations that produce two vector results.
  case ISD::FMODF:
  case ISD::FFREXP:
  case ISD::FSINCOS:
  case ISD::FSINCOSPI:
    SplitVecRes_UnaryOpWithTwoResults(N, ResNo, Lo, Hi);
    break;

  case ISD::ANY_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    SplitVecRes_ExtendOp(N, Lo, Hi);
    break;

  // Binary operations (including their VP forms).
  case ISD::ADD: case ISD::VP_ADD:
  case ISD::SUB: case ISD::VP_SUB:
  case ISD::MUL: case ISD::VP_MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::ABDS:
  case ISD::ABDU:
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
  case ISD::FADD: case ISD::VP_FADD:
  case ISD::FSUB: case ISD::VP_FSUB:
  case ISD::FMUL: case ISD::VP_FMUL:
  case ISD::FMINNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::VP_FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::VP_FMAXNUM:
  case ISD::FMINIMUM:
  case ISD::VP_FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::FMINIMUMNUM:
  case ISD::FMAXIMUMNUM:
  case ISD::SDIV: case ISD::VP_SDIV:
  case ISD::UDIV: case ISD::VP_UDIV:
  case ISD::FDIV: case ISD::VP_FDIV:
  case ISD::FPOW:
  case ISD::FATAN2:
  case ISD::AND: case ISD::VP_AND:
  case ISD::OR: case ISD::VP_OR:
  case ISD::XOR: case ISD::VP_XOR:
  case ISD::SHL: case ISD::VP_SHL:
  case ISD::SRA: case ISD::VP_SRA:
  case ISD::SRL: case ISD::VP_SRL:
  case ISD::UREM: case ISD::VP_UREM:
  case ISD::SREM: case ISD::VP_SREM:
  case ISD::FREM: case ISD::VP_FREM:
  case ISD::SMIN: case ISD::VP_SMIN:
  case ISD::SMAX: case ISD::VP_SMAX:
  case ISD::UMIN: case ISD::VP_UMIN:
  case ISD::UMAX: case ISD::VP_UMAX:
  case ISD::SADDSAT: case ISD::VP_SADDSAT:
  case ISD::UADDSAT: case ISD::VP_UADDSAT:
  case ISD::SSUBSAT: case ISD::VP_SSUBSAT:
  case ISD::USUBSAT: case ISD::VP_USUBSAT:
  case ISD::SSHLSAT:
  case ISD::USHLSAT:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::VP_FCOPYSIGN:
    SplitVecRes_BinOp(N, Lo, Hi);
    break;
  // Ternary operations.
  case ISD::FMA: case ISD::VP_FMA:
  case ISD::FSHL:
  case ISD::VP_FSHL:
  case ISD::FSHR:
  case ISD::VP_FSHR:
    SplitVecRes_TernaryOp(N, Lo, Hi);
    break;

  case ISD::SCMP: case ISD::UCMP:
    SplitVecRes_CMP(N, Lo, Hi);
    break;

// Constrained (strict) FP operations, generated from ConstrainedOps.def.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    SplitVecRes_StrictFPOp(N, Lo, Hi);
    break;

  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_TO_SINT_SAT:
    SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
    break;

  // Overflow-reporting arithmetic (two results).
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::USUBO:
  case ISD::SSUBO:
  case ISD::UMULO:
  case ISD::SMULO:
    SplitVecRes_OverflowOp(N, ResNo, Lo, Hi);
    break;
  // Fixed-point arithmetic (extra scale operand).
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT:
    SplitVecRes_FIX(N, Lo, Hi);
    break;
  case ISD::EXPERIMENTAL_VP_SPLICE:
    SplitVecRes_VP_SPLICE(N, Lo, Hi);
    break;
  case ISD::EXPERIMENTAL_VP_REVERSE:
    SplitVecRes_VP_REVERSE(N, Lo, Hi);
    break;
  case ISD::PARTIAL_REDUCE_UMLA:
  case ISD::PARTIAL_REDUCE_SMLA:
  case ISD::PARTIAL_REDUCE_SUMLA:
    SplitVecRes_PARTIAL_REDUCE_MLA(N, Lo, Hi);
    break;
  case ISD::GET_ACTIVE_LANE_MASK:
    SplitVecRes_GET_ACTIVE_LANE_MASK(N, Lo, Hi);
    break;
  }

  // If Lo/Hi is null, the sub-method took care of registering results etc.
  if (Lo.getNode())
    SetSplitVector(SDValue(N, ResNo), Lo, Hi);
}
1405
// Advance \p Ptr past the low half (of type \p MemVT) of a split memory
// access, updating \p MPI to describe the new location. For scalable types,
// \p ScaledOffset (if non-null) accumulates the per-vscale byte offset.
void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
                                        MachinePointerInfo &MPI, SDValue &Ptr,
                                        uint64_t *ScaledOffset) {
  SDLoc DL(N);
  // Minimum byte size of the low half; for scalable vectors this is the
  // per-vscale contribution.
  unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8;

  if (MemVT.isScalableVector()) {
    // Scalable increment: vscale * IncrementSize bytes. The concrete offset
    // is unknown at compile time, so the pointer info degrades to just the
    // address space.
    SDValue BytesIncrement = DAG.getVScale(
        DL, Ptr.getValueType(),
        APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
    MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
    if (ScaledOffset)
      *ScaledOffset += IncrementSize;
    Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement,
                      SDNodeFlags::NoUnsignedWrap);
  } else {
    MPI = N->getPointerInfo().getWithOffset(IncrementSize);
    // Increment the pointer to the other half.
    Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::getFixed(IncrementSize));
  }
}
1427
SplitMask(SDValue Mask)1428 std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) {
1429 return SplitMask(Mask, SDLoc(Mask));
1430 }
1431
SplitMask(SDValue Mask,const SDLoc & DL)1432 std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask,
1433 const SDLoc &DL) {
1434 SDValue MaskLo, MaskHi;
1435 EVT MaskVT = Mask.getValueType();
1436 if (getTypeAction(MaskVT) == TargetLowering::TypeSplitVector)
1437 GetSplitVector(Mask, MaskLo, MaskHi);
1438 else
1439 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
1440 return std::make_pair(MaskLo, MaskHi);
1441 }
1442
SplitVecRes_BinOp(SDNode * N,SDValue & Lo,SDValue & Hi)1443 void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi) {
1444 SDValue LHSLo, LHSHi;
1445 GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
1446 SDValue RHSLo, RHSHi;
1447 GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
1448 SDLoc dl(N);
1449
1450 const SDNodeFlags Flags = N->getFlags();
1451 unsigned Opcode = N->getOpcode();
1452 if (N->getNumOperands() == 2) {
1453 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), LHSLo, RHSLo, Flags);
1454 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), LHSHi, RHSHi, Flags);
1455 return;
1456 }
1457
1458 assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
1459 assert(N->isVPOpcode() && "Expected VP opcode");
1460
1461 SDValue MaskLo, MaskHi;
1462 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
1463
1464 SDValue EVLLo, EVLHi;
1465 std::tie(EVLLo, EVLHi) =
1466 DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);
1467
1468 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(),
1469 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1470 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(),
1471 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1472 }
1473
SplitVecRes_TernaryOp(SDNode * N,SDValue & Lo,SDValue & Hi)1474 void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo,
1475 SDValue &Hi) {
1476 SDValue Op0Lo, Op0Hi;
1477 GetSplitVector(N->getOperand(0), Op0Lo, Op0Hi);
1478 SDValue Op1Lo, Op1Hi;
1479 GetSplitVector(N->getOperand(1), Op1Lo, Op1Hi);
1480 SDValue Op2Lo, Op2Hi;
1481 GetSplitVector(N->getOperand(2), Op2Lo, Op2Hi);
1482 SDLoc dl(N);
1483
1484 const SDNodeFlags Flags = N->getFlags();
1485 unsigned Opcode = N->getOpcode();
1486 if (N->getNumOperands() == 3) {
1487 Lo = DAG.getNode(Opcode, dl, Op0Lo.getValueType(), Op0Lo, Op1Lo, Op2Lo, Flags);
1488 Hi = DAG.getNode(Opcode, dl, Op0Hi.getValueType(), Op0Hi, Op1Hi, Op2Hi, Flags);
1489 return;
1490 }
1491
1492 assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
1493 assert(N->isVPOpcode() && "Expected VP opcode");
1494
1495 SDValue MaskLo, MaskHi;
1496 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
1497
1498 SDValue EVLLo, EVLHi;
1499 std::tie(EVLLo, EVLHi) =
1500 DAG.SplitEVL(N->getOperand(4), N->getValueType(0), dl);
1501
1502 Lo = DAG.getNode(Opcode, dl, Op0Lo.getValueType(),
1503 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1504 Hi = DAG.getNode(Opcode, dl, Op0Hi.getValueType(),
1505 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1506 }
1507
SplitVecRes_CMP(SDNode * N,SDValue & Lo,SDValue & Hi)1508 void DAGTypeLegalizer::SplitVecRes_CMP(SDNode *N, SDValue &Lo, SDValue &Hi) {
1509 LLVMContext &Ctxt = *DAG.getContext();
1510 SDLoc dl(N);
1511
1512 SDValue LHS = N->getOperand(0);
1513 SDValue RHS = N->getOperand(1);
1514
1515 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
1516 if (getTypeAction(LHS.getValueType()) == TargetLowering::TypeSplitVector) {
1517 GetSplitVector(LHS, LHSLo, LHSHi);
1518 GetSplitVector(RHS, RHSLo, RHSHi);
1519 } else {
1520 std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl);
1521 std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl);
1522 }
1523
1524 EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
1525 Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
1526 Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
1527 }
1528
SplitVecRes_FIX(SDNode * N,SDValue & Lo,SDValue & Hi)1529 void DAGTypeLegalizer::SplitVecRes_FIX(SDNode *N, SDValue &Lo, SDValue &Hi) {
1530 SDValue LHSLo, LHSHi;
1531 GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
1532 SDValue RHSLo, RHSHi;
1533 GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
1534 SDLoc dl(N);
1535 SDValue Op2 = N->getOperand(2);
1536
1537 unsigned Opcode = N->getOpcode();
1538 Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), LHSLo, RHSLo, Op2,
1539 N->getFlags());
1540 Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), LHSHi, RHSHi, Op2,
1541 N->getFlags());
1542 }
1543
void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
                                           SDValue &Hi) {
  // We know the result is a vector. The input may be either a vector or a
  // scalar value.
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  SDLoc dl(N);

  SDValue InOp = N->getOperand(0);
  EVT InVT = InOp.getValueType();

  // Handle some special cases efficiently.
  switch (getTypeAction(InVT)) {
  case TargetLowering::TypeLegal:
  case TargetLowering::TypePromoteInteger:
  case TargetLowering::TypePromoteFloat:
  case TargetLowering::TypeSoftPromoteHalf:
  case TargetLowering::TypeSoftenFloat:
  case TargetLowering::TypeScalarizeVector:
  case TargetLowering::TypeWidenVector:
    // No shortcut; fall through to the generic integer-split path below.
    break;
  case TargetLowering::TypeExpandInteger:
  case TargetLowering::TypeExpandFloat:
    // A scalar to vector conversion, where the scalar needs expansion.
    // If the vector is being split in two then we can just convert the
    // expanded pieces.
    if (LoVT == HiVT) {
      GetExpandedOp(InOp, Lo, Hi);
      // Expanded pieces are in little-endian order; swap on big-endian
      // targets so Lo/Hi land in the correct halves.
      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);
      Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
      Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
      return;
    }
    break;
  case TargetLowering::TypeSplitVector:
    // If the input is a vector that needs to be split, convert each split
    // piece of the input now.
    GetSplitVector(InOp, Lo, Hi);
    Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
    Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
    return;
  case TargetLowering::TypeScalarizeScalableVector:
    report_fatal_error("Scalarization of scalable vectors is not supported.");
  }

  // Scalable vectors cannot round-trip through an integer type below; split
  // the operand as vectors instead.
  if (LoVT.isScalableVector()) {
    auto [InLo, InHi] = DAG.SplitVectorOperand(N, 0);
    Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, InLo);
    Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, InHi);
    return;
  }

  // In the general case, convert the input to an integer and split it by hand.
  EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits());
  EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits());
  if (DAG.getDataLayout().isBigEndian())
    std::swap(LoIntVT, HiIntVT);

  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);

  // SplitInteger returns the halves in little-endian order; swap back for
  // big-endian layouts.
  if (DAG.getDataLayout().isBigEndian())
    std::swap(Lo, Hi);
  Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
  Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
}
1610
SplitVecRes_BUILD_VECTOR(SDNode * N,SDValue & Lo,SDValue & Hi)1611 void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
1612 SDValue &Hi) {
1613 EVT LoVT, HiVT;
1614 SDLoc dl(N);
1615 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
1616 unsigned LoNumElts = LoVT.getVectorNumElements();
1617 SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
1618 Lo = DAG.getBuildVector(LoVT, dl, LoOps);
1619
1620 SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
1621 Hi = DAG.getBuildVector(HiVT, dl, HiOps);
1622 }
1623
SplitVecRes_CONCAT_VECTORS(SDNode * N,SDValue & Lo,SDValue & Hi)1624 void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
1625 SDValue &Hi) {
1626 assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS");
1627 SDLoc dl(N);
1628 unsigned NumSubvectors = N->getNumOperands() / 2;
1629 if (NumSubvectors == 1) {
1630 Lo = N->getOperand(0);
1631 Hi = N->getOperand(1);
1632 return;
1633 }
1634
1635 EVT LoVT, HiVT;
1636 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
1637
1638 SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
1639 Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, LoVT, LoOps);
1640
1641 SmallVector<SDValue, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end());
1642 Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HiVT, HiOps);
1643 }
1644
SplitVecRes_EXTRACT_SUBVECTOR(SDNode * N,SDValue & Lo,SDValue & Hi)1645 void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
1646 SDValue &Hi) {
1647 SDValue Vec = N->getOperand(0);
1648 SDValue Idx = N->getOperand(1);
1649 SDLoc dl(N);
1650
1651 EVT LoVT, HiVT;
1652 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
1653
1654 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
1655 uint64_t IdxVal = Idx->getAsZExtVal();
1656 Hi = DAG.getNode(
1657 ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
1658 DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl));
1659 }
1660
void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
                                                    SDValue &Hi) {
  SDValue Vec = N->getOperand(0);
  SDValue SubVec = N->getOperand(1);
  SDValue Idx = N->getOperand(2);
  SDLoc dl(N);
  GetSplitVector(Vec, Lo, Hi);

  EVT VecVT = Vec.getValueType();
  EVT LoVT = Lo.getValueType();
  EVT SubVecVT = SubVec.getValueType();
  unsigned VecElems = VecVT.getVectorMinNumElements();
  unsigned SubElems = SubVecVT.getVectorMinNumElements();
  unsigned LoElems = LoVT.getVectorMinNumElements();

  // If we know the index is in the first half, and we know the subvector
  // doesn't cross the boundary between the halves, we can avoid spilling the
  // vector, and insert into the lower half of the split vector directly.
  unsigned IdxVal = Idx->getAsZExtVal();
  if (IdxVal + SubElems <= LoElems) {
    Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx);
    return;
  }
  // Similarly if the subvector is fully in the high half, but mind that we
  // can't tell whether a fixed-length subvector is fully within the high half
  // of a scalable vector.
  if (VecVT.isScalableVector() == SubVecVT.isScalableVector() &&
      IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
    Hi = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, Hi.getValueType(), Hi, SubVec,
                     DAG.getVectorIdxConstant(IdxVal - LoElems, dl));
    return;
  }

  // Inserting an i1 subvector into an undef vector: if widening the subvector
  // yields the full vector type, the insert is just a split of the widened
  // subvector.
  if (getTypeAction(SubVecVT) == TargetLowering::TypeWidenVector &&
      Vec.isUndef() && SubVecVT.getVectorElementType() == MVT::i1) {
    SDValue WideSubVec = GetWidenedVector(SubVec);
    if (WideSubVec.getValueType() == VecVT) {
      std::tie(Lo, Hi) = DAG.SplitVector(WideSubVec, SDLoc(WideSubVec));
      return;
    }
  }

  // Spill the vector to the stack.
  // In cases where the vector is illegal it will be broken down into parts
  // and stored in parts - we should use the alignment for the smallest part.
  Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false);
  SDValue StackPtr =
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
                               SmallestAlign);

  // Store the new subvector into the specified index.
  SDValue SubVecPtr =
      TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, SubVecVT, Idx);
  Store = DAG.getStore(Store, dl, SubVec, SubVecPtr,
                       MachinePointerInfo::getUnknownStack(MF));

  // Load the Lo part from the stack slot.
  Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
                   SmallestAlign);

  // Increment the pointer to the other part.
  auto *Load = cast<LoadSDNode>(Lo);
  MachinePointerInfo MPI = Load->getPointerInfo();
  IncrementPointer(Load, LoVT, MPI, StackPtr);

  // Load the Hi part from the stack slot.
  Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
}
1734
1735 // Handle splitting an FP where the second operand does not match the first
1736 // type. The second operand may be a scalar, or a vector that has exactly as
1737 // many elements as the first
SplitVecRes_FPOp_MultiType(SDNode * N,SDValue & Lo,SDValue & Hi)1738 void DAGTypeLegalizer::SplitVecRes_FPOp_MultiType(SDNode *N, SDValue &Lo,
1739 SDValue &Hi) {
1740 SDValue LHSLo, LHSHi;
1741 GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
1742 SDLoc DL(N);
1743
1744 SDValue RHSLo, RHSHi;
1745 SDValue RHS = N->getOperand(1);
1746 EVT RHSVT = RHS.getValueType();
1747 if (RHSVT.isVector()) {
1748 if (getTypeAction(RHSVT) == TargetLowering::TypeSplitVector)
1749 GetSplitVector(RHS, RHSLo, RHSHi);
1750 else
1751 std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, SDLoc(RHS));
1752
1753 Lo = DAG.getNode(N->getOpcode(), DL, LHSLo.getValueType(), LHSLo, RHSLo);
1754 Hi = DAG.getNode(N->getOpcode(), DL, LHSHi.getValueType(), LHSHi, RHSHi);
1755 } else {
1756 Lo = DAG.getNode(N->getOpcode(), DL, LHSLo.getValueType(), LHSLo, RHS);
1757 Hi = DAG.getNode(N->getOpcode(), DL, LHSHi.getValueType(), LHSHi, RHS);
1758 }
1759 }
1760
SplitVecRes_IS_FPCLASS(SDNode * N,SDValue & Lo,SDValue & Hi)1761 void DAGTypeLegalizer::SplitVecRes_IS_FPCLASS(SDNode *N, SDValue &Lo,
1762 SDValue &Hi) {
1763 SDLoc DL(N);
1764 SDValue ArgLo, ArgHi;
1765 SDValue Test = N->getOperand(1);
1766 SDValue FpValue = N->getOperand(0);
1767 if (getTypeAction(FpValue.getValueType()) == TargetLowering::TypeSplitVector)
1768 GetSplitVector(FpValue, ArgLo, ArgHi);
1769 else
1770 std::tie(ArgLo, ArgHi) = DAG.SplitVector(FpValue, SDLoc(FpValue));
1771 EVT LoVT, HiVT;
1772 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
1773
1774 Lo = DAG.getNode(ISD::IS_FPCLASS, DL, LoVT, ArgLo, Test, N->getFlags());
1775 Hi = DAG.getNode(ISD::IS_FPCLASS, DL, HiVT, ArgHi, Test, N->getFlags());
1776 }
1777
SplitVecRes_InregOp(SDNode * N,SDValue & Lo,SDValue & Hi)1778 void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
1779 SDValue &Hi) {
1780 SDValue LHSLo, LHSHi;
1781 GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
1782 SDLoc dl(N);
1783
1784 EVT LoVT, HiVT;
1785 std::tie(LoVT, HiVT) =
1786 DAG.GetSplitDestVTs(cast<VTSDNode>(N->getOperand(1))->getVT());
1787
1788 Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo,
1789 DAG.getValueType(LoVT));
1790 Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi,
1791 DAG.getValueType(HiVT));
1792 }
1793
void DAGTypeLegalizer::SplitVecRes_ExtVecInRegOp(SDNode *N, SDValue &Lo,
                                                 SDValue &Hi) {
  // Split an *_EXTEND_VECTOR_INREG node. These extend only the low elements
  // of the input, so everything the result needs lives in the low input half;
  // the "high" input is fabricated by shuffling the next group of elements
  // down to the bottom of a copy of InLo.
  unsigned Opcode = N->getOpcode();
  SDValue N0 = N->getOperand(0);

  SDLoc dl(N);
  SDValue InLo, InHi;

  if (getTypeAction(N0.getValueType()) == TargetLowering::TypeSplitVector)
    GetSplitVector(N0, InLo, InHi);
  else
    std::tie(InLo, InHi) = DAG.SplitVectorOperand(N, 0);

  EVT InLoVT = InLo.getValueType();
  unsigned InNumElements = InLoVT.getVectorNumElements();

  EVT OutLoVT, OutHiVT;
  std::tie(OutLoVT, OutHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  unsigned OutNumElements = OutLoVT.getVectorNumElements();
  assert((2 * OutNumElements) <= InNumElements &&
         "Illegal extend vector in reg split");

  // *_EXTEND_VECTOR_INREG instructions extend the lowest elements of the
  // input vector (i.e. we only use InLo):
  // OutLo will extend the first OutNumElements from InLo.
  // OutHi will extend the next OutNumElements from InLo.

  // Shuffle the elements from InLo for OutHi into the bottom elements to
  // create a 'fake' InHi.
  SmallVector<int, 8> SplitHi(InNumElements, -1);
  for (unsigned i = 0; i != OutNumElements; ++i)
    SplitHi[i] = i + OutNumElements;
  InHi = DAG.getVectorShuffle(InLoVT, dl, InLo, DAG.getUNDEF(InLoVT), SplitHi);

  Lo = DAG.getNode(Opcode, dl, OutLoVT, InLo);
  Hi = DAG.getNode(Opcode, dl, OutHiVT, InHi);
}
1831
void DAGTypeLegalizer::SplitVecRes_StrictFPOp(SDNode *N, SDValue &Lo,
                                              SDValue &Hi) {
  // Split a strict FP node: split every vector operand, feed the incoming
  // chain (operand 0) to both halves, and merge the two outgoing chains with
  // a TokenFactor that replaces the original chain result.
  unsigned NumOps = N->getNumOperands();
  SDValue Chain = N->getOperand(0);
  EVT LoVT, HiVT;
  SDLoc dl(N);
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));

  SmallVector<SDValue, 4> OpsLo(NumOps);
  SmallVector<SDValue, 4> OpsHi(NumOps);

  // The Chain is the first operand.
  OpsLo[0] = Chain;
  OpsHi[0] = Chain;

  // Now process the remaining operands. Scalar operands are used unchanged by
  // both halves.
  for (unsigned i = 1; i < NumOps; ++i) {
    SDValue Op = N->getOperand(i);
    SDValue OpLo = Op;
    SDValue OpHi = Op;

    EVT InVT = Op.getValueType();
    if (InVT.isVector()) {
      // If the input also splits, handle it directly for a
      // compile time speedup. Otherwise split it by hand.
      if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
        GetSplitVector(Op, OpLo, OpHi);
      else
        std::tie(OpLo, OpHi) = DAG.SplitVectorOperand(N, i);
    }

    OpsLo[i] = OpLo;
    OpsHi[i] = OpHi;
  }

  // Each half produces its data value plus a chain.
  EVT LoValueVTs[] = {LoVT, MVT::Other};
  EVT HiValueVTs[] = {HiVT, MVT::Other};
  Lo = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(LoValueVTs), OpsLo,
                   N->getFlags());
  Hi = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(HiValueVTs), OpsHi,
                   N->getFlags());

  // Build a factor node to remember that this Op is independent of the
  // other one.
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      Lo.getValue(1), Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Chain);
}
1883
SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(SDNode *N, unsigned ResNE) {
  // Unroll a strict FP vector op into up to ResNE scalar ops (padding with
  // undef if the source has fewer elements than ResNE), collect each scalar
  // op's chain output into a TokenFactor that replaces the node's chain
  // result, and return the rebuilt vector of scalar results.
  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  // The results of each unrolled operation, including the chain.
  EVT ChainVTs[] = {EltVT, MVT::Other};
  SmallVector<SDValue, 8> Chains;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    Operands[0] = Chain;
    // Extract element i from every vector operand; pass scalar operands
    // through unchanged.
    for (unsigned j = 1, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = DAG.getExtractVectorElt(dl, OperandEltVT, Operand, i);
      } else {
        Operands[j] = Operand;
      }
    }
    SDValue Scalar = DAG.getNode(N->getOpcode(), dl, ChainVTs, Operands);
    Scalar.getNode()->setFlags(N->getFlags());

    // Add in the scalar as well as its chain value to the
    // result vectors.
    Scalars.push_back(Scalar);
    Chains.push_back(Scalar.getValue(1));
  }

  // Pad any remaining result elements with undef.
  for (; i < ResNE; ++i)
    Scalars.push_back(DAG.getUNDEF(EltVT));

  // Build a new factor node to connect the chain back together.
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  ReplaceValueWith(SDValue(N, 1), Chain);

  // Create a new BUILD_VECTOR node
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, ResNE);
  return DAG.getBuildVector(VecVT, dl, Scalars);
}
1937
void DAGTypeLegalizer::SplitVecRes_OverflowOp(SDNode *N, unsigned ResNo,
                                              SDValue &Lo, SDValue &Hi) {
  // Split an arithmetic-with-overflow node that yields two vector results
  // (the arithmetic value and the overflow flags). ResNo identifies which of
  // the two results is being split here; the sibling result is legalized as a
  // side effect at the end.
  SDLoc dl(N);
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
  std::tie(LoResVT, HiResVT) = DAG.GetSplitDestVTs(ResVT);
  std::tie(LoOvVT, HiOvVT) = DAG.GetSplitDestVTs(OvVT);

  SDValue LoLHS, HiLHS, LoRHS, HiRHS;
  if (getTypeAction(ResVT) == TargetLowering::TypeSplitVector) {
    GetSplitVector(N->getOperand(0), LoLHS, HiLHS);
    GetSplitVector(N->getOperand(1), LoRHS, HiRHS);
  } else {
    std::tie(LoLHS, HiLHS) = DAG.SplitVectorOperand(N, 0);
    std::tie(LoRHS, HiRHS) = DAG.SplitVectorOperand(N, 1);
  }

  unsigned Opcode = N->getOpcode();
  SDVTList LoVTs = DAG.getVTList(LoResVT, LoOvVT);
  SDVTList HiVTs = DAG.getVTList(HiResVT, HiOvVT);
  SDNode *LoNode = DAG.getNode(Opcode, dl, LoVTs, LoLHS, LoRHS).getNode();
  SDNode *HiNode = DAG.getNode(Opcode, dl, HiVTs, HiLHS, HiRHS).getNode();
  LoNode->setFlags(N->getFlags());
  HiNode->setFlags(N->getFlags());

  Lo = SDValue(LoNode, ResNo);
  Hi = SDValue(HiNode, ResNo);

  // Replace the other vector result not being explicitly split here.
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  if (getTypeAction(OtherVT) == TargetLowering::TypeSplitVector) {
    SetSplitVector(SDValue(N, OtherNo),
                   SDValue(LoNode, OtherNo), SDValue(HiNode, OtherNo));
  } else {
    // The other result has a legal type; reassemble it with a concat.
    SDValue OtherVal = DAG.getNode(
        ISD::CONCAT_VECTORS, dl, OtherVT,
        SDValue(LoNode, OtherNo), SDValue(HiNode, OtherNo));
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  }
}
1980
void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
                                                     SDValue &Hi) {
  // Split the result of inserting Elt into Vec at index Idx. A constant index
  // that provably targets one half allows inserting into that half directly;
  // otherwise the vector is spilled to a stack slot, the element is stored
  // over it, and the two halves are reloaded.
  SDValue Vec = N->getOperand(0);
  SDValue Elt = N->getOperand(1);
  SDValue Idx = N->getOperand(2);
  SDLoc dl(N);
  GetSplitVector(Vec, Lo, Hi);

  if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    unsigned IdxVal = CIdx->getZExtValue();
    unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
    if (IdxVal < LoNumElts) {
      Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
                       Lo.getValueType(), Lo, Elt, Idx);
      return;
    } else if (!Vec.getValueType().isScalableVector()) {
      // For a scalable vector we can't prove IdxVal lands in the high half
      // (the low half has at least LoNumElts elements, possibly more), so
      // this shortcut only applies to fixed-length vectors.
      Hi = DAG.getInsertVectorElt(dl, Hi, Elt, IdxVal - LoNumElts);
      return;
    }
  }

  // Make the vector elements byte-addressable if they aren't already.
  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  if (!EltVT.isByteSized()) {
    EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
    VecVT = VecVT.changeElementType(EltVT);
    Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
    // Extend the element type to match if needed.
    if (EltVT.bitsGT(Elt.getValueType()))
      Elt = DAG.getNode(ISD::ANY_EXTEND, dl, EltVT, Elt);
  }

  // Spill the vector to the stack.
  // In cases where the vector is illegal it will be broken down into parts
  // and stored in parts - we should use the alignment for the smallest part.
  Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false);
  SDValue StackPtr =
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
                               SmallestAlign);

  // Store the new element. This may be larger than the vector element type,
  // so use a truncating store.
  SDValue EltPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
  Store = DAG.getTruncStore(
      Store, dl, Elt, EltPtr, MachinePointerInfo::getUnknownStack(MF), EltVT,
      commonAlignment(SmallestAlign,
                      EltVT.getFixedSizeInBits() / 8));

  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);

  // Load the Lo part from the stack slot.
  Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);

  // Increment the pointer to the other part.
  auto Load = cast<LoadSDNode>(Lo);
  MachinePointerInfo MPI = Load->getPointerInfo();
  IncrementPointer(Load, LoVT, MPI, StackPtr);

  Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);

  // If we adjusted the original type, we need to truncate the results.
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  if (LoVT != Lo.getValueType())
    Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Lo);
  if (HiVT != Hi.getValueType())
    Hi = DAG.getNode(ISD::TRUNCATE, dl, HiVT, Hi);
}
2055
void DAGTypeLegalizer::SplitVecRes_STEP_VECTOR(SDNode *N, SDValue &Lo,
                                               SDValue &Hi) {
  // Split a STEP_VECTOR (0, Step, 2*Step, ...) into two step vectors, where
  // every element of the high half is offset by the number of elements
  // covered by the low half times Step.
  EVT LoVT, HiVT;
  SDLoc dl(N);
  assert(N->getValueType(0).isScalableVector() &&
         "Only scalable vectors are supported for STEP_VECTOR");
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  SDValue Step = N->getOperand(0);

  Lo = DAG.getNode(ISD::STEP_VECTOR, dl, LoVT, Step);

  // Hi = Lo + (EltCnt * Step)
  // Since the type is scalable, the low half holds vscale * MinNumElements
  // elements, so the high half's starting value is a vscale multiple.
  EVT EltVT = Step.getValueType();
  APInt StepVal = Step->getAsAPIntVal();
  SDValue StartOfHi =
      DAG.getVScale(dl, EltVT, StepVal * LoVT.getVectorMinNumElements());
  StartOfHi = DAG.getSExtOrTrunc(StartOfHi, dl, HiVT.getVectorElementType());
  StartOfHi = DAG.getNode(ISD::SPLAT_VECTOR, dl, HiVT, StartOfHi);

  Hi = DAG.getNode(ISD::STEP_VECTOR, dl, HiVT, Step);
  Hi = DAG.getNode(ISD::ADD, dl, HiVT, Hi, StartOfHi);
}
2078
SplitVecRes_ScalarOp(SDNode * N,SDValue & Lo,SDValue & Hi)2079 void DAGTypeLegalizer::SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo,
2080 SDValue &Hi) {
2081 EVT LoVT, HiVT;
2082 SDLoc dl(N);
2083 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
2084 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));
2085 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
2086 Hi = DAG.getUNDEF(HiVT);
2087 } else {
2088 assert(N->getOpcode() == ISD::SPLAT_VECTOR && "Unexpected opcode");
2089 Hi = Lo;
2090 }
2091 }
2092
SplitVecRes_VP_SPLAT(SDNode * N,SDValue & Lo,SDValue & Hi)2093 void DAGTypeLegalizer::SplitVecRes_VP_SPLAT(SDNode *N, SDValue &Lo,
2094 SDValue &Hi) {
2095 SDLoc dl(N);
2096 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0));
2097 auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
2098 auto [EVLLo, EVLHi] = DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
2099 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0), MaskLo, EVLLo);
2100 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, N->getOperand(0), MaskHi, EVLHi);
2101 }
2102
void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
                                        SDValue &Hi) {
  // Split a vector load into two loads at adjacent addresses, tying their
  // output chains back together with a TokenFactor that replaces the original
  // chain result.
  assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
  EVT LoVT, HiVT;
  SDLoc dl(LD);
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));

  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Ch = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
  EVT MemoryVT = LD->getMemoryVT();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  // If either half would not be a whole number of bytes in memory, the high
  // half cannot be addressed directly; scalarize the load and split the
  // resulting value instead.
  if (!LoMemVT.isByteSized() || !HiMemVT.isByteSized()) {
    SDValue Value, NewChain;
    std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
    std::tie(Lo, Hi) = DAG.SplitVector(Value, dl);
    ReplaceValueWith(SDValue(LD, 1), NewChain);
    return;
  }

  Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
                   LD->getPointerInfo(), LoMemVT, LD->getBaseAlign(), MMOFlags,
                   AAInfo);

  // Advance Ptr (and compute the pointer info) past the low half's storage.
  MachinePointerInfo MPI;
  IncrementPointer(LD, LoMemVT, MPI, Ptr);

  Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset, MPI,
                   HiMemVT, LD->getBaseAlign(), MMOFlags, AAInfo);

  // Build a factor node to remember that this load is independent of the
  // other one.
  Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                   Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(LD, 1), Ch);
}
2148
void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
                                           SDValue &Hi) {
  // Split a vector-predicated load into two VP loads, dividing the mask and
  // the explicit vector length (EVL) between the halves and merging the
  // chains with a TokenFactor.
  assert(LD->isUnindexed() && "Indexed VP load during type legalization!");
  EVT LoVT, HiVT;
  SDLoc dl(LD);
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));

  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Ch = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  SDValue Offset = LD->getOffset();
  assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
  Align Alignment = LD->getBaseAlign();
  SDValue Mask = LD->getMask();
  SDValue EVL = LD->getVectorLength();
  EVT MemoryVT = LD->getMemoryVT();

  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, LoVT, &HiIsEmpty);

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  } else {
    if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
      GetSplitVector(Mask, MaskLo, MaskHi);
    else
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  }

  // Split EVL operand
  SDValue EVLLo, EVLHi;
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl);

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      LD->getPointerInfo(), MachineMemOperand::MOLoad,
      LocationSize::beforeOrAfterPointer(), Alignment, LD->getAAInfo(),
      LD->getRanges());

  Lo =
      DAG.getLoadVP(LD->getAddressingMode(), ExtType, LoVT, dl, Ch, Ptr, Offset,
                    MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad());

  if (HiIsEmpty) {
    // The hi vp_load has zero storage size. We therefore simply set it to
    // the low vp_load and rely on subsequent removal from the chain.
    Hi = Lo;
  } else {
    // Generate hi vp_load.
    Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG,
                                     LD->isExpandingLoad());

    // For scalable vectors the low half's store size is not known at compile
    // time, so the high half's pointer info can only record the address
    // space, not a fixed offset.
    MachinePointerInfo MPI;
    if (LoMemVT.isScalableVector())
      MPI = MachinePointerInfo(LD->getPointerInfo().getAddrSpace());
    else
      MPI = LD->getPointerInfo().getWithOffset(
          LoMemVT.getStoreSize().getFixedValue());

    MMO = DAG.getMachineFunction().getMachineMemOperand(
        MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
        Alignment, LD->getAAInfo(), LD->getRanges());

    Hi = DAG.getLoadVP(LD->getAddressingMode(), ExtType, HiVT, dl, Ch, Ptr,
                       Offset, MaskHi, EVLHi, HiMemVT, MMO,
                       LD->isExpandingLoad());
  }

  // Build a factor node to remember that this load is independent of the
  // other one.
  Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                   Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(LD, 1), Ch);
}
2229
void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD,
                                                   SDValue &Lo, SDValue &Hi) {
  // Split a VP strided load into two strided loads. The high half's base
  // address is the low base plus LoEVL * Stride, since the low half consumes
  // that many bytes of the strided sequence.
  assert(SLD->isUnindexed() &&
         "Indexed VP strided load during type legalization!");
  assert(SLD->getOffset().isUndef() &&
         "Unexpected indexed variable-length load offset");

  SDLoc DL(SLD);

  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(SLD->getValueType(0));

  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(SLD->getMemoryVT(), LoVT, &HiIsEmpty);

  // Split the mask, reusing an existing split where available.
  SDValue Mask = SLD->getMask();
  SDValue LoMask, HiMask;
  if (Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  } else {
    if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
      GetSplitVector(Mask, LoMask, HiMask);
    else
      std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
  }

  // Split the explicit vector length.
  SDValue LoEVL, HiEVL;
  std::tie(LoEVL, HiEVL) =
      DAG.SplitEVL(SLD->getVectorLength(), SLD->getValueType(0), DL);

  // Generate the low vp_strided_load
  Lo = DAG.getStridedLoadVP(
      SLD->getAddressingMode(), SLD->getExtensionType(), LoVT, DL,
      SLD->getChain(), SLD->getBasePtr(), SLD->getOffset(), SLD->getStride(),
      LoMask, LoEVL, LoMemVT, SLD->getMemOperand(), SLD->isExpandingLoad());

  if (HiIsEmpty) {
    // The high vp_strided_load has zero storage size. We therefore simply set
    // it to the low vp_strided_load and rely on subsequent removal from the
    // chain.
    Hi = Lo;
  } else {
    // Generate the high vp_strided_load.
    // To calculate the high base address, we need to sum to the low base
    // address stride number of bytes for each element already loaded by low,
    // that is: Ptr = Ptr + (LoEVL * Stride)
    EVT PtrVT = SLD->getBasePtr().getValueType();
    SDValue Increment =
        DAG.getNode(ISD::MUL, DL, PtrVT, LoEVL,
                    DAG.getSExtOrTrunc(SLD->getStride(), DL, PtrVT));
    SDValue Ptr =
        DAG.getNode(ISD::ADD, DL, PtrVT, SLD->getBasePtr(), Increment);

    Align Alignment = SLD->getBaseAlign();
    if (LoMemVT.isScalableVector())
      Alignment = commonAlignment(
          Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);

    MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()),
        MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
        Alignment, SLD->getAAInfo(), SLD->getRanges());

    Hi = DAG.getStridedLoadVP(SLD->getAddressingMode(), SLD->getExtensionType(),
                              HiVT, DL, SLD->getChain(), Ptr, SLD->getOffset(),
                              SLD->getStride(), HiMask, HiEVL, HiMemVT, MMO,
                              SLD->isExpandingLoad());
  }

  // Build a factor node to remember that this load is independent of the
  // other one.
  SDValue Ch = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(SLD, 1), Ch);
}
2310
void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
                                         SDValue &Lo, SDValue &Hi) {
  // Split a masked load into two masked loads, splitting the mask and
  // pass-through operands between the halves and merging the chains with a
  // TokenFactor.
  assert(MLD->isUnindexed() && "Indexed masked load during type legalization!");
  EVT LoVT, HiVT;
  SDLoc dl(MLD);
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));

  SDValue Ch = MLD->getChain();
  SDValue Ptr = MLD->getBasePtr();
  SDValue Offset = MLD->getOffset();
  assert(Offset.isUndef() && "Unexpected indexed masked load offset");
  SDValue Mask = MLD->getMask();
  SDValue PassThru = MLD->getPassThru();
  Align Alignment = MLD->getBaseAlign();
  ISD::LoadExtType ExtType = MLD->getExtensionType();

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  } else {
    if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
      GetSplitVector(Mask, MaskLo, MaskHi);
    else
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
  }

  EVT MemoryVT = MLD->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, LoVT, &HiIsEmpty);

  // Split the pass-through value, reusing an existing split when available.
  SDValue PassThruLo, PassThruHi;
  if (getTypeAction(PassThru.getValueType()) == TargetLowering::TypeSplitVector)
    GetSplitVector(PassThru, PassThruLo, PassThruHi);
  else
    std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MLD->getPointerInfo(), MachineMemOperand::MOLoad,
      LocationSize::beforeOrAfterPointer(), Alignment, MLD->getAAInfo(),
      MLD->getRanges());

  Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
                         MMO, MLD->getAddressingMode(), ExtType,
                         MLD->isExpandingLoad());

  if (HiIsEmpty) {
    // The hi masked load has zero storage size. We therefore simply set it to
    // the low masked load and rely on subsequent removal from the chain.
    Hi = Lo;
  } else {
    // Generate hi masked load.
    Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG,
                                     MLD->isExpandingLoad());

    // For scalable vectors the low half's store size is not a compile-time
    // constant, so only the address space can be recorded.
    MachinePointerInfo MPI;
    if (LoMemVT.isScalableVector())
      MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace());
    else
      MPI = MLD->getPointerInfo().getWithOffset(
          LoMemVT.getStoreSize().getFixedValue());

    MMO = DAG.getMachineFunction().getMachineMemOperand(
        MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
        Alignment, MLD->getAAInfo(), MLD->getRanges());

    Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
                           HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
                           MLD->isExpandingLoad());
  }

  // Build a factor node to remember that this load is independent of the
  // other one.
  Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                   Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(MLD, 1), Ch);

}
2394
void DAGTypeLegalizer::SplitVecRes_Gather(MemSDNode *N, SDValue &Lo,
                                          SDValue &Hi, bool SplitSETCC) {
  // Split a gather - either a masked gather or a VP gather; the two node
  // classes expose mask/index/scale through different accessors, so they are
  // normalized into a common Operands struct first.
  EVT LoVT, HiVT;
  SDLoc dl(N);
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));

  SDValue Ch = N->getChain();
  SDValue Ptr = N->getBasePtr();
  struct Operands {
    SDValue Mask;
    SDValue Index;
    SDValue Scale;
  } Ops = [&]() -> Operands {
    if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) {
      return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
    }
    auto *VPSC = cast<VPGatherSDNode>(N);
    return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
  }();

  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getBaseAlign();

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
  } else {
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
  }

  EVT LoMemVT, HiMemVT;
  // Split MemoryVT
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  // Split the index vector, reusing an existing split when available.
  SDValue IndexHi, IndexLo;
  if (getTypeAction(Ops.Index.getValueType()) ==
      TargetLowering::TypeSplitVector)
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
  else
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl);

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      N->getPointerInfo(), MachineMemOperand::MOLoad,
      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
      N->getRanges());

  if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
    // Masked gather: also split the pass-through value.
    SDValue PassThru = MGT->getPassThru();
    SDValue PassThruLo, PassThruHi;
    if (getTypeAction(PassThru.getValueType()) ==
        TargetLowering::TypeSplitVector)
      GetSplitVector(PassThru, PassThruLo, PassThruHi);
    else
      std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);

    ISD::LoadExtType ExtType = MGT->getExtensionType();
    ISD::MemIndexType IndexTy = MGT->getIndexType();

    SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale};
    Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl,
                             OpsLo, MMO, IndexTy, ExtType);

    SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl,
                             OpsHi, MMO, IndexTy, ExtType);
  } else {
    // VP gather: split the explicit vector length instead.
    auto *VPGT = cast<VPGatherSDNode>(N);
    SDValue EVLLo, EVLHi;
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);

    SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
    Lo = DAG.getGatherVP(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo,
                         MMO, VPGT->getIndexType());

    SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
    Hi = DAG.getGatherVP(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi,
                         MMO, VPGT->getIndexType());
  }

  // Build a factor node to remember that this load is independent of the
  // other one.
  Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                   Hi.getValue(1));

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Ch);
}
2485
SplitVecRes_VECTOR_COMPRESS(SDNode * N,SDValue & Lo,SDValue & Hi)2486 void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo,
2487 SDValue &Hi) {
2488 // This is not "trivial", as there is a dependency between the two subvectors.
2489 // Depending on the number of 1s in the mask, the elements from the Hi vector
2490 // need to be moved to the Lo vector. Passthru values make this even harder.
2491 // We try to use VECTOR_COMPRESS if the target has custom lowering with
2492 // smaller types and passthru is undef, as it is most likely faster than the
2493 // fully expand path. Otherwise, just do the full expansion as one "big"
2494 // operation and then extract the Lo and Hi vectors from that. This gets
2495 // rid of VECTOR_COMPRESS and all other operands can be legalized later.
2496 SDLoc DL(N);
2497 EVT VecVT = N->getValueType(0);
2498
2499 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VecVT);
2500 bool HasCustomLowering = false;
2501 EVT CheckVT = LoVT;
2502 while (CheckVT.getVectorMinNumElements() > 1) {
2503 // TLI.isOperationLegalOrCustom requires a legal type, but we could have a
2504 // custom lowering for illegal types. So we do the checks separately.
2505 if (TLI.isOperationLegal(ISD::VECTOR_COMPRESS, CheckVT) ||
2506 TLI.isOperationCustom(ISD::VECTOR_COMPRESS, CheckVT)) {
2507 HasCustomLowering = true;
2508 break;
2509 }
2510 CheckVT = CheckVT.getHalfNumVectorElementsVT(*DAG.getContext());
2511 }
2512
2513 SDValue Passthru = N->getOperand(2);
2514 if (!HasCustomLowering) {
2515 SDValue Compressed = TLI.expandVECTOR_COMPRESS(N, DAG);
2516 std::tie(Lo, Hi) = DAG.SplitVector(Compressed, DL, LoVT, HiVT);
2517 return;
2518 }
2519
2520 // Try to VECTOR_COMPRESS smaller vectors and combine via a stack store+load.
2521 SDValue Mask = N->getOperand(1);
2522 SDValue LoMask, HiMask;
2523 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
2524 std::tie(LoMask, HiMask) = SplitMask(Mask);
2525
2526 SDValue UndefPassthru = DAG.getUNDEF(LoVT);
2527 Lo = DAG.getNode(ISD::VECTOR_COMPRESS, DL, LoVT, Lo, LoMask, UndefPassthru);
2528 Hi = DAG.getNode(ISD::VECTOR_COMPRESS, DL, HiVT, Hi, HiMask, UndefPassthru);
2529
2530 SDValue StackPtr = DAG.CreateStackTemporary(
2531 VecVT.getStoreSize(), DAG.getReducedAlign(VecVT, /*UseABI=*/false));
2532 MachineFunction &MF = DAG.getMachineFunction();
2533 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(
2534 MF, cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex());
2535
2536 // We store LoVec and then insert HiVec starting at offset=|1s| in LoMask.
2537 SDValue WideMask =
2538 DAG.getNode(ISD::ZERO_EXTEND, DL, LoMask.getValueType(), LoMask);
2539 SDValue Offset = DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, WideMask);
2540 Offset = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Offset);
2541
2542 SDValue Chain = DAG.getEntryNode();
2543 Chain = DAG.getStore(Chain, DL, Lo, StackPtr, PtrInfo);
2544 Chain = DAG.getStore(Chain, DL, Hi, Offset,
2545 MachinePointerInfo::getUnknownStack(MF));
2546
2547 SDValue Compressed = DAG.getLoad(VecVT, DL, Chain, StackPtr, PtrInfo);
2548 if (!Passthru.isUndef()) {
2549 Compressed =
2550 DAG.getNode(ISD::VSELECT, DL, VecVT, Mask, Compressed, Passthru);
2551 }
2552 std::tie(Lo, Hi) = DAG.SplitVector(Compressed, DL);
2553 }
2554
SplitVecRes_SETCC(SDNode * N,SDValue & Lo,SDValue & Hi)2555 void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
2556 assert(N->getValueType(0).isVector() &&
2557 N->getOperand(0).getValueType().isVector() &&
2558 "Operand types must be vectors");
2559
2560 EVT LoVT, HiVT;
2561 SDLoc DL(N);
2562 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
2563
2564 // If the input also splits, handle it directly. Otherwise split it by hand.
2565 SDValue LL, LH, RL, RH;
2566 if (getTypeAction(N->getOperand(0).getValueType()) ==
2567 TargetLowering::TypeSplitVector)
2568 GetSplitVector(N->getOperand(0), LL, LH);
2569 else
2570 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
2571
2572 if (getTypeAction(N->getOperand(1).getValueType()) ==
2573 TargetLowering::TypeSplitVector)
2574 GetSplitVector(N->getOperand(1), RL, RH);
2575 else
2576 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
2577
2578 if (N->getOpcode() == ISD::SETCC) {
2579 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
2580 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
2581 } else {
2582 assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
2583 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2584 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
2585 std::tie(EVLLo, EVLHi) =
2586 DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
2587 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2), MaskLo,
2588 EVLLo);
2589 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2), MaskHi,
2590 EVLHi);
2591 }
2592 }
2593
SplitVecRes_UnaryOp(SDNode * N,SDValue & Lo,SDValue & Hi)2594 void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
2595 SDValue &Hi) {
2596 // Get the dest types - they may not match the input types, e.g. int_to_fp.
2597 EVT LoVT, HiVT;
2598 SDLoc dl(N);
2599 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
2600
2601 // If the input also splits, handle it directly for a compile time speedup.
2602 // Otherwise split it by hand.
2603 EVT InVT = N->getOperand(0).getValueType();
2604 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
2605 GetSplitVector(N->getOperand(0), Lo, Hi);
2606 else
2607 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
2608
2609 const SDNodeFlags Flags = N->getFlags();
2610 unsigned Opcode = N->getOpcode();
2611 if (N->getNumOperands() <= 2) {
2612 if (Opcode == ISD::FP_ROUND || Opcode == ISD::AssertNoFPClass) {
2613 Lo = DAG.getNode(Opcode, dl, LoVT, Lo, N->getOperand(1), Flags);
2614 Hi = DAG.getNode(Opcode, dl, HiVT, Hi, N->getOperand(1), Flags);
2615 } else {
2616 Lo = DAG.getNode(Opcode, dl, LoVT, Lo, Flags);
2617 Hi = DAG.getNode(Opcode, dl, HiVT, Hi, Flags);
2618 }
2619 return;
2620 }
2621
2622 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
2623 assert(N->isVPOpcode() && "Expected VP opcode");
2624
2625 SDValue MaskLo, MaskHi;
2626 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
2627
2628 SDValue EVLLo, EVLHi;
2629 std::tie(EVLLo, EVLHi) =
2630 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
2631
2632 Lo = DAG.getNode(Opcode, dl, LoVT, {Lo, MaskLo, EVLLo}, Flags);
2633 Hi = DAG.getNode(Opcode, dl, HiVT, {Hi, MaskHi, EVLHi}, Flags);
2634 }
2635
SplitVecRes_ADDRSPACECAST(SDNode * N,SDValue & Lo,SDValue & Hi)2636 void DAGTypeLegalizer::SplitVecRes_ADDRSPACECAST(SDNode *N, SDValue &Lo,
2637 SDValue &Hi) {
2638 SDLoc dl(N);
2639 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0));
2640
2641 // If the input also splits, handle it directly for a compile time speedup.
2642 // Otherwise split it by hand.
2643 EVT InVT = N->getOperand(0).getValueType();
2644 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
2645 GetSplitVector(N->getOperand(0), Lo, Hi);
2646 else
2647 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
2648
2649 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
2650 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2651 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2652 Lo = DAG.getAddrSpaceCast(dl, LoVT, Lo, SrcAS, DestAS);
2653 Hi = DAG.getAddrSpaceCast(dl, HiVT, Hi, SrcAS, DestAS);
2654 }
2655
SplitVecRes_UnaryOpWithTwoResults(SDNode * N,unsigned ResNo,SDValue & Lo,SDValue & Hi)2656 void DAGTypeLegalizer::SplitVecRes_UnaryOpWithTwoResults(SDNode *N,
2657 unsigned ResNo,
2658 SDValue &Lo,
2659 SDValue &Hi) {
2660 SDLoc dl(N);
2661 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(N->getValueType(0));
2662 auto [LoVT1, HiVT1] = DAG.GetSplitDestVTs(N->getValueType(1));
2663
2664 // If the input also splits, handle it directly for a compile time speedup.
2665 // Otherwise split it by hand.
2666 EVT InVT = N->getOperand(0).getValueType();
2667 if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
2668 GetSplitVector(N->getOperand(0), Lo, Hi);
2669 else
2670 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
2671
2672 Lo = DAG.getNode(N->getOpcode(), dl, {LoVT, LoVT1}, Lo);
2673 Hi = DAG.getNode(N->getOpcode(), dl, {HiVT, HiVT1}, Hi);
2674 Lo->setFlags(N->getFlags());
2675 Hi->setFlags(N->getFlags());
2676
2677 SDNode *HiNode = Hi.getNode();
2678 SDNode *LoNode = Lo.getNode();
2679
2680 // Replace the other vector result not being explicitly split here.
2681 unsigned OtherNo = 1 - ResNo;
2682 EVT OtherVT = N->getValueType(OtherNo);
2683 if (getTypeAction(OtherVT) == TargetLowering::TypeSplitVector) {
2684 SetSplitVector(SDValue(N, OtherNo), SDValue(LoNode, OtherNo),
2685 SDValue(HiNode, OtherNo));
2686 } else {
2687 SDValue OtherVal =
2688 DAG.getNode(ISD::CONCAT_VECTORS, dl, OtherVT, SDValue(LoNode, OtherNo),
2689 SDValue(HiNode, OtherNo));
2690 ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
2691 }
2692 }
2693
SplitVecRes_ExtendOp(SDNode * N,SDValue & Lo,SDValue & Hi)2694 void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
2695 SDValue &Hi) {
2696 SDLoc dl(N);
2697 EVT SrcVT = N->getOperand(0).getValueType();
2698 EVT DestVT = N->getValueType(0);
2699 EVT LoVT, HiVT;
2700 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(DestVT);
2701
2702 // We can do better than a generic split operation if the extend is doing
2703 // more than just doubling the width of the elements and the following are
2704 // true:
2705 // - The number of vector elements is even,
2706 // - the source type is legal,
2707 // - the type of a split source is illegal,
2708 // - the type of an extended (by doubling element size) source is legal, and
2709 // - the type of that extended source when split is legal.
2710 //
2711 // This won't necessarily completely legalize the operation, but it will
2712 // more effectively move in the right direction and prevent falling down
2713 // to scalarization in many cases due to the input vector being split too
2714 // far.
2715 if (SrcVT.getVectorElementCount().isKnownEven() &&
2716 SrcVT.getScalarSizeInBits() * 2 < DestVT.getScalarSizeInBits()) {
2717 LLVMContext &Ctx = *DAG.getContext();
2718 EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx);
2719 EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx);
2720
2721 EVT SplitLoVT, SplitHiVT;
2722 std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
2723 if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) &&
2724 TLI.isTypeLegal(NewSrcVT) && TLI.isTypeLegal(SplitLoVT)) {
2725 LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:";
2726 N->dump(&DAG); dbgs() << "\n");
2727 if (!N->isVPOpcode()) {
2728 // Extend the source vector by one step.
2729 SDValue NewSrc =
2730 DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
2731 // Get the low and high halves of the new, extended one step, vector.
2732 std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl);
2733 // Extend those vector halves the rest of the way.
2734 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo);
2735 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi);
2736 return;
2737 }
2738
2739 // Extend the source vector by one step.
2740 SDValue NewSrc =
2741 DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0),
2742 N->getOperand(1), N->getOperand(2));
2743 // Get the low and high halves of the new, extended one step, vector.
2744 std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl);
2745
2746 SDValue MaskLo, MaskHi;
2747 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
2748
2749 SDValue EVLLo, EVLHi;
2750 std::tie(EVLLo, EVLHi) =
2751 DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
2752 // Extend those vector halves the rest of the way.
2753 Lo = DAG.getNode(N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2754 Hi = DAG.getNode(N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2755 return;
2756 }
2757 }
2758 // Fall back to the generic unary operator splitting otherwise.
2759 SplitVecRes_UnaryOp(N, Lo, Hi);
2760 }
2761
void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
                                                  SDValue &Lo, SDValue &Hi) {
  // Split a vector shuffle into two half-width shuffles. The mask of each
  // output half may pull elements from any of the four split inputs, so this
  // routine first tries to simplify the inputs (peek through nested shuffles
  // and concats, deduplicate), then uses processShuffleMasks to emit each
  // half as at most a chain of two-operand shuffles or a BUILD_VECTOR.
  //
  // The low and high parts of the original input give four input vectors.
  SDValue Inputs[4];
  SDLoc DL(N);
  GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
  GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
  EVT NewVT = Inputs[0].getValueType();
  unsigned NewElts = NewVT.getVectorNumElements();

  // True if the value is a constant splat or a build_vector of constants
  // (only for the first result of its defining node).
  auto &&IsConstant = [](const SDValue &N) {
    APInt SplatValue;
    return N.getResNo() == 0 &&
           (ISD::isConstantSplatVector(N.getNode(), SplatValue) ||
            ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
  };
  // Materialize the shuffle of two BUILD_VECTOR inputs directly as a new
  // BUILD_VECTOR, selecting scalar operands according to Mask.
  auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
                                                         SDValue &Input2,
                                                         ArrayRef<int> Mask) {
    assert(Input1->getOpcode() == ISD::BUILD_VECTOR &&
           Input2->getOpcode() == ISD::BUILD_VECTOR &&
           "Expected build vector node.");
    EVT EltVT = NewVT.getVectorElementType();
    SmallVector<SDValue> Ops(NewElts, DAG.getUNDEF(EltVT));
    for (unsigned I = 0; I < NewElts; ++I) {
      if (Mask[I] == PoisonMaskElem)
        continue;
      unsigned Idx = Mask[I];
      // Mask indices >= NewElts select from the second input.
      if (Idx >= NewElts)
        Ops[I] = Input2.getOperand(Idx - NewElts);
      else
        Ops[I] = Input1.getOperand(Idx);
      // Make the type of all elements the same as the element type.
      if (Ops[I].getValueType().bitsGT(EltVT))
        Ops[I] = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Ops[I]);
    }
    return DAG.getBuildVector(NewVT, DL, Ops);
  };

  // If Lo or Hi uses elements from at most two of the four input vectors, then
  // express it as a vector shuffle of those two inputs. Otherwise extract the
  // input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
  SmallVector<int> OrigMask(N->getMask());
  // Try to pack incoming shuffles/inputs.
  auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
                                         &DL](SmallVectorImpl<int> &Mask) {
    // Check if all inputs are shuffles of the same operands or non-shuffles.
    MapVector<std::pair<SDValue, SDValue>, SmallVector<unsigned>> ShufflesIdxs;
    for (unsigned Idx = 0; Idx < std::size(Inputs); ++Idx) {
      SDValue Input = Inputs[Idx];
      auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
      if (!Shuffle ||
          Input.getOperand(0).getValueType() != Input.getValueType())
        continue;
      // Record the shuffle under both operand orderings so two shuffles of
      // the same operands in either order land in the same bucket.
      ShufflesIdxs[std::make_pair(Input.getOperand(0), Input.getOperand(1))]
          .push_back(Idx);
      ShufflesIdxs[std::make_pair(Input.getOperand(1), Input.getOperand(0))]
          .push_back(Idx);
    }
    for (auto &P : ShufflesIdxs) {
      if (P.second.size() < 2)
        continue;
      // Use shuffles operands instead of shuffles themselves.
      // 1. Adjust mask.
      for (int &Idx : Mask) {
        if (Idx == PoisonMaskElem)
          continue;
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
          Idx = PoisonMaskElem;
          continue;
        }
        auto *Shuffle =
            dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
        if (!Shuffle || !is_contained(P.second, SrcRegIdx))
          continue;
        int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
        if (MaskElt == PoisonMaskElem) {
          Idx = PoisonMaskElem;
          continue;
        }
        // Retarget the mask entry at the shuffle's underlying operand.
        Idx = MaskElt % NewElts +
              P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
                           ? 0
                           : 1] *
                  NewElts;
      }
      // 2. Update inputs.
      Inputs[P.second[0]] = P.first.first;
      Inputs[P.second[1]] = P.first.second;
      // Clear the pair data.
      P.second.clear();
      ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
    }
    // Check if any concat_vectors can be simplified.
    SmallBitVector UsedSubVector(2 * std::size(Inputs));
    for (int &Idx : Mask) {
      if (Idx == PoisonMaskElem)
        continue;
      unsigned SrcRegIdx = Idx / NewElts;
      if (Inputs[SrcRegIdx].isUndef()) {
        Idx = PoisonMaskElem;
        continue;
      }
      TargetLowering::LegalizeTypeAction TypeAction =
          getTypeAction(Inputs[SrcRegIdx].getValueType());
      // Mark which half of a two-operand concat is actually referenced.
      if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS &&
          Inputs[SrcRegIdx].getNumOperands() == 2 &&
          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
          (TypeAction == TargetLowering::TypeLegal ||
           TypeAction == TargetLowering::TypeWidenVector))
        UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
    }
    if (UsedSubVector.count() > 1) {
      // Pair up concats that each contribute only one subvector, so two of
      // them can be fused into a single concat input.
      SmallVector<SmallVector<std::pair<unsigned, int>, 2>> Pairs;
      for (unsigned I = 0; I < std::size(Inputs); ++I) {
        if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
          continue;
        if (Pairs.empty() || Pairs.back().size() == 2)
          Pairs.emplace_back();
        if (UsedSubVector.test(2 * I)) {
          Pairs.back().emplace_back(I, 0);
        } else {
          assert(UsedSubVector.test(2 * I + 1) &&
                 "Expected to be used one of the subvectors.");
          Pairs.back().emplace_back(I, 1);
        }
      }
      if (!Pairs.empty() && Pairs.front().size() > 1) {
        // Adjust mask.
        for (int &Idx : Mask) {
          if (Idx == PoisonMaskElem)
            continue;
          unsigned SrcRegIdx = Idx / NewElts;
          auto *It = find_if(
              Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
                return Idxs.front().first == SrcRegIdx ||
                       Idxs.back().first == SrcRegIdx;
              });
          if (It == Pairs.end())
            continue;
          Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
                (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
        }
        // Adjust inputs.
        for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
          Inputs[Idxs.front().first] = DAG.getNode(
              ISD::CONCAT_VECTORS, DL,
              Inputs[Idxs.front().first].getValueType(),
              Inputs[Idxs.front().first].getOperand(Idxs.front().second),
              Inputs[Idxs.back().first].getOperand(Idxs.back().second));
        }
      }
    }
    bool Changed;
    do {
      // Try to remove extra shuffles (except broadcasts) and shuffles with the
      // reused operands.
      Changed = false;
      for (unsigned I = 0; I < std::size(Inputs); ++I) {
        auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
        if (!Shuffle)
          continue;
        if (Shuffle->getOperand(0).getValueType() != NewVT)
          continue;
        int Op = -1;
        if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
            !Shuffle->isSplat()) {
          Op = 0;
        } else if (!Inputs[I].hasOneUse() &&
                   !Shuffle->getOperand(1).isUndef()) {
          // Find the only used operand, if possible.
          for (int &Idx : Mask) {
            if (Idx == PoisonMaskElem)
              continue;
            unsigned SrcRegIdx = Idx / NewElts;
            if (SrcRegIdx != I)
              continue;
            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
            if (MaskElt == PoisonMaskElem) {
              Idx = PoisonMaskElem;
              continue;
            }
            int OpIdx = MaskElt / NewElts;
            if (Op == -1) {
              Op = OpIdx;
              continue;
            }
            if (Op != OpIdx) {
              // Both operands are referenced; cannot peek through.
              Op = -1;
              break;
            }
          }
        }
        if (Op < 0) {
          // Try to check if one of the shuffle operands is used already.
          for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
            if (Shuffle->getOperand(OpIdx).isUndef())
              continue;
            auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
            if (It == std::end(Inputs))
              continue;
            int FoundOp = std::distance(std::begin(Inputs), It);
            // Found that operand is used already.
            // 1. Fix the mask for the reused operand.
            for (int &Idx : Mask) {
              if (Idx == PoisonMaskElem)
                continue;
              unsigned SrcRegIdx = Idx / NewElts;
              if (SrcRegIdx != I)
                continue;
              int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
              if (MaskElt == PoisonMaskElem) {
                Idx = PoisonMaskElem;
                continue;
              }
              int MaskIdx = MaskElt / NewElts;
              if (OpIdx == MaskIdx)
                Idx = MaskElt % NewElts + FoundOp * NewElts;
            }
            // 2. Set Op to the unused OpIdx.
            Op = (OpIdx + 1) % 2;
            break;
          }
        }
        if (Op >= 0) {
          // Replace the shuffle input by its chosen operand and remap the
          // affected mask entries accordingly.
          Changed = true;
          Inputs[I] = Shuffle->getOperand(Op);
          // Adjust mask.
          for (int &Idx : Mask) {
            if (Idx == PoisonMaskElem)
              continue;
            unsigned SrcRegIdx = Idx / NewElts;
            if (SrcRegIdx != I)
              continue;
            int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
            int OpIdx = MaskElt / NewElts;
            if (OpIdx != Op)
              continue;
            Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
          }
        }
      }
    } while (Changed);
  };
  TryPeekThroughShufflesInputs(OrigMask);
  // Process unique inputs.
  auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
                             NewElts](SmallVectorImpl<int> &Mask) {
    SetVector<SDValue> UniqueInputs;
    SetVector<SDValue> UniqueConstantInputs;
    for (const auto &I : Inputs) {
      if (IsConstant(I))
        UniqueConstantInputs.insert(I);
      else if (!I.isUndef())
        UniqueInputs.insert(I);
    }
    // Adjust mask in case of reused inputs. Also, need to insert constant
    // inputs at first, otherwise it affects the final outcome.
    if (UniqueInputs.size() != std::size(Inputs)) {
      auto &&UniqueVec = UniqueInputs.takeVector();
      auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
      unsigned ConstNum = UniqueConstantVec.size();
      for (int &Idx : Mask) {
        if (Idx == PoisonMaskElem)
          continue;
        unsigned SrcRegIdx = Idx / NewElts;
        if (Inputs[SrcRegIdx].isUndef()) {
          Idx = PoisonMaskElem;
          continue;
        }
        const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
        if (It != UniqueConstantVec.end()) {
          // Constants are placed first; remap to their deduplicated slot.
          Idx = (Idx % NewElts) +
                NewElts * std::distance(UniqueConstantVec.begin(), It);
          assert(Idx >= 0 && "Expected defined mask idx.");
          continue;
        }
        const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
        assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
        Idx = (Idx % NewElts) +
              NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
        assert(Idx >= 0 && "Expected defined mask idx.");
      }
      copy(UniqueConstantVec, std::begin(Inputs));
      copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
    }
  };
  MakeUniqueInputs(OrigMask);
  // Keep the simplified inputs: each output half re-runs the simplification
  // on its own sub-mask and must start from the same input set.
  SDValue OrigInputs[4];
  copy(Inputs, std::begin(OrigInputs));
  for (unsigned High = 0; High < 2; ++High) {
    SDValue &Output = High ? Hi : Lo;

    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands.
    unsigned FirstMaskIdx = High * NewElts;
    SmallVector<int> Mask(NewElts * std::size(Inputs), PoisonMaskElem);
    copy(ArrayRef(OrigMask).slice(FirstMaskIdx, NewElts), Mask.begin());
    assert(!Output && "Expected default initialized initial value.");
    TryPeekThroughShufflesInputs(Mask);
    MakeUniqueInputs(Mask);
    SDValue TmpInputs[4];
    copy(Inputs, std::begin(TmpInputs));
    // Track changes in the output registers.
    int UsedIdx = -1;
    bool SecondIteration = false;
    // Returns true once the same destination register is visited again,
    // i.e. subsequent shuffles must accumulate onto the partial Output.
    auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
      if (UsedIdx < 0) {
        UsedIdx = Idx;
        return false;
      }
      if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
        SecondIteration = true;
      return SecondIteration;
    };
    processShuffleMasks(
        Mask, std::size(Inputs), std::size(Inputs),
        /*NumOfUsedRegs=*/1,
        // No input used at all: the half is entirely undef.
        [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
        // Single-input shuffle for this half.
        [&Output, &DAG = DAG, NewVT, &DL, &Inputs,
         &BuildVector](ArrayRef<int> Mask, unsigned Idx, unsigned /*Unused*/) {
          if (Inputs[Idx]->getOpcode() == ISD::BUILD_VECTOR)
            Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
          else
            Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
                                          DAG.getUNDEF(NewVT), Mask);
          Inputs[Idx] = Output;
        },
        // Two-input shuffle; on repeated visits of the same destination,
        // shuffle the accumulated Inputs instead of the original TmpInputs.
        [&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
         &TmpInputs, &BuildVector](ArrayRef<int> Mask, unsigned Idx1,
                                   unsigned Idx2, bool /*Unused*/) {
          if (AccumulateResults(Idx1)) {
            if (Inputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
                Inputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
              Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
            else
              Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
                                            Inputs[Idx2], Mask);
          } else {
            if (TmpInputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
                TmpInputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
              Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
            else
              Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
                                            TmpInputs[Idx2], Mask);
          }
          Inputs[Idx1] = Output;
        });
    // Restore the shared input set for the next half.
    copy(OrigInputs, std::begin(Inputs));
  }
}
3114
SplitVecRes_VAARG(SDNode * N,SDValue & Lo,SDValue & Hi)3115 void DAGTypeLegalizer::SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
3116 EVT OVT = N->getValueType(0);
3117 EVT NVT = OVT.getHalfNumVectorElementsVT(*DAG.getContext());
3118 SDValue Chain = N->getOperand(0);
3119 SDValue Ptr = N->getOperand(1);
3120 SDValue SV = N->getOperand(2);
3121 SDLoc dl(N);
3122
3123 const Align Alignment =
3124 DAG.getDataLayout().getABITypeAlign(NVT.getTypeForEVT(*DAG.getContext()));
3125
3126 Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment.value());
3127 Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment.value());
3128 Chain = Hi.getValue(1);
3129
3130 // Modified the chain - switch anything that used the old chain to use
3131 // the new one.
3132 ReplaceValueWith(SDValue(N, 1), Chain);
3133 }
3134
SplitVecRes_FP_TO_XINT_SAT(SDNode * N,SDValue & Lo,SDValue & Hi)3135 void DAGTypeLegalizer::SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo,
3136 SDValue &Hi) {
3137 EVT DstVTLo, DstVTHi;
3138 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
3139 SDLoc dl(N);
3140
3141 SDValue SrcLo, SrcHi;
3142 EVT SrcVT = N->getOperand(0).getValueType();
3143 if (getTypeAction(SrcVT) == TargetLowering::TypeSplitVector)
3144 GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
3145 else
3146 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
3147
3148 Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
3149 Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));
3150 }
3151
SplitVecRes_VECTOR_REVERSE(SDNode * N,SDValue & Lo,SDValue & Hi)3152 void DAGTypeLegalizer::SplitVecRes_VECTOR_REVERSE(SDNode *N, SDValue &Lo,
3153 SDValue &Hi) {
3154 SDValue InLo, InHi;
3155 GetSplitVector(N->getOperand(0), InLo, InHi);
3156 SDLoc DL(N);
3157
3158 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, InHi.getValueType(), InHi);
3159 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, InLo.getValueType(), InLo);
3160 }
3161
SplitVecRes_VECTOR_SPLICE(SDNode * N,SDValue & Lo,SDValue & Hi)3162 void DAGTypeLegalizer::SplitVecRes_VECTOR_SPLICE(SDNode *N, SDValue &Lo,
3163 SDValue &Hi) {
3164 SDLoc DL(N);
3165
3166 SDValue Expanded = TLI.expandVectorSplice(N, DAG);
3167 std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);
3168 }
3169
SplitVecRes_VP_REVERSE(SDNode * N,SDValue & Lo,SDValue & Hi)3170 void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
3171 SDValue &Hi) {
3172 EVT VT = N->getValueType(0);
3173 SDValue Val = N->getOperand(0);
3174 SDValue Mask = N->getOperand(1);
3175 SDValue EVL = N->getOperand(2);
3176 SDLoc DL(N);
3177
3178 // Fallback to VP_STRIDED_STORE to stack followed by VP_LOAD.
3179 Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false);
3180
3181 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
3182 VT.getVectorElementCount());
3183 SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);
3184 EVT PtrVT = StackPtr.getValueType();
3185 auto &MF = DAG.getMachineFunction();
3186 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
3187 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
3188
3189 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
3190 PtrInfo, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
3191 Alignment);
3192 MachineMemOperand *LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
3193 PtrInfo, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
3194 Alignment);
3195
3196 unsigned EltWidth = VT.getScalarSizeInBits() / 8;
3197 SDValue NumElemMinus1 =
3198 DAG.getNode(ISD::SUB, DL, PtrVT, DAG.getZExtOrTrunc(EVL, DL, PtrVT),
3199 DAG.getConstant(1, DL, PtrVT));
3200 SDValue StartOffset = DAG.getNode(ISD::MUL, DL, PtrVT, NumElemMinus1,
3201 DAG.getConstant(EltWidth, DL, PtrVT));
3202 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, StartOffset);
3203 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth, DL, PtrVT);
3204
3205 SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT);
3206 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(), DL, Val, StorePtr,
3207 DAG.getUNDEF(PtrVT), Stride, TrueMask,
3208 EVL, MemVT, StoreMMO, ISD::UNINDEXED);
3209
3210 SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
3211
3212 std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
3213 }
3214
void DAGTypeLegalizer::SplitVecRes_VP_SPLICE(SDNode *N, SDValue &Lo,
                                             SDValue &Hi) {
  // Legalize VP_SPLICE through the stack: store V1's first EVL1 elements and
  // V2's first EVL2 elements back-to-back in a stack temporary, reload the
  // result vector starting at the splice point, and split the loaded vector
  // into the Lo/Hi halves.
  EVT VT = N->getValueType(0);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  int64_t Imm = cast<ConstantSDNode>(N->getOperand(2))->getSExtValue();
  SDValue Mask = N->getOperand(3);
  SDValue EVL1 = N->getOperand(4);
  SDValue EVL2 = N->getOperand(5);
  SDLoc DL(N);

  // Since EVL2 is considered the real VL it gets promoted during
  // SelectionDAGBuilder. Promote EVL1 here if needed.
  if (getTypeAction(EVL1.getValueType()) == TargetLowering::TypePromoteInteger)
    EVL1 = ZExtPromotedInteger(EVL1);

  Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false);

  // The temporary must be able to hold both source vectors.
  EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
                               VT.getVectorElementCount() * 2);
  SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);
  EVT PtrVT = StackPtr.getValueType();
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
      Alignment);
  MachineMemOperand *LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
      Alignment);

  // V2 is stored immediately after V1's EVL1 active elements.
  SDValue StackPtr2 = TLI.getVectorElementPointer(DAG, StackPtr, VT, EVL1);

  SDValue TrueMask = DAG.getBoolConstant(true, DL, Mask.getValueType(), VT);
  SDValue StoreV1 = DAG.getStoreVP(DAG.getEntryNode(), DL, V1, StackPtr,
                                   DAG.getUNDEF(PtrVT), TrueMask, EVL1,
                                   V1.getValueType(), StoreMMO, ISD::UNINDEXED);

  SDValue StoreV2 =
      DAG.getStoreVP(StoreV1, DL, V2, StackPtr2, DAG.getUNDEF(PtrVT), TrueMask,
                     EVL2, V2.getValueType(), StoreMMO, ISD::UNINDEXED);

  SDValue Load;
  if (Imm >= 0) {
    // Non-negative offset: the result begins Imm elements into V1's data.
    StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VT, N->getOperand(2));
    Load = DAG.getLoadVP(VT, DL, StoreV2, StackPtr, Mask, EVL2, LoadMMO);
  } else {
    // Negative offset: the result takes the last |Imm| (TrailingElts)
    // elements of V1's data followed by elements of V2.
    uint64_t TrailingElts = -Imm;
    unsigned EltWidth = VT.getScalarSizeInBits() / 8;
    SDValue TrailingBytes = DAG.getConstant(TrailingElts * EltWidth, DL, PtrVT);

    // Make sure TrailingBytes doesn't exceed the size of vec1.
    SDValue OffsetToV2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, StackPtr);
    TrailingBytes =
        DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, OffsetToV2);

    // Calculate the start address of the spliced result.
    StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes);
    Load = DAG.getLoadVP(VT, DL, StoreV2, StackPtr2, Mask, EVL2, LoadMMO);
  }

  // Split the reloaded result into the two requested halves.
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, Load,
                   DAG.getVectorIdxConstant(0, DL));
  Hi =
      DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, Load,
                  DAG.getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
}
3286
SplitVecRes_PARTIAL_REDUCE_MLA(SDNode * N,SDValue & Lo,SDValue & Hi)3287 void DAGTypeLegalizer::SplitVecRes_PARTIAL_REDUCE_MLA(SDNode *N, SDValue &Lo,
3288 SDValue &Hi) {
3289 SDLoc DL(N);
3290 SDValue Acc = N->getOperand(0);
3291 SDValue Input1 = N->getOperand(1);
3292 SDValue Input2 = N->getOperand(2);
3293
3294 SDValue AccLo, AccHi;
3295 std::tie(AccLo, AccHi) = DAG.SplitVector(Acc, DL);
3296 unsigned Opcode = N->getOpcode();
3297
3298 // If the input types don't need splitting, just accumulate into the
3299 // low part of the accumulator.
3300 if (getTypeAction(Input1.getValueType()) != TargetLowering::TypeSplitVector) {
3301 Lo = DAG.getNode(Opcode, DL, AccLo.getValueType(), AccLo, Input1, Input2);
3302 Hi = AccHi;
3303 return;
3304 }
3305
3306 SDValue Input1Lo, Input1Hi;
3307 SDValue Input2Lo, Input2Hi;
3308 std::tie(Input1Lo, Input1Hi) = DAG.SplitVector(Input1, DL);
3309 std::tie(Input2Lo, Input2Hi) = DAG.SplitVector(Input2, DL);
3310 EVT ResultVT = AccLo.getValueType();
3311
3312 Lo = DAG.getNode(Opcode, DL, ResultVT, AccLo, Input1Lo, Input2Lo);
3313 Hi = DAG.getNode(Opcode, DL, ResultVT, AccHi, Input1Hi, Input2Hi);
3314 }
3315
SplitVecRes_GET_ACTIVE_LANE_MASK(SDNode * N,SDValue & Lo,SDValue & Hi)3316 void DAGTypeLegalizer::SplitVecRes_GET_ACTIVE_LANE_MASK(SDNode *N, SDValue &Lo,
3317 SDValue &Hi) {
3318 SDLoc DL(N);
3319 SDValue Op0 = N->getOperand(0);
3320 SDValue Op1 = N->getOperand(1);
3321 EVT OpVT = Op0.getValueType();
3322
3323 EVT LoVT, HiVT;
3324 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
3325
3326 Lo = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, LoVT, Op0, Op1);
3327 SDValue LoElts = DAG.getElementCount(DL, OpVT, LoVT.getVectorElementCount());
3328 SDValue HiStartVal = DAG.getNode(ISD::UADDSAT, DL, OpVT, Op0, LoElts);
3329 Hi = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, HiVT, HiStartVal, Op1);
3330 }
3331
SplitVecRes_VECTOR_DEINTERLEAVE(SDNode * N)3332 void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
3333 unsigned Factor = N->getNumOperands();
3334
3335 SmallVector<SDValue, 8> Ops(Factor * 2);
3336 for (unsigned i = 0; i != Factor; ++i) {
3337 SDValue OpLo, OpHi;
3338 GetSplitVector(N->getOperand(i), OpLo, OpHi);
3339 Ops[i * 2] = OpLo;
3340 Ops[i * 2 + 1] = OpHi;
3341 }
3342
3343 SmallVector<EVT, 8> VTs(Factor, Ops[0].getValueType());
3344
3345 SDLoc DL(N);
3346 SDValue ResLo = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL, VTs,
3347 ArrayRef(Ops).slice(0, Factor));
3348 SDValue ResHi = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL, VTs,
3349 ArrayRef(Ops).slice(Factor, Factor));
3350
3351 for (unsigned i = 0; i != Factor; ++i)
3352 SetSplitVector(SDValue(N, i), ResLo.getValue(i), ResHi.getValue(i));
3353 }
3354
SplitVecRes_VECTOR_INTERLEAVE(SDNode * N)3355 void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(SDNode *N) {
3356 unsigned Factor = N->getNumOperands();
3357
3358 SmallVector<SDValue, 8> Ops(Factor * 2);
3359 for (unsigned i = 0; i != Factor; ++i) {
3360 SDValue OpLo, OpHi;
3361 GetSplitVector(N->getOperand(i), OpLo, OpHi);
3362 Ops[i] = OpLo;
3363 Ops[i + Factor] = OpHi;
3364 }
3365
3366 SmallVector<EVT, 8> VTs(Factor, Ops[0].getValueType());
3367
3368 SDLoc DL(N);
3369 SDValue Res[] = {DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, VTs,
3370 ArrayRef(Ops).slice(0, Factor)),
3371 DAG.getNode(ISD::VECTOR_INTERLEAVE, DL, VTs,
3372 ArrayRef(Ops).slice(Factor, Factor))};
3373
3374 for (unsigned i = 0; i != Factor; ++i) {
3375 unsigned IdxLo = 2 * i;
3376 unsigned IdxHi = 2 * i + 1;
3377 SetSplitVector(SDValue(N, i), Res[IdxLo / Factor].getValue(IdxLo % Factor),
3378 Res[IdxHi / Factor].getValue(IdxHi % Factor));
3379 }
3380 }
3381
3382 //===----------------------------------------------------------------------===//
3383 // Operand Vector Splitting
3384 //===----------------------------------------------------------------------===//
3385
3386 /// This method is called when the specified operand of the specified node is
3387 /// found to need vector splitting. At this point, all of the result types of
3388 /// the node are known to be legal, but other operands of the node may need
3389 /// legalization as well as the specified one.
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Split node operand: "; N->dump(&DAG));
  SDValue Res = SDValue();

  // See if the target wants to custom split this node.
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
    return false;

  // Dispatch to a per-opcode helper. Each helper either returns a
  // replacement value for result 0, returns N itself (updated in place), or
  // returns a null SDValue after registering the results itself.
  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to split this operator's "
                       "operand!\n");

  case ISD::VP_SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS:
  case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
  case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
  case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
  case ISD::INSERT_SUBVECTOR: Res = SplitVecOp_INSERT_SUBVECTOR(N, OpNo); break;
  case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
  case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
  case ISD::VP_TRUNCATE:
  case ISD::TRUNCATE:
    Res = SplitVecOp_TruncateHelper(N);
    break;
  case ISD::STRICT_FP_ROUND:
  case ISD::VP_FP_ROUND:
  case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
  case ISD::FCOPYSIGN: Res = SplitVecOp_FPOpDifferentTypes(N); break;
  case ISD::STORE:
    Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
    break;
  case ISD::VP_STORE:
    Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo);
    break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(N), OpNo);
    break;
  case ISD::MSTORE:
    Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
    break;
  case ISD::MSCATTER:
  case ISD::VP_SCATTER:
    Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo);
    break;
  case ISD::MGATHER:
  case ISD::VP_GATHER:
    Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo);
    break;
  case ISD::VSELECT:
    Res = SplitVecOp_VSELECT(N, OpNo);
    break;
  case ISD::VECTOR_COMPRESS:
    Res = SplitVecOp_VECTOR_COMPRESS(N, OpNo);
    break;
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
    // If the result is narrower than the (to-be-split) input, route through
    // the truncate helper; otherwise treat as a plain unary conversion.
    // Strict FP ops carry their chain in operand 0, data in operand 1.
    if (N->getValueType(0).bitsLT(
            N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
      Res = SplitVecOp_TruncateHelper(N);
    else
      Res = SplitVecOp_UnaryOp(N);
    break;
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    Res = SplitVecOp_FP_TO_XINT_SAT(N);
    break;
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::FTRUNC:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::LRINT:
  case ISD::LLRINT:
    Res = SplitVecOp_UnaryOp(N);
    break;
  case ISD::FLDEXP:
    Res = SplitVecOp_FPOpDifferentTypes(N);
    break;

  case ISD::SCMP:
  case ISD::UCMP:
    Res = SplitVecOp_CMP(N);
    break;

  case ISD::FAKE_USE:
    Res = SplitVecOp_FAKE_USE(N);
    break;
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Res = SplitVecOp_ExtVecInRegOp(N);
    break;

  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VECREDUCE_FMINIMUM:
    Res = SplitVecOp_VECREDUCE(N, OpNo);
    break;
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_SEQ_FMUL:
    Res = SplitVecOp_VECREDUCE_SEQ(N);
    break;
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = SplitVecOp_VP_REDUCE(N, OpNo);
    break;
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = SplitVecOp_VP_CttzElements(N);
    break;
  case ISD::EXPERIMENTAL_VECTOR_HISTOGRAM:
    Res = SplitVecOp_VECTOR_HISTOGRAM(N);
    break;
  case ISD::PARTIAL_REDUCE_UMLA:
  case ISD::PARTIAL_REDUCE_SMLA:
  case ISD::PARTIAL_REDUCE_SUMLA:
    Res = SplitVecOp_PARTIAL_REDUCE_MLA(N);
    break;
  }

  // If the result is null, the sub-method took care of registering results etc.
  if (!Res.getNode()) return false;

  // If the result is N, the sub-method updated N in place. Tell the legalizer
  // core about this.
  if (Res.getNode() == N)
    return true;

  // Strict FP nodes have a second (chain) result; everything else here is
  // single-result.
  if (N->isStrictFPOpcode())
    assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 2 &&
           "Invalid operand expansion");
  else
    assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
           "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  return false;
}
3575
SplitVecOp_VSELECT(SDNode * N,unsigned OpNo)3576 SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
3577 // The only possibility for an illegal operand is the mask, since result type
3578 // legalization would have handled this node already otherwise.
3579 assert(OpNo == 0 && "Illegal operand must be mask");
3580
3581 SDValue Mask = N->getOperand(0);
3582 SDValue Src0 = N->getOperand(1);
3583 SDValue Src1 = N->getOperand(2);
3584 EVT Src0VT = Src0.getValueType();
3585 SDLoc DL(N);
3586 assert(Mask.getValueType().isVector() && "VSELECT without a vector mask?");
3587
3588 SDValue Lo, Hi;
3589 GetSplitVector(N->getOperand(0), Lo, Hi);
3590 assert(Lo.getValueType() == Hi.getValueType() &&
3591 "Lo and Hi have differing types");
3592
3593 EVT LoOpVT, HiOpVT;
3594 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3595 assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
3596
3597 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3598 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
3599 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
3600 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
3601
3602 SDValue LoSelect =
3603 DAG.getNode(ISD::VSELECT, DL, LoOpVT, LoMask, LoOp0, LoOp1);
3604 SDValue HiSelect =
3605 DAG.getNode(ISD::VSELECT, DL, HiOpVT, HiMask, HiOp0, HiOp1);
3606
3607 return DAG.getNode(ISD::CONCAT_VECTORS, DL, Src0VT, LoSelect, HiSelect);
3608 }
3609
SplitVecOp_VECTOR_COMPRESS(SDNode * N,unsigned OpNo)3610 SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_COMPRESS(SDNode *N, unsigned OpNo) {
3611 // The only possibility for an illegal operand is the mask, since result type
3612 // legalization would have handled this node already otherwise.
3613 assert(OpNo == 1 && "Illegal operand must be mask");
3614
3615 // To split the mask, we need to split the result type too, so we can just
3616 // reuse that logic here.
3617 SDValue Lo, Hi;
3618 SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi);
3619
3620 EVT VecVT = N->getValueType(0);
3621 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VecVT, Lo, Hi);
3622 }
3623
SplitVecOp_VECREDUCE(SDNode * N,unsigned OpNo)3624 SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
3625 EVT ResVT = N->getValueType(0);
3626 SDValue Lo, Hi;
3627 SDLoc dl(N);
3628
3629 SDValue VecOp = N->getOperand(OpNo);
3630 EVT VecVT = VecOp.getValueType();
3631 assert(VecVT.isVector() && "Can only split reduce vector operand");
3632 GetSplitVector(VecOp, Lo, Hi);
3633 EVT LoOpVT, HiOpVT;
3634 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3635
3636 // Use the appropriate scalar instruction on the split subvectors before
3637 // reducing the now partially reduced smaller vector.
3638 unsigned CombineOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
3639 SDValue Partial = DAG.getNode(CombineOpc, dl, LoOpVT, Lo, Hi, N->getFlags());
3640 return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());
3641 }
3642
SplitVecOp_VECREDUCE_SEQ(SDNode * N)3643 SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) {
3644 EVT ResVT = N->getValueType(0);
3645 SDValue Lo, Hi;
3646 SDLoc dl(N);
3647
3648 SDValue AccOp = N->getOperand(0);
3649 SDValue VecOp = N->getOperand(1);
3650 SDNodeFlags Flags = N->getFlags();
3651
3652 EVT VecVT = VecOp.getValueType();
3653 assert(VecVT.isVector() && "Can only split reduce vector operand");
3654 GetSplitVector(VecOp, Lo, Hi);
3655 EVT LoOpVT, HiOpVT;
3656 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3657
3658 // Reduce low half.
3659 SDValue Partial = DAG.getNode(N->getOpcode(), dl, ResVT, AccOp, Lo, Flags);
3660
3661 // Reduce high half, using low half result as initial value.
3662 return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
3663 }
3664
SplitVecOp_VP_REDUCE(SDNode * N,unsigned OpNo)3665 SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) {
3666 assert(N->isVPOpcode() && "Expected VP opcode");
3667 assert(OpNo == 1 && "Can only split reduce vector operand");
3668
3669 unsigned Opc = N->getOpcode();
3670 EVT ResVT = N->getValueType(0);
3671 SDValue Lo, Hi;
3672 SDLoc dl(N);
3673
3674 SDValue VecOp = N->getOperand(OpNo);
3675 EVT VecVT = VecOp.getValueType();
3676 assert(VecVT.isVector() && "Can only split reduce vector operand");
3677 GetSplitVector(VecOp, Lo, Hi);
3678
3679 SDValue MaskLo, MaskHi;
3680 std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
3681
3682 SDValue EVLLo, EVLHi;
3683 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl);
3684
3685 const SDNodeFlags Flags = N->getFlags();
3686
3687 SDValue ResLo =
3688 DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags);
3689 return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags);
3690 }
3691
SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
  // The result has a legal vector type, but the input needs splitting.
  EVT ResVT = N->getValueType(0);
  SDValue Lo, Hi;
  SDLoc dl(N);
  // Strict FP ops carry their chain in operand 0; the vector is operand 1.
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();

  // Each half produces a vector with the result's element type but the
  // half-sized input element count.
  EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
                               InVT.getVectorElementCount());

  if (N->isStrictFPOpcode()) {
    // Both halves start from the same incoming chain.
    Lo = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Lo });
    Hi = DAG.getNode(N->getOpcode(), dl, { OutVT, MVT::Other },
                     { N->getOperand(0), Hi });

    // Build a factor node to remember that this operation is independent
    // of the other one.
    SDValue Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                             Hi.getValue(1));

    // Legalize the chain result - switch anything that used the old chain to
    // use the new one.
    ReplaceValueWith(SDValue(N, 1), Ch);
  } else if (N->getNumOperands() == 3) {
    // VP form: split the mask and the explicit vector length as well.
    assert(N->isVPOpcode() && "Expected VP opcode");
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi);
  } else {
    // Plain unary op: apply it to each half independently.
    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
3732
3733 // Split a FAKE_USE use of a vector into FAKE_USEs of hi and lo part.
SplitVecOp_FAKE_USE(SDNode * N)3734 SDValue DAGTypeLegalizer::SplitVecOp_FAKE_USE(SDNode *N) {
3735 SDValue Lo, Hi;
3736 GetSplitVector(N->getOperand(1), Lo, Hi);
3737 SDValue Chain =
3738 DAG.getNode(ISD::FAKE_USE, SDLoc(), MVT::Other, N->getOperand(0), Lo);
3739 return DAG.getNode(ISD::FAKE_USE, SDLoc(), MVT::Other, Chain, Hi);
3740 }
3741
SplitVecOp_BITCAST(SDNode * N)3742 SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
3743 // For example, i64 = BITCAST v4i16 on alpha. Typically the vector will
3744 // end up being split all the way down to individual components. Convert the
3745 // split pieces into integers and reassemble.
3746 EVT ResVT = N->getValueType(0);
3747 SDValue Lo, Hi;
3748 GetSplitVector(N->getOperand(0), Lo, Hi);
3749 SDLoc dl(N);
3750
3751 if (ResVT.isScalableVector()) {
3752 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3753 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
3754 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
3755 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
3756 }
3757
3758 Lo = BitConvertToInteger(Lo);
3759 Hi = BitConvertToInteger(Hi);
3760
3761 if (DAG.getDataLayout().isBigEndian())
3762 std::swap(Lo, Hi);
3763
3764 return DAG.getNode(ISD::BITCAST, dl, ResVT, JoinIntegers(Lo, Hi));
3765 }
3766
SplitVecOp_INSERT_SUBVECTOR(SDNode * N,unsigned OpNo)3767 SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
3768 unsigned OpNo) {
3769 assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
3770 // We know that the result type is legal.
3771 EVT ResVT = N->getValueType(0);
3772
3773 SDValue Vec = N->getOperand(0);
3774 SDValue SubVec = N->getOperand(1);
3775 SDValue Idx = N->getOperand(2);
3776 SDLoc dl(N);
3777
3778 SDValue Lo, Hi;
3779 GetSplitVector(SubVec, Lo, Hi);
3780
3781 uint64_t IdxVal = Idx->getAsZExtVal();
3782 uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
3783
3784 SDValue FirstInsertion =
3785 DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Lo, Idx);
3786 SDValue SecondInsertion =
3787 DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, FirstInsertion, Hi,
3788 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3789
3790 return SecondInsertion;
3791 }
3792
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
  // We know that the extracted result type is legal.
  EVT SubVT = N->getValueType(0);
  SDValue Idx = N->getOperand(1);
  SDLoc dl(N);
  SDValue Lo, Hi;

  GetSplitVector(N->getOperand(0), Lo, Hi);

  uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
  uint64_t IdxVal = Idx->getAsZExtVal();

  // If the extract lies entirely within the low half, extract from it.
  if (IdxVal < LoEltsMin) {
    assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin &&
           "Extracted subvector crosses vector split!");
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Lo, Idx);
  } else if (SubVT.isScalableVector() ==
             N->getOperand(0).getValueType().isScalableVector())
    // Same scalability: extract from the high half with a rebased index.
    return DAG.getExtractSubvector(dl, SubVT, Hi, IdxVal - LoEltsMin);

  // After this point the DAG node only permits extracting fixed-width
  // subvectors from scalable vectors.
  assert(SubVT.isFixedLengthVector() &&
         "Extracting scalable subvector from fixed-width unsupported");

  // If the element type is i1 and we're not promoting the result, then we may
  // end up loading the wrong data since the bits are packed tightly into
  // bytes. For example, if we extract a v4i1 (legal) from a nxv4i1 (legal)
  // type at index 4, then we will load a byte starting at index 0.
  if (SubVT.getScalarType() == MVT::i1)
    report_fatal_error("Don't know how to extract fixed-width predicate "
                       "subvector from a scalable predicate vector");

  // Spill the vector to the stack. We should use the alignment for
  // the smallest part.
  SDValue Vec = N->getOperand(0);
  EVT VecVT = Vec.getValueType();
  Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false);
  SDValue StackPtr =
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
                               SmallestAlign);

  // Extract the subvector by loading the correct part.
  StackPtr = TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, SubVT, Idx);

  return DAG.getLoad(
      SubVT, dl, Store, StackPtr,
      MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
}
3847
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
  SDValue Vec = N->getOperand(0);
  SDValue Idx = N->getOperand(1);
  EVT VecVT = Vec.getValueType();

  // With a constant index we can usually steer the extract into one half.
  if (const ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Idx)) {
    uint64_t IdxVal = Index->getZExtValue();

    SDValue Lo, Hi;
    GetSplitVector(Vec, Lo, Hi);

    uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();

    if (IdxVal < LoElts)
      return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
    else if (!Vec.getValueType().isScalableVector())
      // For fixed-width vectors the rebased index is exact; for scalable
      // vectors it may not be, so fall through to the generic path below.
      return SDValue(DAG.UpdateNodeOperands(N, Hi,
                                    DAG.getConstant(IdxVal - LoElts, SDLoc(N),
                                                    Idx.getValueType())), 0);
  }

  // See if the target wants to custom expand this node.
  if (CustomLowerNode(N, N->getValueType(0), true))
    return SDValue();

  // Make the vector elements byte-addressable if they aren't already.
  SDLoc dl(N);
  EVT EltVT = VecVT.getVectorElementType();
  if (!EltVT.isByteSized()) {
    // Widen the element type to a whole number of bytes and extract from the
    // any-extended vector instead, truncating the result back afterwards.
    EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
    VecVT = VecVT.changeElementType(EltVT);
    Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
    SDValue NewExtract =
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);
    return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));
  }

  // Store the vector to the stack.
  // In cases where the vector is illegal it will be broken down into parts
  // and stored in parts - we should use the alignment for the smallest part.
  Align SmallestAlign = DAG.getReducedAlign(VecVT, /*UseABI=*/false);
  SDValue StackPtr =
      DAG.CreateStackTemporary(VecVT.getStoreSize(), SmallestAlign);
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
                               SmallestAlign);

  // Load back the required element.
  StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);

  // EXTRACT_VECTOR_ELT can extend the element type to the width of the return
  // type, leaving the high bits undefined. But it can't truncate.
  assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");

  return DAG.getExtLoad(
      ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
      MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), EltVT,
      commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
}
3909
SplitVecOp_ExtVecInRegOp(SDNode * N)3910 SDValue DAGTypeLegalizer::SplitVecOp_ExtVecInRegOp(SDNode *N) {
3911 SDValue Lo, Hi;
3912
3913 // *_EXTEND_VECTOR_INREG only reference the lower half of the input, so
3914 // splitting the result has the same effect as splitting the input operand.
3915 SplitVecRes_ExtVecInRegOp(N, Lo, Hi);
3916
3917 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), N->getValueType(0), Lo, Hi);
3918 }
3919
SplitVecOp_Gather(MemSDNode * N,unsigned OpNo)3920 SDValue DAGTypeLegalizer::SplitVecOp_Gather(MemSDNode *N, unsigned OpNo) {
3921 (void)OpNo;
3922 SDValue Lo, Hi;
3923 SplitVecRes_Gather(N, Lo, Hi);
3924
3925 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, N, N->getValueType(0), Lo, Hi);
3926 ReplaceValueWith(SDValue(N, 0), Res);
3927 return SDValue();
3928 }
3929
SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed vp_store of vector?");
  SDValue Ch = N->getChain();
  SDValue Ptr = N->getBasePtr();
  SDValue Offset = N->getOffset();
  assert(Offset.isUndef() && "Unexpected VP store offset");
  SDValue Mask = N->getMask();
  SDValue EVL = N->getVectorLength();
  SDValue Data = N->getValue();
  Align Alignment = N->getBaseAlign();
  SDLoc DL(N);

  SDValue DataLo, DataHi;
  if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
    // Split Data operand
    GetSplitVector(Data, DataLo, DataHi);
  else
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) {
    // Splitting the SETCC result directly avoids legalizing it separately.
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  } else {
    if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
      GetSplitVector(Mask, MaskLo, MaskHi);
    else
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  }

  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  // HiIsEmpty is set when the split memory type leaves nothing for the high
  // half to store.
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);

  // Split EVL
  SDValue EVLLo, EVLHi;
  std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL);

  SDValue Lo, Hi;
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      N->getPointerInfo(), MachineMemOperand::MOStore,
      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
      N->getRanges());

  Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());

  // If the hi vp_store has zero storage size, only the lo vp_store is needed.
  if (HiIsEmpty)
    return Lo;

  // Advance the base pointer past the bytes the low half stores.
  Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
                                   N->isCompressingStore());

  MachinePointerInfo MPI;
  if (LoMemVT.isScalableVector()) {
    // The low half's size is only known at runtime, so the high half's
    // pointer info can't carry a static offset; fall back to just the
    // address space and reduce the alignment to what the minimum size
    // guarantees.
    Alignment = commonAlignment(Alignment,
                                LoMemVT.getSizeInBits().getKnownMinValue() / 8);
    MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
  } else
    MPI = N->getPointerInfo().getWithOffset(
        LoMemVT.getStoreSize().getFixedValue());

  MMO = DAG.getMachineFunction().getMachineMemOperand(
      MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
      Alignment, N->getAAInfo(), N->getRanges());

  Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
                      N->getAddressingMode(), N->isTruncatingStore(),
                      N->isCompressingStore());

  // Build a factor node to remember that this store is independent of the
  // other one.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
4008
SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N,
                                                      unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
  assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");

  SDLoc DL(N);

  // Split the data operand, reusing an existing split if one was recorded.
  SDValue Data = N->getValue();
  SDValue LoData, HiData;
  if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
    GetSplitVector(Data, LoData, HiData);
  else
    std::tie(LoData, HiData) = DAG.SplitVector(Data, DL);

  // HiIsEmpty is set when the split memory type leaves nothing for the high
  // half to store.
  EVT LoMemVT, HiMemVT;
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
      N->getMemoryVT(), LoData.getValueType(), &HiIsEmpty);

  // Split the mask; a SETCC mask is split via the result-splitting helper.
  SDValue Mask = N->getMask();
  SDValue LoMask, HiMask;
  if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC)
    SplitVecRes_SETCC(Mask.getNode(), LoMask, HiMask);
  else if (getTypeAction(Mask.getValueType()) ==
           TargetLowering::TypeSplitVector)
    GetSplitVector(Mask, LoMask, HiMask);
  else
    std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);

  // Split the explicit vector length.
  SDValue LoEVL, HiEVL;
  std::tie(LoEVL, HiEVL) =
      DAG.SplitEVL(N->getVectorLength(), Data.getValueType(), DL);

  // Generate the low vp_strided_store
  SDValue Lo = DAG.getStridedStoreVP(
      N->getChain(), DL, LoData, N->getBasePtr(), N->getOffset(),
      N->getStride(), LoMask, LoEVL, LoMemVT, N->getMemOperand(),
      N->getAddressingMode(), N->isTruncatingStore(), N->isCompressingStore());

  // If the high vp_strided_store has zero storage size, only the low
  // vp_strided_store is needed.
  if (HiIsEmpty)
    return Lo;

  // Generate the high vp_strided_store.
  // To calculate the high base address, we need to sum to the low base
  // address stride number of bytes for each element already stored by low,
  // that is: Ptr = Ptr + (LoEVL * Stride)
  EVT PtrVT = N->getBasePtr().getValueType();
  SDValue Increment =
      DAG.getNode(ISD::MUL, DL, PtrVT, LoEVL,
                  DAG.getSExtOrTrunc(N->getStride(), DL, PtrVT));
  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, N->getBasePtr(), Increment);

  // For a scalable low half, only the minimum size is statically known, so
  // reduce the alignment guarantee accordingly.
  Align Alignment = N->getBaseAlign();
  if (LoMemVT.isScalableVector())
    Alignment = commonAlignment(Alignment,
                                LoMemVT.getSizeInBits().getKnownMinValue() / 8);

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo(N->getPointerInfo().getAddrSpace()),
      MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
      Alignment, N->getAAInfo(), N->getRanges());

  SDValue Hi = DAG.getStridedStoreVP(
      N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
      HiEVL, HiMemVT, MMO, N->getAddressingMode(), N->isTruncatingStore(),
      N->isCompressingStore());

  // Build a factor node to remember that this store is independent of the
  // other one.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
4082
SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
                                            unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed masked store of vector?");
  SDValue Ch = N->getChain();
  SDValue Ptr = N->getBasePtr();
  SDValue Offset = N->getOffset();
  assert(Offset.isUndef() && "Unexpected indexed masked store offset");
  SDValue Mask = N->getMask();
  SDValue Data = N->getValue();
  Align Alignment = N->getBaseAlign();
  SDLoc DL(N);

  SDValue DataLo, DataHi;
  if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
    // Split Data operand
    GetSplitVector(Data, DataLo, DataHi);
  else
    std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) {
    // Splitting the SETCC result directly avoids legalizing it separately.
    SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
  } else {
    if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
      GetSplitVector(Mask, MaskLo, MaskHi);
    else
      std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
  }

  EVT MemoryVT = N->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  // HiIsEmpty is set when the split memory type leaves nothing for the high
  // half to store.
  bool HiIsEmpty = false;
  std::tie(LoMemVT, HiMemVT) =
      DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);

  SDValue Lo, Hi, Res;
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      N->getPointerInfo(), MachineMemOperand::MOStore,
      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
      N->getRanges());

  Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
                          N->getAddressingMode(), N->isTruncatingStore(),
                          N->isCompressingStore());

  if (HiIsEmpty) {
    // The hi masked store has zero storage size.
    // Only the lo masked store is needed.
    Res = Lo;
  } else {

    // Advance the base pointer past the bytes the low half stores.
    Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
                                     N->isCompressingStore());

    MachinePointerInfo MPI;
    if (LoMemVT.isScalableVector()) {
      // The low half's size is only known at runtime, so the high half's
      // pointer info can't carry a static offset; keep only the address
      // space and reduce the alignment to what the minimum size guarantees.
      Alignment = commonAlignment(
          Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);
      MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
    } else
      MPI = N->getPointerInfo().getWithOffset(
          LoMemVT.getStoreSize().getFixedValue());

    MMO = DAG.getMachineFunction().getMachineMemOperand(
        MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
        Alignment, N->getAAInfo(), N->getRanges());

    Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
                            N->getAddressingMode(), N->isTruncatingStore(),
                            N->isCompressingStore());

    // Build a factor node to remember that this store is independent of the
    // other one.
    Res = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
  }

  return Res;
}
4162
// Split a masked scatter or VP scatter whose vector operands need splitting.
// Data, mask and index vectors are each split in half, then a lo scatter is
// emitted followed by a hi scatter chained after it (the hi half must be
// ordered after the lo half).
SDValue DAGTypeLegalizer::SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo) {
  SDValue Ch = N->getChain();
  SDValue Ptr = N->getBasePtr();
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getBaseAlign();
  SDLoc DL(N);
  // Collect the operands that live in different accessors on the masked vs.
  // VP scatter node classes into one uniform view.
  struct Operands {
    SDValue Mask;
    SDValue Index;
    SDValue Scale;
    SDValue Data;
  } Ops = [&]() -> Operands {
    if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
      return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
              MSC->getValue()};
    }
    auto *VPSC = cast<VPScatterSDNode>(N);
    return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
            VPSC->getValue()};
  }();
  // Split all operands

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  SDValue DataLo, DataHi;
  if (getTypeAction(Ops.Data.getValueType()) == TargetLowering::TypeSplitVector)
    // Split Data operand (reuse halves if it is already being split).
    GetSplitVector(Ops.Data, DataLo, DataHi);
  else
    std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL);

  // Split Mask operand
  SDValue MaskLo, MaskHi;
  if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) {
    SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
  } else {
    std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL);
  }

  SDValue IndexHi, IndexLo;
  if (getTypeAction(Ops.Index.getValueType()) ==
      TargetLowering::TypeSplitVector)
    GetSplitVector(Ops.Index, IndexLo, IndexHi);
  else
    std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL);

  SDValue Lo;
  // One MMO (covering the whole original access) is shared by both halves.
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      N->getPointerInfo(), MachineMemOperand::MOStore,
      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
      N->getRanges());

  if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
    SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
    Lo =
        DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                             MSC->getIndexType(), MSC->isTruncatingStore());

    // The order of the Scatter operation after split is well defined. The "Hi"
    // part comes after the "Lo". So these two operations should be chained one
    // after another.
    SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale};
    return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi,
                                MMO, MSC->getIndexType(),
                                MSC->isTruncatingStore());
  }
  auto *VPSC = cast<VPScatterSDNode>(N);
  // VP scatter: additionally split the explicit vector length between halves.
  SDValue EVLLo, EVLHi;
  std::tie(EVLLo, EVLHi) =
      DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL);

  SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
  Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
                        VPSC->getIndexType());

  // The order of the Scatter operation after split is well defined. The "Hi"
  // part comes after the "Lo". So these two operations should be chained one
  // after another.
  SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO,
                          VPSC->getIndexType());
}
4246
// Split the stored value of a regular (optionally truncating) unindexed
// vector store into two stores of half-width vectors, joined by a
// TokenFactor. Falls back to scalarizing the whole store when either half's
// memory type is not byte-sized and therefore cannot be addressed separately.
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
  assert(N->isUnindexed() && "Indexed store of vector?");
  assert(OpNo == 1 && "Can only split the stored value");
  SDLoc DL(N);

  bool isTruncating = N->isTruncatingStore();
  SDValue Ch = N->getChain();
  SDValue Ptr = N->getBasePtr();
  EVT MemoryVT = N->getMemoryVT();
  Align Alignment = N->getBaseAlign();
  MachineMemOperand::Flags MMOFlags = N->getMemOperand()->getFlags();
  AAMDNodes AAInfo = N->getAAInfo();
  SDValue Lo, Hi;
  GetSplitVector(N->getOperand(1), Lo, Hi);

  EVT LoMemVT, HiMemVT;
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);

  // Scalarize if the split halves are not byte-sized.
  if (!LoMemVT.isByteSized() || !HiMemVT.isByteSized())
    return TLI.scalarizeVectorStore(N, DAG);

  if (isTruncating)
    Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
                           Alignment, MMOFlags, AAInfo);
  else
    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,
                      AAInfo);

  // Advance Ptr past the lo half and derive the pointer info for the hi store.
  MachinePointerInfo MPI;
  IncrementPointer(N, LoMemVT, MPI, Ptr);

  if (isTruncating)
    Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, MPI,
                           HiMemVT, Alignment, MMOFlags, AAInfo);
  else
    Hi = DAG.getStore(Ch, DL, Hi, Ptr, MPI, Alignment, MMOFlags, AAInfo);

  // The two half stores are independent; join them with a TokenFactor.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
4287
SplitVecOp_CONCAT_VECTORS(SDNode * N)4288 SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
4289 SDLoc DL(N);
4290
4291 // The input operands all must have the same type, and we know the result
4292 // type is valid. Convert this to a buildvector which extracts all the
4293 // input elements.
4294 // TODO: If the input elements are power-two vectors, we could convert this to
4295 // a new CONCAT_VECTORS node with elements that are half-wide.
4296 SmallVector<SDValue, 32> Elts;
4297 EVT EltVT = N->getValueType(0).getVectorElementType();
4298 for (const SDValue &Op : N->op_values()) {
4299 for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
4300 i != e; ++i) {
4301 Elts.push_back(DAG.getExtractVectorElt(DL, EltVT, Op, i));
4302 }
4303 }
4304
4305 return DAG.getBuildVector(N->getValueType(0), DL, Elts);
4306 }
4307
// Split a truncate (or strict FP round) whose input type is illegal, using a
// widen-then-retruncate trick when a plain split would itself be illegal.
SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
  // The result type is legal, but the input type is illegal. If splitting
  // ends up with the result type of each half still being legal, just
  // do that. If, however, that would result in an illegal result type,
  // we can try to get more clever with power-two vectors. Specifically,
  // split the input type, but also widen the result element size, then
  // concatenate the halves and truncate again. For example, consider a target
  // where v8i8 is legal and v8i32 is not (ARM, which doesn't have 256-bit
  // vectors). To perform a "%res = v8i8 trunc v8i32 %in" we do:
  //   %inlo = v4i32 extract_subvector %in, 0
  //   %inhi = v4i32 extract_subvector %in, 4
  //   %lo16 = v4i16 trunc v4i32 %inlo
  //   %hi16 = v4i16 trunc v4i32 %inhi
  //   %in16 = v8i16 concat_vectors v4i16 %lo16, v4i16 %hi16
  //   %res = v8i8 trunc v8i16 %in16
  //
  // Without this transform, the original truncate would end up being
  // scalarized, which is pretty much always a last resort.
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SDValue InVec = N->getOperand(OpNo);
  EVT InVT = InVec->getValueType(0);
  EVT OutVT = N->getValueType(0);
  ElementCount NumElements = OutVT.getVectorElementCount();
  bool IsFloat = OutVT.isFloatingPoint();

  unsigned InElementSize = InVT.getScalarSizeInBits();
  unsigned OutElementSize = OutVT.getScalarSizeInBits();

  // Determine the split output VT. If it's legal we can just split directly.
  EVT LoOutVT, HiOutVT;
  std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
  assert(LoOutVT == HiOutVT && "Unequal split?");

  // If the input elements are only 1/2 the width of the result elements,
  // just use the normal splitting. Our trick only works if there's room
  // to split more than once.
  if (isTypeLegal(LoOutVT) ||
      InElementSize <= OutElementSize * 2)
    return SplitVecOp_UnaryOp(N);
  SDLoc DL(N);

  // Don't touch if this will be scalarized.
  EVT FinalVT = InVT;
  while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector)
    FinalVT = FinalVT.getHalfNumVectorElementsVT(*DAG.getContext());

  if (getTypeAction(FinalVT) == TargetLowering::TypeScalarizeVector)
    return SplitVecOp_UnaryOp(N);

  // Get the split input vector.
  SDValue InLoVec, InHiVec;
  GetSplitVector(InVec, InLoVec, InHiVec);

  // Truncate them to 1/2 the element size.
  //
  // This assumes the number of elements is a power of two; any vector that
  // isn't should be widened, not split.
  EVT HalfElementVT = IsFloat ?
    EVT::getFloatingPointVT(InElementSize/2) :
    EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
  EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
                                NumElements.divideCoefficientBy(2));

  SDValue HalfLo;
  SDValue HalfHi;
  SDValue Chain;
  if (N->isStrictFPOpcode()) {
    HalfLo = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InLoVec});
    HalfHi = DAG.getNode(N->getOpcode(), DL, {HalfVT, MVT::Other},
                         {N->getOperand(0), InHiVec});
    // Legalize the chain result - switch anything that used the old chain to
    // use the new one.
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, HalfLo.getValue(1),
                        HalfHi.getValue(1));
  } else {
    HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
    HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
  }

  // Concatenate them to get the full intermediate truncation result.
  EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements);
  SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo,
                                 HalfHi);
  // Now finish up by truncating all the way down to the original result
  // type. This should normally be something that ends up being legal directly,
  // but in theory if a target has very wide vectors and an annoyingly
  // restricted set of legal types, this split can chain to build things up.

  if (N->isStrictFPOpcode()) {
    SDValue Res = DAG.getNode(
        ISD::STRICT_FP_ROUND, DL, {OutVT, MVT::Other},
        {Chain, InterVec,
         DAG.getTargetConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()))});
    // Relink the chain
    ReplaceValueWith(SDValue(N, 1), SDValue(Res.getNode(), 1));
    return Res;
  }

  return IsFloat
             ? DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec,
                           DAG.getTargetConstant(
                               0, DL, TLI.getPointerTy(DAG.getDataLayout())))
             : DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec);
}
4413
// Split a vector compare (SETCC, STRICT_FSETCC[S], or VP_SETCC) whose inputs
// need splitting while the result vector type is already legal. Both input
// halves are compared separately and the results concatenated.
SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
  unsigned Opc = N->getOpcode();
  bool isStrict = Opc == ISD::STRICT_FSETCC || Opc == ISD::STRICT_FSETCCS;
  assert(N->getValueType(0).isVector() &&
         N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
         "Operand types must be vectors");
  // The result has a legal vector type, but the input needs splitting.
  SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
  SDLoc DL(N);
  // Strict compares carry the chain as operand 0, shifting the vector
  // operands by one.
  GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
  GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);

  EVT VT = N->getValueType(0);
  // Each half's result uses the split operand's element count with the final
  // result's element type.
  EVT PartResVT = Lo0.getValueType().changeElementType(VT.getScalarType());

  if (Opc == ISD::SETCC) {
    LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2));
    HiRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Hi0, Hi1, N->getOperand(2));
  } else if (isStrict) {
    LoRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Lo0, Lo1, N->getOperand(3));
    HiRes = DAG.getNode(Opc, DL, DAG.getVTList(PartResVT, N->getValueType(1)),
                        N->getOperand(0), Hi0, Hi1, N->getOperand(3));
    // Merge the chains and redirect users of the old chain result.
    SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                   LoRes.getValue(1), HiRes.getValue(1));
    ReplaceValueWith(SDValue(N, 1), NewChain);
  } else {
    assert(Opc == ISD::VP_SETCC && "Expected VP_SETCC opcode");
    // VP compare: split the mask and explicit vector length as well.
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(3));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(4), N->getValueType(0), DL);
    LoRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Lo0, Lo1,
                        N->getOperand(2), MaskLo, EVLLo);
    HiRes = DAG.getNode(ISD::VP_SETCC, DL, PartResVT, Hi0, Hi1,
                        N->getOperand(2), MaskHi, EVLHi);
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoRes, HiRes);
}
4454
4455
// Split an FP_ROUND (plain, strict, or VP flavor) whose input vector needs
// splitting while the result type is legal; rounds each half and
// concatenates the results.
SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
  // The result has a legal vector type, but the input needs splitting.
  EVT ResVT = N->getValueType(0);
  SDValue Lo, Hi;
  SDLoc DL(N);
  // For the strict flavor the input vector is operand 1 (operand 0 is the
  // chain).
  GetSplitVector(N->getOperand(N->isStrictFPOpcode() ? 1 : 0), Lo, Hi);
  EVT InVT = Lo.getValueType();

  // Half-result type: the final element type at the split element count.
  EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
                               InVT.getVectorElementCount());

  if (N->isStrictFPOpcode()) {
    Lo = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other },
                     { N->getOperand(0), Lo, N->getOperand(2) });
    Hi = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other },
                     { N->getOperand(0), Hi, N->getOperand(2) });
    // Legalize the chain result - switch anything that used the old chain to
    // use the new one.
    SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                   Lo.getValue(1), Hi.getValue(1));
    ReplaceValueWith(SDValue(N, 1), NewChain);
  } else if (N->getOpcode() == ISD::VP_FP_ROUND) {
    // VP flavor: split the mask and explicit vector length too.
    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
    std::tie(EVLLo, EVLHi) =
        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL);
    Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo);
    Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi);
  } else {
    // Plain FP_ROUND: operand 1 is the trunc flag, passed through unchanged.
    Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
    Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
}
4491
4492 // Split a vector type in an FP binary operation where the second operand has a
4493 // different type from the first.
4494 //
4495 // The result (and the first input) has a legal vector type, but the second
4496 // input needs splitting.
SplitVecOp_FPOpDifferentTypes(SDNode * N)4497 SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(SDNode *N) {
4498 SDLoc DL(N);
4499
4500 EVT LHSLoVT, LHSHiVT;
4501 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
4502
4503 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4504 return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
4505
4506 SDValue LHSLo, LHSHi;
4507 std::tie(LHSLo, LHSHi) =
4508 DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT);
4509
4510 SDValue RHSLo, RHSHi;
4511 std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL);
4512
4513 SDValue Lo = DAG.getNode(N->getOpcode(), DL, LHSLoVT, LHSLo, RHSLo);
4514 SDValue Hi = DAG.getNode(N->getOpcode(), DL, LHSHiVT, LHSHi, RHSHi);
4515
4516 return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Lo, Hi);
4517 }
4518
SplitVecOp_CMP(SDNode * N)4519 SDValue DAGTypeLegalizer::SplitVecOp_CMP(SDNode *N) {
4520 LLVMContext &Ctxt = *DAG.getContext();
4521 SDLoc dl(N);
4522
4523 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
4524 GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
4525 GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
4526
4527 EVT ResVT = N->getValueType(0);
4528 ElementCount SplitOpEC = LHSLo.getValueType().getVectorElementCount();
4529 EVT NewResVT =
4530 EVT::getVectorVT(Ctxt, ResVT.getVectorElementType(), SplitOpEC);
4531
4532 SDValue Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
4533 SDValue Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
4534
4535 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
4536 }
4537
SplitVecOp_FP_TO_XINT_SAT(SDNode * N)4538 SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {
4539 EVT ResVT = N->getValueType(0);
4540 SDValue Lo, Hi;
4541 SDLoc dl(N);
4542 GetSplitVector(N->getOperand(0), Lo, Hi);
4543 EVT InVT = Lo.getValueType();
4544
4545 EVT NewResVT =
4546 EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
4547 InVT.getVectorElementCount());
4548
4549 Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1));
4550 Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1));
4551
4552 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
4553 }
4554
// Split a VP_CTTZ_ELTS[_ZERO_UNDEF] whose vector input needs splitting. The
// count from the lo half is used directly if it found a set bit there
// (result != lo EVL); otherwise the hi half's count is added to the lo EVL.
SDValue DAGTypeLegalizer::SplitVecOp_VP_CttzElements(SDNode *N) {
  SDLoc DL(N);
  EVT ResVT = N->getValueType(0);

  SDValue Lo, Hi;
  SDValue VecOp = N->getOperand(0);
  GetSplitVector(VecOp, Lo, Hi);

  auto [MaskLo, MaskHi] = SplitMask(N->getOperand(1));
  auto [EVLLo, EVLHi] =
      DAG.SplitEVL(N->getOperand(2), VecOp.getValueType(), DL);
  // Lo half's EVL as a scalar of the result type, for comparison/addition.
  SDValue VLo = DAG.getZExtOrTrunc(EVLLo, DL, ResVT);

  // if VP_CTTZ_ELTS(Lo) != EVLLo => VP_CTTZ_ELTS(Lo).
  // else => EVLLo + (VP_CTTZ_ELTS(Hi) or VP_CTTZ_ELTS_ZERO_UNDEF(Hi)).
  // Note the lo half always uses the non-ZERO_UNDEF opcode so its "not found"
  // sentinel (== EVLLo) is well defined; the hi half keeps N's opcode.
  SDValue ResLo = DAG.getNode(ISD::VP_CTTZ_ELTS, DL, ResVT, Lo, MaskLo, EVLLo);
  SDValue ResLoNotEVL =
      DAG.getSetCC(DL, getSetCCResultType(ResVT), ResLo, VLo, ISD::SETNE);
  SDValue ResHi = DAG.getNode(N->getOpcode(), DL, ResVT, Hi, MaskHi, EVLHi);
  return DAG.getSelect(DL, ResVT, ResLoNotEVL, ResLo,
                       DAG.getNode(ISD::ADD, DL, ResVT, VLo, ResHi));
}
4577
SplitVecOp_VECTOR_HISTOGRAM(SDNode * N)4578 SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_HISTOGRAM(SDNode *N) {
4579 MaskedHistogramSDNode *HG = cast<MaskedHistogramSDNode>(N);
4580 SDLoc DL(HG);
4581 SDValue Inc = HG->getInc();
4582 SDValue Ptr = HG->getBasePtr();
4583 SDValue Scale = HG->getScale();
4584 SDValue IntID = HG->getIntID();
4585 EVT MemVT = HG->getMemoryVT();
4586 MachineMemOperand *MMO = HG->getMemOperand();
4587 ISD::MemIndexType IndexType = HG->getIndexType();
4588
4589 SDValue IndexLo, IndexHi, MaskLo, MaskHi;
4590 std::tie(IndexLo, IndexHi) = DAG.SplitVector(HG->getIndex(), DL);
4591 std::tie(MaskLo, MaskHi) = DAG.SplitVector(HG->getMask(), DL);
4592 SDValue OpsLo[] = {HG->getChain(), Inc, MaskLo, Ptr, IndexLo, Scale, IntID};
4593 SDValue Lo = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL,
4594 OpsLo, MMO, IndexType);
4595 SDValue OpsHi[] = {Lo, Inc, MaskHi, Ptr, IndexHi, Scale, IntID};
4596 return DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), MemVT, DL, OpsHi,
4597 MMO, IndexType);
4598 }
4599
SplitVecOp_PARTIAL_REDUCE_MLA(SDNode * N)4600 SDValue DAGTypeLegalizer::SplitVecOp_PARTIAL_REDUCE_MLA(SDNode *N) {
4601 SDValue Acc = N->getOperand(0);
4602 assert(getTypeAction(Acc.getValueType()) != TargetLowering::TypeSplitVector &&
4603 "Accumulator should already be a legal type, and shouldn't need "
4604 "further splitting");
4605
4606 SDLoc DL(N);
4607 SDValue Input1Lo, Input1Hi, Input2Lo, Input2Hi;
4608 std::tie(Input1Lo, Input1Hi) = DAG.SplitVector(N->getOperand(1), DL);
4609 std::tie(Input2Lo, Input2Hi) = DAG.SplitVector(N->getOperand(2), DL);
4610 unsigned Opcode = N->getOpcode();
4611 EVT ResultVT = Acc.getValueType();
4612
4613 SDValue Lo = DAG.getNode(Opcode, DL, ResultVT, Acc, Input1Lo, Input2Lo);
4614 return DAG.getNode(Opcode, DL, ResultVT, Lo, Input1Hi, Input2Hi);
4615 }
4616
4617 //===----------------------------------------------------------------------===//
4618 // Result Vector Widening
4619 //===----------------------------------------------------------------------===//
4620
ReplaceOtherWidenResults(SDNode * N,SDNode * WidenNode,unsigned WidenResNo)4621 void DAGTypeLegalizer::ReplaceOtherWidenResults(SDNode *N, SDNode *WidenNode,
4622 unsigned WidenResNo) {
4623 unsigned NumResults = N->getNumValues();
4624 for (unsigned ResNo = 0; ResNo < NumResults; ResNo++) {
4625 if (ResNo == WidenResNo)
4626 continue;
4627 EVT ResVT = N->getValueType(ResNo);
4628 if (getTypeAction(ResVT) == TargetLowering::TypeWidenVector) {
4629 SetWidenedVector(SDValue(N, ResNo), SDValue(WidenNode, ResNo));
4630 } else {
4631 SDLoc DL(N);
4632 SDValue ResVal =
4633 DAG.getExtractSubvector(DL, ResVT, SDValue(WidenNode, ResNo), 0);
4634 ReplaceValueWith(SDValue(N, ResNo), ResVal);
4635 }
4636 }
4637 }
4638
/// Widen result number ResNo of node N to the next legal (wider) vector type.
/// Dispatches on N's opcode to a WidenVecRes_* helper; the widened value is
/// registered via SetWidenedVector unless the helper already registered it
/// (in which case Res is left null).
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG));

  // See if the target wants to custom widen this node.
  if (CustomWidenLowerNode(N, N->getValueType(ResNo)))
    return;

  SDValue Res = SDValue();

  auto unrollExpandedOp = [&]() {
    // We're going to widen this vector op to a legal type by padding with undef
    // elements. If the wide vector op is eventually going to be expanded to
    // scalar libcalls, then unroll into scalar ops now to avoid unnecessary
    // libcalls on the undef elements.
    EVT VT = N->getValueType(0);
    EVT WideVecVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
    if (!TLI.isOperationLegalOrCustomOrPromote(N->getOpcode(), WideVecVT) &&
        TLI.isOperationExpand(N->getOpcode(), VT.getScalarType())) {
      Res = DAG.UnrollVectorOp(N, WideVecVT.getVectorNumElements());
      if (N->getNumValues() > 1)
        ReplaceOtherWidenResults(N, Res.getNode(), ResNo);
      return true;
    }
    return false;
  };

  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "WidenVectorResult #" << ResNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to widen the result of this operator!");

  case ISD::MERGE_VALUES: Res = WidenVecRes_MERGE_VALUES(N, ResNo); break;
  case ISD::ADDRSPACECAST:
    Res = WidenVecRes_ADDRSPACECAST(N);
    break;
  case ISD::AssertZext: Res = WidenVecRes_AssertZext(N); break;
  case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
  case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
  case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
  case ISD::INSERT_SUBVECTOR:
    Res = WidenVecRes_INSERT_SUBVECTOR(N);
    break;
  case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break;
  case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break;
  case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
  case ISD::STEP_VECTOR:
  case ISD::SPLAT_VECTOR:
  case ISD::SCALAR_TO_VECTOR:
  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecRes_ScalarOp(N);
    break;
  case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
  case ISD::VSELECT:
  case ISD::SELECT:
  case ISD::VP_SELECT:
  case ISD::VP_MERGE:
    Res = WidenVecRes_Select(N);
    break;
  case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
  case ISD::VP_SETCC:
  case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
  case ISD::POISON:
  case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
  case ISD::VECTOR_SHUFFLE:
    Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
    break;
  case ISD::VP_LOAD:
    Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N));
    break;
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(N));
    break;
  case ISD::VECTOR_COMPRESS:
    Res = WidenVecRes_VECTOR_COMPRESS(N);
    break;
  case ISD::MLOAD:
    Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N));
    break;
  case ISD::MGATHER:
    Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N));
    break;
  case ISD::VP_GATHER:
    Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N));
    break;
  case ISD::VECTOR_REVERSE:
    Res = WidenVecRes_VECTOR_REVERSE(N);
    break;
  case ISD::GET_ACTIVE_LANE_MASK:
    Res = WidenVecRes_GET_ACTIVE_LANE_MASK(N);
    break;

  case ISD::ADD: case ISD::VP_ADD:
  case ISD::AND: case ISD::VP_AND:
  case ISD::MUL: case ISD::VP_MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::ABDS:
  case ISD::ABDU:
  case ISD::OR: case ISD::VP_OR:
  case ISD::SUB: case ISD::VP_SUB:
  case ISD::XOR: case ISD::VP_XOR:
  case ISD::SHL: case ISD::VP_SHL:
  case ISD::SRA: case ISD::VP_SRA:
  case ISD::SRL: case ISD::VP_SRL:
  case ISD::FMINNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::VP_FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::VP_FMAXNUM:
  case ISD::FMINIMUM:
  case ISD::VP_FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::VP_FMAXIMUM:
  case ISD::FMINIMUMNUM:
  case ISD::FMAXIMUMNUM:
  case ISD::SMIN: case ISD::VP_SMIN:
  case ISD::SMAX: case ISD::VP_SMAX:
  case ISD::UMIN: case ISD::VP_UMIN:
  case ISD::UMAX: case ISD::VP_UMAX:
  case ISD::UADDSAT: case ISD::VP_UADDSAT:
  case ISD::SADDSAT: case ISD::VP_SADDSAT:
  case ISD::USUBSAT: case ISD::VP_USUBSAT:
  case ISD::SSUBSAT: case ISD::VP_SSUBSAT:
  case ISD::SSHLSAT:
  case ISD::USHLSAT:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  // Vector-predicated binary op widening. Note that -- unlike the
  // unpredicated versions -- we don't have to worry about trapping on
  // operations like UDIV, FADD, etc., as we pass on the original vector
  // length parameter. This means the widened elements containing garbage
  // aren't active.
  case ISD::VP_SDIV:
  case ISD::VP_UDIV:
  case ISD::VP_SREM:
  case ISD::VP_UREM:
  case ISD::VP_FADD:
  case ISD::VP_FSUB:
  case ISD::VP_FMUL:
  case ISD::VP_FDIV:
  case ISD::VP_FREM:
  case ISD::VP_FCOPYSIGN:
    Res = WidenVecRes_Binary(N);
    break;

  case ISD::SCMP:
  case ISD::UCMP:
    Res = WidenVecRes_CMP(N);
    break;

  case ISD::FPOW:
  case ISD::FATAN2:
  case ISD::FREM:
    if (unrollExpandedOp())
      break;
    // If the target has custom/legal support for the scalar FP intrinsic ops
    // (they are probably not destined to become libcalls), then widen those
    // like any other binary ops.
    [[fallthrough]];

  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB:
  case ISD::FDIV:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    Res = WidenVecRes_BinaryCanTrap(N);
    break;

  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
    // These are binary operations, but with an extra operand that shouldn't
    // be widened (the scale).
    Res = WidenVecRes_BinaryWithExtraScalarOp(N);
    break;

  // All strict FP opcodes from ConstrainedOps.def widen the same way.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    Res = WidenVecRes_StrictFP(N);
    break;

  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::USUBO:
  case ISD::SSUBO:
  case ISD::UMULO:
  case ISD::SMULO:
    Res = WidenVecRes_OverflowOp(N, ResNo);
    break;

  case ISD::FCOPYSIGN:
    Res = WidenVecRes_FCOPYSIGN(N);
    break;

  case ISD::IS_FPCLASS:
  case ISD::FPTRUNC_ROUND:
    Res = WidenVecRes_UnarySameEltsWithScalarArg(N);
    break;

  case ISD::FLDEXP:
  case ISD::FPOWI:
    if (!unrollExpandedOp())
      Res = WidenVecRes_ExpOp(N);
    break;

  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Res = WidenVecRes_EXTEND_VECTOR_INREG(N);
    break;

  case ISD::ANY_EXTEND:
  case ISD::FP_EXTEND:
  case ISD::VP_FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::VP_FP_ROUND:
  case ISD::FP_TO_SINT:
  case ISD::VP_FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::SIGN_EXTEND:
  case ISD::VP_SIGN_EXTEND:
  case ISD::SINT_TO_FP:
  case ISD::VP_SINT_TO_FP:
  case ISD::VP_TRUNCATE:
  case ISD::TRUNCATE:
  case ISD::UINT_TO_FP:
  case ISD::VP_UINT_TO_FP:
  case ISD::ZERO_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    Res = WidenVecRes_Convert(N);
    break;

  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    Res = WidenVecRes_FP_TO_XINT_SAT(N);
    break;

  case ISD::LRINT:
  case ISD::LLRINT:
  case ISD::VP_LRINT:
  case ISD::VP_LLRINT:
  case ISD::LROUND:
  case ISD::LLROUND:
    Res = WidenVecRes_XROUND(N);
    break;

  case ISD::FACOS:
  case ISD::FASIN:
  case ISD::FATAN:
  case ISD::FCEIL:
  case ISD::FCOS:
  case ISD::FCOSH:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FEXP10:
  case ISD::FFLOOR:
  case ISD::FLOG:
  case ISD::FLOG10:
  case ISD::FLOG2:
  case ISD::FNEARBYINT:
  case ISD::FRINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FSIN:
  case ISD::FSINH:
  case ISD::FSQRT:
  case ISD::FTAN:
  case ISD::FTANH:
  case ISD::FTRUNC:
    if (unrollExpandedOp())
      break;
    // If the target has custom/legal support for the scalar FP intrinsic ops
    // (they are probably not destined to become libcalls), then widen those
    // like any other unary ops.
    [[fallthrough]];

  case ISD::ABS:
  case ISD::VP_ABS:
  case ISD::BITREVERSE:
  case ISD::VP_BITREVERSE:
  case ISD::BSWAP:
  case ISD::VP_BSWAP:
  case ISD::CTLZ:
  case ISD::VP_CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::VP_CTPOP:
  case ISD::CTTZ:
  case ISD::VP_CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
  case ISD::FNEG: case ISD::VP_FNEG:
  case ISD::FABS: case ISD::VP_FABS:
  case ISD::VP_SQRT:
  case ISD::VP_FCEIL:
  case ISD::VP_FFLOOR:
  case ISD::VP_FRINT:
  case ISD::VP_FNEARBYINT:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  case ISD::FREEZE:
  case ISD::ARITH_FENCE:
  case ISD::FCANONICALIZE:
  case ISD::AssertNoFPClass:
    Res = WidenVecRes_Unary(N);
    break;
  case ISD::FMA: case ISD::VP_FMA:
  case ISD::FSHL:
  case ISD::VP_FSHL:
  case ISD::FSHR:
  case ISD::VP_FSHR:
    Res = WidenVecRes_Ternary(N);
    break;
  case ISD::FMODF:
  case ISD::FFREXP:
  case ISD::FSINCOS:
  case ISD::FSINCOSPI: {
    if (!unrollExpandedOp())
      Res = WidenVecRes_UnaryOpWithTwoResults(N, ResNo);
    break;
  }
  }

  // If Res is null, the sub-method took care of registering the result.
  if (Res.getNode())
    SetWidenedVector(SDValue(N, ResNo), Res);
}
4983
WidenVecRes_Ternary(SDNode * N)4984 SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
4985 // Ternary op widening.
4986 SDLoc dl(N);
4987 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
4988 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
4989 SDValue InOp2 = GetWidenedVector(N->getOperand(1));
4990 SDValue InOp3 = GetWidenedVector(N->getOperand(2));
4991 if (N->getNumOperands() == 3)
4992 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4993
4994 assert(N->getNumOperands() == 5 && "Unexpected number of operands!");
4995 assert(N->isVPOpcode() && "Expected VP opcode");
4996
4997 SDValue Mask =
4998 GetWidenedMask(N->getOperand(3), WidenVT.getVectorElementCount());
4999 return DAG.getNode(N->getOpcode(), dl, WidenVT,
5000 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
5001 }
5002
WidenVecRes_Binary(SDNode * N)5003 SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
5004 // Binary op widening.
5005 SDLoc dl(N);
5006 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5007 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
5008 SDValue InOp2 = GetWidenedVector(N->getOperand(1));
5009 if (N->getNumOperands() == 2)
5010 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
5011 N->getFlags());
5012
5013 assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
5014 assert(N->isVPOpcode() && "Expected VP opcode");
5015
5016 SDValue Mask =
5017 GetWidenedMask(N->getOperand(2), WidenVT.getVectorElementCount());
5018 return DAG.getNode(N->getOpcode(), dl, WidenVT,
5019 {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());
5020 }
5021
WidenVecRes_CMP(SDNode * N)5022 SDValue DAGTypeLegalizer::WidenVecRes_CMP(SDNode *N) {
5023 LLVMContext &Ctxt = *DAG.getContext();
5024 SDLoc dl(N);
5025
5026 SDValue LHS = N->getOperand(0);
5027 SDValue RHS = N->getOperand(1);
5028 EVT OpVT = LHS.getValueType();
5029 if (getTypeAction(OpVT) == TargetLowering::TypeWidenVector) {
5030 LHS = GetWidenedVector(LHS);
5031 RHS = GetWidenedVector(RHS);
5032 OpVT = LHS.getValueType();
5033 }
5034
5035 EVT WidenResVT = TLI.getTypeToTransformTo(Ctxt, N->getValueType(0));
5036 ElementCount WidenResEC = WidenResVT.getVectorElementCount();
5037 if (WidenResEC == OpVT.getVectorElementCount()) {
5038 return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS);
5039 }
5040
5041 return DAG.UnrollVectorOp(N, WidenResVT.getVectorNumElements());
5042 }
5043
WidenVecRes_BinaryWithExtraScalarOp(SDNode * N)5044 SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
5045 // Binary op widening, but with an extra operand that shouldn't be widened.
5046 SDLoc dl(N);
5047 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5048 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
5049 SDValue InOp2 = GetWidenedVector(N->getOperand(1));
5050 SDValue InOp3 = N->getOperand(2);
5051 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
5052 N->getFlags());
5053 }
5054
// Given a vector of operations that have been broken up to widen, see
// if we can collect them together into the next widest legal VT. This
// implementation is trap-safe.
//
// On entry, ConcatOps[0..ConcatEnd) holds the pieces produced by the caller,
// lowest elements first; each piece is either a scalar of WidenVT's element
// type or a vector of such elements. MaxVT is the largest vector type that
// appears among the pieces and WidenVT is the final widened result type.
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI,
                                 SmallVectorImpl<SDValue> &ConcatOps,
                                 unsigned ConcatEnd, EVT VT, EVT MaxVT,
                                 EVT WidenVT) {
  // Check to see if we have a single operation with the widen type.
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    if (VT == WidenVT)
      return ConcatOps[0];
  }

  SDLoc dl(ConcatOps[0]);
  EVT WidenEltVT = WidenVT.getVectorElementType();

  // while (Some element of ConcatOps is not of type MaxVT) {
  // From the end of ConcatOps, collect elements of the same type and put
  // them into an op of the next larger supported type
  // }
  while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
    // Find the run [Idx+1, ConcatEnd) of trailing pieces sharing one type.
    int Idx = ConcatEnd - 1;
    VT = ConcatOps[Idx--].getValueType();
    while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
      Idx--;

    // Pick the next larger *legal* vector type to merge the run into,
    // doubling the element count until the target accepts it.
    int NextSize = VT.isVector() ? VT.getVectorNumElements() : 1;
    EVT NextVT;
    do {
      NextSize *= 2;
      NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
    } while (!TLI.isTypeLegal(NextVT));

    if (!VT.isVector()) {
      // Scalar type, create an INSERT_VECTOR_ELEMENT of type NextVT
      SDValue VecOp = DAG.getUNDEF(NextVT);
      unsigned NumToInsert = ConcatEnd - Idx - 1;
      for (unsigned i = 0, OpIdx = Idx + 1; i < NumToInsert; i++, OpIdx++)
        VecOp = DAG.getInsertVectorElt(dl, VecOp, ConcatOps[OpIdx], i);
      // The merged vector replaces the whole run of scalars.
      ConcatOps[Idx+1] = VecOp;
      ConcatEnd = Idx + 2;
    } else {
      // Vector type, create a CONCAT_VECTORS of type NextVT
      SDValue undefVec = DAG.getUNDEF(VT);
      unsigned OpsToConcat = NextSize/VT.getVectorNumElements();
      SmallVector<SDValue, 16> SubConcatOps(OpsToConcat);
      unsigned RealVals = ConcatEnd - Idx - 1;
      unsigned SubConcatEnd = 0;
      unsigned SubConcatIdx = Idx + 1;
      // Copy the real pieces, then pad with undef up to NextVT's width.
      while (SubConcatEnd < RealVals)
        SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
      while (SubConcatEnd < OpsToConcat)
        SubConcatOps[SubConcatEnd++] = undefVec;
      ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
                                            NextVT, SubConcatOps);
      ConcatEnd = SubConcatIdx + 1;
    }
  }

  // Check to see if we have a single operation with the widen type.
  if (ConcatEnd == 1) {
    VT = ConcatOps[0].getValueType();
    if (VT == WidenVT)
      return ConcatOps[0];
  }

  // add undefs of size MaxVT until ConcatOps grows to length of WidenVT
  unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
  if (NumOps != ConcatEnd ) {
    SDValue UndefVal = DAG.getUNDEF(MaxVT);
    for (unsigned j = ConcatEnd; j < NumOps; ++j)
      ConcatOps[j] = UndefVal;
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
                     ArrayRef(ConcatOps.data(), NumOps));
}
5132
SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
  // Binary op widening for operations that can trap.
  //
  // Widening would make the op execute on padding lanes that do not exist in
  // the original vector; for a trapping op (e.g. integer division) that could
  // introduce a spurious fault. So the op is applied only to the original
  // elements, in the largest legal sub-vector chunks, and the pieces are then
  // reassembled into the widened type.
  unsigned Opcode = N->getOpcode();
  SDLoc dl(N);
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  EVT WidenEltVT = WidenVT.getVectorElementType();
  EVT VT = WidenVT;
  unsigned NumElts = VT.getVectorMinNumElements();
  const SDNodeFlags Flags = N->getFlags();
  // Find the largest legal vector type no wider than WidenVT by halving.
  while (!TLI.isTypeLegal(VT) && NumElts != 1) {
    NumElts = NumElts / 2;
    VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
  }

  if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
    // Operation doesn't trap so just widen as normal.
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
  }

  // Generate a vp.op if it is custom/legal for the target. This avoids need
  // to split and tile the subvectors (below), because the inactive lanes can
  // simply be disabled. To avoid possible recursion, only do this if the
  // widened mask type is legal.
  if (auto VPOpcode = ISD::getVPForBaseOpcode(Opcode);
      VPOpcode && TLI.isOperationLegalOrCustom(*VPOpcode, WidenVT)) {
    if (EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                          WidenVT.getVectorElementCount());
        TLI.isTypeLegal(WideMaskVT)) {
      SDValue InOp1 = GetWidenedVector(N->getOperand(0));
      SDValue InOp2 = GetWidenedVector(N->getOperand(1));
      SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
      // EVL limits the op to the original element count, so the padding
      // lanes stay inactive and cannot trap.
      SDValue EVL =
          DAG.getElementCount(dl, TLI.getVPExplicitVectorLengthTy(),
                              N->getValueType(0).getVectorElementCount());
      return DAG.getNode(*VPOpcode, dl, WidenVT, InOp1, InOp2, Mask, EVL,
                         Flags);
    }
  }

  // FIXME: Improve support for scalable vectors.
  assert(!VT.isScalableVector() && "Scalable vectors not handled yet.");

  // No legal vector version so unroll the vector operation and then widen.
  if (NumElts == 1)
    return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());

  // Since the operation can trap, apply operation on the original vector.
  EVT MaxVT = VT;
  SDValue InOp1 = GetWidenedVector(N->getOperand(0));
  SDValue InOp2 = GetWidenedVector(N->getOperand(1));
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();

  SmallVector<SDValue, 16> ConcatOps(CurNumElts);
  unsigned ConcatEnd = 0;  // Current ConcatOps index.
  int Idx = 0;        // Current Idx into input vectors.

  // NumElts := greatest legal vector size (at most WidenVT)
  // while (orig. vector has unhandled elements) {
  //   take munches of size NumElts from the beginning and add to ConcatOps
  //   NumElts := next smaller supported vector size or 1
  // }
  while (CurNumElts != 0) {
    // Take as many NumElts-sized chunks as fit.
    while (CurNumElts >= NumElts) {
      SDValue EOp1 = DAG.getExtractSubvector(dl, VT, InOp1, Idx);
      SDValue EOp2 = DAG.getExtractSubvector(dl, VT, InOp2, Idx);
      ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
      Idx += NumElts;
      CurNumElts -= NumElts;
    }
    // Step down to the next smaller legal vector size (or scalar).
    do {
      NumElts = NumElts / 2;
      VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
    } while (!TLI.isTypeLegal(VT) && NumElts != 1);

    if (NumElts == 1) {
      // Remaining tail elements are handled as scalars.
      for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
        SDValue EOp1 = DAG.getExtractVectorElt(dl, WidenEltVT, InOp1, Idx);
        SDValue EOp2 = DAG.getExtractVectorElt(dl, WidenEltVT, InOp2, Idx);
        ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
                                             EOp1, EOp2, Flags);
      }
      CurNumElts = 0;
    }
  }

  // Reassemble the computed pieces into the widened result type.
  return CollectOpsToWiden(DAG, TLI, ConcatOps, ConcatEnd, VT, MaxVT, WidenVT);
}
5222
SDValue DAGTypeLegalizer::WidenVecRes_StrictFP(SDNode *N) {
  // Dispatch the strict FP ops that have dedicated widening paths.
  switch (N->getOpcode()) {
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS:
    return WidenVecRes_STRICT_FSETCC(N);
  case ISD::STRICT_FP_EXTEND:
  case ISD::STRICT_FP_ROUND:
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
    return WidenVecRes_Convert_StrictFP(N);
  default:
    break;
  }

  // StrictFP op widening for operations that can trap.
  //
  // As in WidenVecRes_BinaryCanTrap, the op is applied only to the original
  // elements (in the largest legal chunks) so padding lanes cannot raise FP
  // exceptions; additionally every piece produces a chain that must be
  // merged and used to replace the original node's chain result.
  unsigned NumOpers = N->getNumOperands();
  unsigned Opcode = N->getOpcode();
  SDLoc dl(N);
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  EVT WidenEltVT = WidenVT.getVectorElementType();
  EVT VT = WidenVT;
  unsigned NumElts = VT.getVectorNumElements();
  // Find the largest legal vector type no wider than WidenVT by halving.
  while (!TLI.isTypeLegal(VT) && NumElts != 1) {
    NumElts = NumElts / 2;
    VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
  }

  // No legal vector version so unroll the vector operation and then widen.
  if (NumElts == 1)
    return UnrollVectorOp_StrictFP(N, WidenVT.getVectorNumElements());

  // Since the operation can trap, apply operation on the original vector.
  EVT MaxVT = VT;
  SmallVector<SDValue, 4> InOps;
  unsigned CurNumElts = N->getValueType(0).getVectorNumElements();

  SmallVector<SDValue, 16> ConcatOps(CurNumElts);
  SmallVector<SDValue, 16> Chains;
  unsigned ConcatEnd = 0;  // Current ConcatOps index.
  int Idx = 0;        // Current Idx into input vectors.

  // The Chain is the first operand.
  InOps.push_back(N->getOperand(0));

  // Now process the remaining operands.
  for (unsigned i = 1; i < NumOpers; ++i) {
    SDValue Oper = N->getOperand(i);

    EVT OpVT = Oper.getValueType();
    if (OpVT.isVector()) {
      if (getTypeAction(OpVT) == TargetLowering::TypeWidenVector)
        Oper = GetWidenedVector(Oper);
      else {
        // Operand is not being widened itself; pad it to the widened
        // element count so subvector extraction below is uniform.
        EVT WideOpVT =
            EVT::getVectorVT(*DAG.getContext(), OpVT.getVectorElementType(),
                             WidenVT.getVectorElementCount());
        Oper = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                           DAG.getUNDEF(WideOpVT), Oper,
                           DAG.getVectorIdxConstant(0, dl));
      }
    }

    InOps.push_back(Oper);
  }

  // NumElts := greatest legal vector size (at most WidenVT)
  // while (orig. vector has unhandled elements) {
  //   take munches of size NumElts from the beginning and add to ConcatOps
  //   NumElts := next smaller supported vector size or 1
  // }
  while (CurNumElts != 0) {
    // Take as many NumElts-sized chunks as fit.
    while (CurNumElts >= NumElts) {
      SmallVector<SDValue, 4> EOps;

      // Build the operand list for this chunk: chain and scalar operands
      // pass through; vector operands get the matching subvector extracted.
      for (unsigned i = 0; i < NumOpers; ++i) {
        SDValue Op = InOps[i];

        EVT OpVT = Op.getValueType();
        if (OpVT.isVector()) {
          EVT OpExtractVT =
              EVT::getVectorVT(*DAG.getContext(), OpVT.getVectorElementType(),
                               VT.getVectorElementCount());
          Op = DAG.getExtractSubvector(dl, OpExtractVT, Op, Idx);
        }

        EOps.push_back(Op);
      }

      EVT OperVT[] = {VT, MVT::Other};
      SDValue Oper = DAG.getNode(Opcode, dl, OperVT, EOps);
      ConcatOps[ConcatEnd++] = Oper;
      // Remember each piece's chain so they can be token-factored together.
      Chains.push_back(Oper.getValue(1));
      Idx += NumElts;
      CurNumElts -= NumElts;
    }
    // Step down to the next smaller legal vector size (or scalar).
    do {
      NumElts = NumElts / 2;
      VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
    } while (!TLI.isTypeLegal(VT) && NumElts != 1);

    if (NumElts == 1) {
      // Remaining tail elements are handled as scalar strict ops.
      for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
        SmallVector<SDValue, 4> EOps;

        for (unsigned i = 0; i < NumOpers; ++i) {
          SDValue Op = InOps[i];

          EVT OpVT = Op.getValueType();
          if (OpVT.isVector())
            Op = DAG.getExtractVectorElt(dl, OpVT.getVectorElementType(), Op,
                                         Idx);

          EOps.push_back(Op);
        }

        EVT WidenVT[] = {WidenEltVT, MVT::Other};
        SDValue Oper = DAG.getNode(Opcode, dl, WidenVT, EOps);
        ConcatOps[ConcatEnd++] = Oper;
        Chains.push_back(Oper.getValue(1));
      }
      CurNumElts = 0;
    }
  }

  // Build a factor node to remember all the Ops that have been created.
  SDValue NewChain;
  if (Chains.size() == 1)
    NewChain = Chains[0];
  else
    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  // The original node's chain result is replaced by the merged chain.
  ReplaceValueWith(SDValue(N, 1), NewChain);

  // Reassemble the computed pieces into the widened result type.
  return CollectOpsToWiden(DAG, TLI, ConcatOps, ConcatEnd, VT, MaxVT, WidenVT);
}
5359
SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) {
  // Widen an arithmetic-with-overflow op (two vector results: the value and
  // the overflow flag vector). ResNo selects which result drives the widened
  // element count; the sibling result is replaced here as well.
  SDLoc DL(N);
  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT WideResVT, WideOvVT;
  SDValue WideLHS, WideRHS;

  // TODO: This might result in a widen/split loop.
  if (ResNo == 0) {
    // Value result is being widened: derive the overflow type from it.
    WideResVT = TLI.getTypeToTransformTo(*DAG.getContext(), ResVT);
    WideOvVT = EVT::getVectorVT(
        *DAG.getContext(), OvVT.getVectorElementType(),
        WideResVT.getVectorNumElements());

    WideLHS = GetWidenedVector(N->getOperand(0));
    WideRHS = GetWidenedVector(N->getOperand(1));
  } else {
    // Overflow result is being widened: derive the value type from it and
    // pad the (unwidened) operands up to that element count with undef.
    WideOvVT = TLI.getTypeToTransformTo(*DAG.getContext(), OvVT);
    WideResVT = EVT::getVectorVT(
        *DAG.getContext(), ResVT.getVectorElementType(),
        WideOvVT.getVectorNumElements());

    SDValue Zero = DAG.getVectorIdxConstant(0, DL);
    WideLHS = DAG.getNode(
        ISD::INSERT_SUBVECTOR, DL, WideResVT, DAG.getUNDEF(WideResVT),
        N->getOperand(0), Zero);
    WideRHS = DAG.getNode(
        ISD::INSERT_SUBVECTOR, DL, WideResVT, DAG.getUNDEF(WideResVT),
        N->getOperand(1), Zero);
  }

  SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
  SDNode *WideNode = DAG.getNode(
      N->getOpcode(), DL, WideVTs, WideLHS, WideRHS).getNode();

  // Replace the other vector result not being explicitly widened here.
  unsigned OtherNo = 1 - ResNo;
  EVT OtherVT = N->getValueType(OtherNo);
  if (getTypeAction(OtherVT) == TargetLowering::TypeWidenVector) {
    // Sibling also widens: register its widened value directly.
    SetWidenedVector(SDValue(N, OtherNo), SDValue(WideNode, OtherNo));
  } else {
    // Sibling keeps its type: extract the low elements back out.
    SDValue Zero = DAG.getVectorIdxConstant(0, DL);
    SDValue OtherVal = DAG.getNode(
        ISD::EXTRACT_SUBVECTOR, DL, OtherVT, SDValue(WideNode, OtherNo), Zero);
    ReplaceValueWith(SDValue(N, OtherNo), OtherVal);
  }

  return SDValue(WideNode, ResNo);
}
5409
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
  // Widen a conversion-style op (extend/truncate/int<->fp, incl. VP forms)
  // whose source and result element types differ. Tries, in order: emitting
  // the op on the widened input, *_EXTEND_VECTOR_INREG, concatenating or
  // extracting to reach a legal widened input type, and finally full
  // scalarization.
  LLVMContext &Ctx = *DAG.getContext();
  SDValue InOp = N->getOperand(0);
  SDLoc DL(N);

  EVT WidenVT = TLI.getTypeToTransformTo(Ctx, N->getValueType(0));
  ElementCount WidenEC = WidenVT.getVectorElementCount();

  EVT InVT = InOp.getValueType();

  unsigned Opcode = N->getOpcode();
  const SDNodeFlags Flags = N->getFlags();

  // Handle the case of ZERO_EXTEND where the promoted InVT element size does
  // not equal that of WidenVT.
  if (N->getOpcode() == ISD::ZERO_EXTEND &&
      getTypeAction(InVT) == TargetLowering::TypePromoteInteger &&
      TLI.getTypeToTransformTo(Ctx, InVT).getScalarSizeInBits() !=
          WidenVT.getScalarSizeInBits()) {
    // Zero-extend via the promoted value so the high bits are correct.
    InOp = ZExtPromotedInteger(InOp);
    InVT = InOp.getValueType();
    // If promotion overshot the result width, the remaining op is a
    // truncate rather than an extend.
    if (WidenVT.getScalarSizeInBits() < InVT.getScalarSizeInBits())
      Opcode = ISD::TRUNCATE;
  }

  EVT InEltVT = InVT.getVectorElementType();
  EVT InWidenVT = EVT::getVectorVT(Ctx, InEltVT, WidenEC);
  ElementCount InVTEC = InVT.getVectorElementCount();

  if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
    InOp = GetWidenedVector(N->getOperand(0));
    InVT = InOp.getValueType();
    InVTEC = InVT.getVectorElementCount();
    if (InVTEC == WidenEC) {
      // Input widens to exactly the result element count: re-emit directly.
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InOp, Flags);
      if (N->getNumOperands() == 3) {
        assert(N->isVPOpcode() && "Expected VP opcode");
        SDValue Mask =
            GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
        return DAG.getNode(Opcode, DL, WidenVT, InOp, Mask, N->getOperand(2));
      }
      return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1), Flags);
    }
    if (WidenVT.getSizeInBits() == InVT.getSizeInBits()) {
      // If both input and result vector types are of same width, extend
      // operations should be done with SIGN/ZERO_EXTEND_VECTOR_INREG, which
      // accepts fewer elements in the result than in the input.
      if (Opcode == ISD::ANY_EXTEND)
        return DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, WidenVT, InOp);
      if (Opcode == ISD::SIGN_EXTEND)
        return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, WidenVT, InOp);
      if (Opcode == ISD::ZERO_EXTEND)
        return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, WidenVT, InOp);
    }
  }

  if (TLI.isTypeLegal(InWidenVT)) {
    // Because the result and the input are different vector types, widening
    // the result could create a legal type but widening the input might make
    // it an illegal type that might lead to repeatedly splitting the input
    // and then widening it. To avoid this, we widen the input only if
    // it results in a legal type.
    if (WidenEC.isKnownMultipleOf(InVTEC.getKnownMinValue())) {
      // Widen the input and call convert on the widened input vector.
      unsigned NumConcat =
          WidenEC.getKnownMinValue() / InVTEC.getKnownMinValue();
      SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
      Ops[0] = InOp;
      SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT, Ops);
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVec, Flags);
      return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1), Flags);
    }

    if (InVTEC.isKnownMultipleOf(WidenEC.getKnownMinValue())) {
      SDValue InVal = DAG.getExtractSubvector(DL, InWidenVT, InOp, 0);
      // Extract the input and convert the shorten input vector.
      if (N->getNumOperands() == 1)
        return DAG.getNode(Opcode, DL, WidenVT, InVal, Flags);
      return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1), Flags);
    }
  }

  // Otherwise unroll into some nasty scalar code and rebuild the vector.
  EVT EltVT = WidenVT.getVectorElementType();
  SmallVector<SDValue, 16> Ops(WidenEC.getFixedValue(), DAG.getUNDEF(EltVT));
  // Use the original element count so we don't do more scalar opts than
  // necessary.
  unsigned MinElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i=0; i < MinElts; ++i) {
    SDValue Val = DAG.getExtractVectorElt(DL, InEltVT, InOp, i);
    if (N->getNumOperands() == 1)
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, Flags);
    else
      Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1), Flags);
  }

  return DAG.getBuildVector(WidenVT, DL, Ops);
}
5510
WidenVecRes_FP_TO_XINT_SAT(SDNode * N)5511 SDValue DAGTypeLegalizer::WidenVecRes_FP_TO_XINT_SAT(SDNode *N) {
5512 SDLoc dl(N);
5513 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5514 ElementCount WidenNumElts = WidenVT.getVectorElementCount();
5515
5516 SDValue Src = N->getOperand(0);
5517 EVT SrcVT = Src.getValueType();
5518
5519 // Also widen the input.
5520 if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) {
5521 Src = GetWidenedVector(Src);
5522 SrcVT = Src.getValueType();
5523 }
5524
5525 // Input and output not widened to the same size, give up.
5526 if (WidenNumElts != SrcVT.getVectorElementCount())
5527 return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue());
5528
5529 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1));
5530 }
5531
WidenVecRes_XROUND(SDNode * N)5532 SDValue DAGTypeLegalizer::WidenVecRes_XROUND(SDNode *N) {
5533 SDLoc dl(N);
5534 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5535 ElementCount WidenNumElts = WidenVT.getVectorElementCount();
5536
5537 SDValue Src = N->getOperand(0);
5538 EVT SrcVT = Src.getValueType();
5539
5540 // Also widen the input.
5541 if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) {
5542 Src = GetWidenedVector(Src);
5543 SrcVT = Src.getValueType();
5544 }
5545
5546 // Input and output not widened to the same size, give up.
5547 if (WidenNumElts != SrcVT.getVectorElementCount())
5548 return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue());
5549
5550 if (N->getNumOperands() == 1)
5551 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src);
5552
5553 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
5554 assert(N->isVPOpcode() && "Expected VP opcode");
5555
5556 SDValue Mask =
5557 GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
5558 return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, Mask, N->getOperand(2));
5559 }
5560
WidenVecRes_Convert_StrictFP(SDNode * N)5561 SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(SDNode *N) {
5562 SDValue InOp = N->getOperand(1);
5563 SDLoc DL(N);
5564 SmallVector<SDValue, 4> NewOps(N->ops());
5565
5566 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5567 unsigned WidenNumElts = WidenVT.getVectorNumElements();
5568
5569 EVT InVT = InOp.getValueType();
5570 EVT InEltVT = InVT.getVectorElementType();
5571
5572 unsigned Opcode = N->getOpcode();
5573
5574 // FIXME: Optimizations need to be implemented here.
5575
5576 // Otherwise unroll into some nasty scalar code and rebuild the vector.
5577 EVT EltVT = WidenVT.getVectorElementType();
5578 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5579 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
5580 SmallVector<SDValue, 32> OpChains;
5581 // Use the original element count so we don't do more scalar opts than
5582 // necessary.
5583 unsigned MinElts = N->getValueType(0).getVectorNumElements();
5584 for (unsigned i=0; i < MinElts; ++i) {
5585 NewOps[1] = DAG.getExtractVectorElt(DL, InEltVT, InOp, i);
5586 Ops[i] = DAG.getNode(Opcode, DL, EltVTs, NewOps);
5587 OpChains.push_back(Ops[i].getValue(1));
5588 }
5589 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OpChains);
5590 ReplaceValueWith(SDValue(N, 1), NewChain);
5591
5592 return DAG.getBuildVector(WidenVT, DL, Ops);
5593 }
5594
WidenVecRes_EXTEND_VECTOR_INREG(SDNode * N)5595 SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(SDNode *N) {
5596 unsigned Opcode = N->getOpcode();
5597 SDValue InOp = N->getOperand(0);
5598 SDLoc DL(N);
5599
5600 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5601 EVT WidenSVT = WidenVT.getVectorElementType();
5602 unsigned WidenNumElts = WidenVT.getVectorNumElements();
5603
5604 EVT InVT = InOp.getValueType();
5605 EVT InSVT = InVT.getVectorElementType();
5606 unsigned InVTNumElts = InVT.getVectorNumElements();
5607
5608 if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
5609 InOp = GetWidenedVector(InOp);
5610 InVT = InOp.getValueType();
5611 if (InVT.getSizeInBits() == WidenVT.getSizeInBits()) {
5612 switch (Opcode) {
5613 case ISD::ANY_EXTEND_VECTOR_INREG:
5614 case ISD::SIGN_EXTEND_VECTOR_INREG:
5615 case ISD::ZERO_EXTEND_VECTOR_INREG:
5616 return DAG.getNode(Opcode, DL, WidenVT, InOp);
5617 }
5618 }
5619 }
5620
5621 // Unroll, extend the scalars and rebuild the vector.
5622 SmallVector<SDValue, 16> Ops;
5623 for (unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i != e; ++i) {
5624 SDValue Val = DAG.getExtractVectorElt(DL, InSVT, InOp, i);
5625 switch (Opcode) {
5626 case ISD::ANY_EXTEND_VECTOR_INREG:
5627 Val = DAG.getNode(ISD::ANY_EXTEND, DL, WidenSVT, Val);
5628 break;
5629 case ISD::SIGN_EXTEND_VECTOR_INREG:
5630 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, WidenSVT, Val);
5631 break;
5632 case ISD::ZERO_EXTEND_VECTOR_INREG:
5633 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenSVT, Val);
5634 break;
5635 default:
5636 llvm_unreachable("A *_EXTEND_VECTOR_INREG node was expected");
5637 }
5638 Ops.push_back(Val);
5639 }
5640
5641 while (Ops.size() != WidenNumElts)
5642 Ops.push_back(DAG.getUNDEF(WidenSVT));
5643
5644 return DAG.getBuildVector(WidenVT, DL, Ops);
5645 }
5646
WidenVecRes_FCOPYSIGN(SDNode * N)5647 SDValue DAGTypeLegalizer::WidenVecRes_FCOPYSIGN(SDNode *N) {
5648 // If this is an FCOPYSIGN with same input types, we can treat it as a
5649 // normal (can trap) binary op.
5650 if (N->getOperand(0).getValueType() == N->getOperand(1).getValueType())
5651 return WidenVecRes_BinaryCanTrap(N);
5652
5653 // If the types are different, fall back to unrolling.
5654 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5655 return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
5656 }
5657
5658 /// Result and first source operand are different scalar types, but must have
5659 /// the same number of elements. There is an additional control argument which
5660 /// should be passed through unchanged.
WidenVecRes_UnarySameEltsWithScalarArg(SDNode * N)5661 SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(SDNode *N) {
5662 SDValue FpValue = N->getOperand(0);
5663 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5664 if (getTypeAction(FpValue.getValueType()) != TargetLowering::TypeWidenVector)
5665 return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
5666 SDValue Arg = GetWidenedVector(FpValue);
5667 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, {Arg, N->getOperand(1)},
5668 N->getFlags());
5669 }
5670
WidenVecRes_ExpOp(SDNode * N)5671 SDValue DAGTypeLegalizer::WidenVecRes_ExpOp(SDNode *N) {
5672 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5673 SDValue InOp = GetWidenedVector(N->getOperand(0));
5674 SDValue RHS = N->getOperand(1);
5675 EVT ExpVT = RHS.getValueType();
5676 SDValue ExpOp = RHS;
5677 if (ExpVT.isVector()) {
5678 EVT WideExpVT =
5679 WidenVT.changeVectorElementType(ExpVT.getVectorElementType());
5680 ExpOp = ModifyToType(RHS, WideExpVT);
5681 }
5682
5683 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, ExpOp);
5684 }
5685
WidenVecRes_Unary(SDNode * N)5686 SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) {
5687 // Unary op widening.
5688 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5689 SDValue InOp = GetWidenedVector(N->getOperand(0));
5690 if (N->getNumOperands() == 1)
5691 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp, N->getFlags());
5692 if (N->getOpcode() == ISD::AssertNoFPClass)
5693 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, InOp,
5694 N->getOperand(1), N->getFlags());
5695
5696 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
5697 assert(N->isVPOpcode() && "Expected VP opcode");
5698
5699 SDValue Mask =
5700 GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
5701 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT,
5702 {InOp, Mask, N->getOperand(2)});
5703 }
5704
WidenVecRes_InregOp(SDNode * N)5705 SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
5706 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5707 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
5708 cast<VTSDNode>(N->getOperand(1))->getVT()
5709 .getVectorElementType(),
5710 WidenVT.getVectorNumElements());
5711 SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
5712 return DAG.getNode(N->getOpcode(), SDLoc(N),
5713 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5714 }
5715
WidenVecRes_UnaryOpWithTwoResults(SDNode * N,unsigned ResNo)5716 SDValue DAGTypeLegalizer::WidenVecRes_UnaryOpWithTwoResults(SDNode *N,
5717 unsigned ResNo) {
5718 EVT VT0 = N->getValueType(0);
5719 EVT VT1 = N->getValueType(1);
5720
5721 assert(VT0.isVector() && VT1.isVector() &&
5722 VT0.getVectorElementCount() == VT1.getVectorElementCount() &&
5723 "expected both results to be vectors of matching element count");
5724
5725 LLVMContext &Ctx = *DAG.getContext();
5726 SDValue InOp = GetWidenedVector(N->getOperand(0));
5727
5728 EVT WidenVT = TLI.getTypeToTransformTo(Ctx, N->getValueType(ResNo));
5729 ElementCount WidenEC = WidenVT.getVectorElementCount();
5730
5731 EVT WidenVT0 = EVT::getVectorVT(Ctx, VT0.getVectorElementType(), WidenEC);
5732 EVT WidenVT1 = EVT::getVectorVT(Ctx, VT1.getVectorElementType(), WidenEC);
5733
5734 SDNode *WidenNode =
5735 DAG.getNode(N->getOpcode(), SDLoc(N), {WidenVT0, WidenVT1}, InOp)
5736 .getNode();
5737
5738 ReplaceOtherWidenResults(N, WidenNode, ResNo);
5739 return SDValue(WidenNode, ResNo);
5740 }
5741
WidenVecRes_MERGE_VALUES(SDNode * N,unsigned ResNo)5742 SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo) {
5743 SDValue WidenVec = DisintegrateMERGE_VALUES(N, ResNo);
5744 return GetWidenedVector(WidenVec);
5745 }
5746
WidenVecRes_ADDRSPACECAST(SDNode * N)5747 SDValue DAGTypeLegalizer::WidenVecRes_ADDRSPACECAST(SDNode *N) {
5748 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
5749 SDValue InOp = GetWidenedVector(N->getOperand(0));
5750 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(N);
5751
5752 return DAG.getAddrSpaceCast(SDLoc(N), WidenVT, InOp,
5753 AddrSpaceCastN->getSrcAddressSpace(),
5754 AddrSpaceCastN->getDestAddressSpace());
5755 }
5756
// Widen the result of a BITCAST. Depending on how the input type is being
// legalized, either bitcast a same-sized legalized input directly, rebuild a
// same-sized input vector (concat / build_vector / scalar_to_vector) and
// bitcast that, or fall back to a store/load through a stack slot.
SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
  SDValue InOp = N->getOperand(0);
  EVT InVT = InOp.getValueType();
  EVT VT = N->getValueType(0);
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  SDLoc dl(N);

  switch (getTypeAction(InVT)) {
  case TargetLowering::TypeLegal:
    break;
  case TargetLowering::TypeScalarizeScalableVector:
    report_fatal_error("Scalarization of scalable vectors is not supported.");
  case TargetLowering::TypePromoteInteger: {
    // If the incoming type is a vector that is being promoted, then
    // we know that the elements are arranged differently and that we
    // must perform the conversion using a stack slot.
    if (InVT.isVector())
      break;

    // If the InOp is promoted to the same size, convert it.  Otherwise,
    // fall out of the switch and widen the promoted input.
    SDValue NInOp = GetPromotedInteger(InOp);
    EVT NInVT = NInOp.getValueType();
    if (WidenVT.bitsEq(NInVT)) {
      // For big endian targets we need to shift the input integer or the
      // interesting bits will end up at the wrong place.
      if (DAG.getDataLayout().isBigEndian()) {
        unsigned ShiftAmt = NInVT.getSizeInBits() - InVT.getSizeInBits();
        EVT ShiftAmtTy = TLI.getShiftAmountTy(NInVT, DAG.getDataLayout());
        assert(ShiftAmt < WidenVT.getSizeInBits() && "Too large shift amount!");
        NInOp = DAG.getNode(ISD::SHL, dl, NInVT, NInOp,
                            DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
      }
      return DAG.getNode(ISD::BITCAST, dl, WidenVT, NInOp);
    }
    InOp = NInOp;
    InVT = NInVT;
    break;
  }
  case TargetLowering::TypeSoftenFloat:
  case TargetLowering::TypePromoteFloat:
  case TargetLowering::TypeSoftPromoteHalf:
  case TargetLowering::TypeExpandInteger:
  case TargetLowering::TypeExpandFloat:
  case TargetLowering::TypeScalarizeVector:
  case TargetLowering::TypeSplitVector:
    break;
  case TargetLowering::TypeWidenVector:
    // If the InOp is widened to the same size, convert it.  Otherwise, fall
    // out of the switch and widen the widened input.
    InOp = GetWidenedVector(InOp);
    InVT = InOp.getValueType();
    if (WidenVT.bitsEq(InVT))
      // The input widens to the same size. Convert to the widen value.
      return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
    break;
  }

  unsigned WidenSize = WidenVT.getSizeInBits();
  unsigned InSize = InVT.getSizeInBits();
  unsigned InScalarSize = InVT.getScalarSizeInBits();
  // x86mmx is not an acceptable vector element type, so don't try.
  if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
    // Determine new input vector type.  The new input vector type will use
    // the same element type (if its a vector) or use the input type as a
    // vector.  It is the same size as the type to widen to.
    EVT NewInVT;
    unsigned NewNumParts = WidenSize / InSize;
    if (InVT.isVector()) {
      EVT InEltVT = InVT.getVectorElementType();
      NewInVT = EVT::getVectorVT(*DAG.getContext(), InEltVT,
                                 WidenSize / InEltVT.getSizeInBits());
    } else {
      // For big endian systems, using the promoted input scalar type
      // to produce the scalar_to_vector would put the desired bits into
      // the least significant byte(s) of the wider element zero. This
      // will mean that the users of the result vector are using incorrect
      // bits. Use the original input type instead. Although either input
      // type can be used on little endian systems, for consistency we
      // use the original type there as well.
      EVT OrigInVT = N->getOperand(0).getValueType();
      NewNumParts = WidenSize / OrigInVT.getSizeInBits();
      NewInVT = EVT::getVectorVT(*DAG.getContext(), OrigInVT, NewNumParts);
    }

    if (TLI.isTypeLegal(NewInVT)) {
      SDValue NewVec;
      if (InVT.isVector()) {
        // Because the result and the input are different vector types, widening
        // the result could create a legal type but widening the input might
        // make it an illegal type that might lead to repeatedly splitting the
        // input and then widening it. To avoid this, we widen the input only if
        // it results in a legal type.
        if (WidenSize % InSize == 0) {
          // The widened size is a whole number of input vectors: concat the
          // input with undef padding vectors of its own type.
          SmallVector<SDValue, 16> Ops(NewNumParts, DAG.getUNDEF(InVT));
          Ops[0] = InOp;

          NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewInVT, Ops);
        } else {
          // Otherwise rebuild element-by-element and pad with undef scalars.
          SmallVector<SDValue, 16> Ops;
          DAG.ExtractVectorElements(InOp, Ops);
          Ops.append(WidenSize / InScalarSize - Ops.size(),
                     DAG.getUNDEF(InVT.getVectorElementType()));

          NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl, NewInVT, Ops);
        }
      } else {
        NewVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewInVT, InOp);
      }
      return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
    }
  }

  // Last resort: round-trip the value through a stack slot.
  return CreateStackStoreLoad(InOp, WidenVT);
}
5872
WidenVecRes_BUILD_VECTOR(SDNode * N)5873 SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
5874 SDLoc dl(N);
5875 // Build a vector with undefined for the new nodes.
5876 EVT VT = N->getValueType(0);
5877
5878 // Integer BUILD_VECTOR operands may be larger than the node's vector element
5879 // type. The UNDEFs need to have the same type as the existing operands.
5880 EVT EltVT = N->getOperand(0).getValueType();
5881 unsigned NumElts = VT.getVectorNumElements();
5882
5883 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
5884 unsigned WidenNumElts = WidenVT.getVectorNumElements();
5885
5886 SmallVector<SDValue, 16> NewOps(N->ops());
5887 assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
5888 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5889
5890 return DAG.getBuildVector(WidenVT, dl, NewOps);
5891 }
5892
// Widen a CONCAT_VECTORS result. Tries, in order: re-concat with undef
// padding when the input type is legal and divides the widened count;
// returning the widened first operand (when the rest are undef) or a shuffle
// of two widened operands when the inputs widen to the result type; and
// finally an element-by-element extract/build_vector fallback.
SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
  EVT InVT = N->getOperand(0).getValueType();
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  SDLoc dl(N);
  unsigned NumOperands = N->getNumOperands();

  bool InputWidened = false; // Indicates we need to widen the input.
  if (getTypeAction(InVT) != TargetLowering::TypeWidenVector) {
    unsigned WidenNumElts = WidenVT.getVectorMinNumElements();
    unsigned NumInElts = InVT.getVectorMinNumElements();
    if (WidenNumElts % NumInElts == 0) {
      // Add undef vectors to widen to correct length.
      unsigned NumConcat = WidenNumElts / NumInElts;
      SDValue UndefVal = DAG.getUNDEF(InVT);
      SmallVector<SDValue, 16> Ops(NumConcat);
      for (unsigned i=0; i < NumOperands; ++i)
        Ops[i] = N->getOperand(i);
      for (unsigned i = NumOperands; i != NumConcat; ++i)
        Ops[i] = UndefVal;
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Ops);
    }
  } else {
    InputWidened = true;
    if (WidenVT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
      // The inputs and the result are widen to the same value.
      unsigned i;
      for (i=1; i < NumOperands; ++i)
        if (!N->getOperand(i).isUndef())
          break;

      if (i == NumOperands)
        // Everything but the first operand is an UNDEF so just return the
        // widened first operand.
        return GetWidenedVector(N->getOperand(0));

      if (NumOperands == 2) {
        assert(!WidenVT.isScalableVector() &&
               "Cannot use vector shuffles to widen CONCAT_VECTOR result");
        unsigned WidenNumElts = WidenVT.getVectorNumElements();
        unsigned NumInElts = InVT.getVectorNumElements();

        // Replace concat of two operands with a shuffle. Elements past the
        // two input runs keep the -1 (undef) mask value.
        SmallVector<int, 16> MaskOps(WidenNumElts, -1);
        for (unsigned i = 0; i < NumInElts; ++i) {
          MaskOps[i] = i;
          MaskOps[i + NumInElts] = i + WidenNumElts;
        }
        return DAG.getVectorShuffle(WidenVT, dl,
                                    GetWidenedVector(N->getOperand(0)),
                                    GetWidenedVector(N->getOperand(1)),
                                    MaskOps);
      }
    }
  }

  assert(!WidenVT.isScalableVector() &&
         "Cannot use build vectors to widen CONCAT_VECTOR result");
  unsigned WidenNumElts = WidenVT.getVectorNumElements();
  unsigned NumInElts = InVT.getVectorNumElements();

  // Fall back to use extracts and build vector.
  EVT EltVT = WidenVT.getVectorElementType();
  SmallVector<SDValue, 16> Ops(WidenNumElts);
  unsigned Idx = 0;
  for (unsigned i=0; i < NumOperands; ++i) {
    SDValue InOp = N->getOperand(i);
    if (InputWidened)
      InOp = GetWidenedVector(InOp);
    for (unsigned j = 0; j < NumInElts; ++j)
      Ops[Idx++] = DAG.getExtractVectorElt(dl, EltVT, InOp, j);
  }
  // Pad the tail of the widened vector with undef elements.
  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; Idx < WidenNumElts; ++Idx)
    Ops[Idx] = UndefVal;
  return DAG.getBuildVector(WidenVT, dl, Ops);
}
5969
WidenVecRes_INSERT_SUBVECTOR(SDNode * N)5970 SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(SDNode *N) {
5971 EVT VT = N->getValueType(0);
5972 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
5973 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
5974 SDValue InOp2 = N->getOperand(1);
5975 SDValue Idx = N->getOperand(2);
5976 SDLoc dl(N);
5977 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WidenVT, InOp1, InOp2, Idx);
5978 }
5979
// Widen an EXTRACT_SUBVECTOR result. Tries, in order: returning the (widened)
// input itself, a direct extract at the widened type, a GCD-based split into
// smaller extracts concatenated with undef (scalable vectors), and finally a
// per-element extract/build_vector fallback (fixed vectors only).
SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  SDValue InOp = N->getOperand(0);
  SDValue Idx = N->getOperand(1);
  SDLoc dl(N);

  auto InOpTypeAction = getTypeAction(InOp.getValueType());
  if (InOpTypeAction == TargetLowering::TypeWidenVector)
    InOp = GetWidenedVector(InOp);

  EVT InVT = InOp.getValueType();

  // Check if we can just return the input vector after widening.
  uint64_t IdxVal = Idx->getAsZExtVal();
  if (IdxVal == 0 && InVT == WidenVT)
    return InOp;

  // Check if we can extract from the vector.
  unsigned WidenNumElts = WidenVT.getVectorMinNumElements();
  unsigned InNumElts = InVT.getVectorMinNumElements();
  unsigned VTNumElts = VT.getVectorMinNumElements();
  assert(IdxVal % VTNumElts == 0 &&
         "Expected Idx to be a multiple of subvector minimum vector length");
  if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, WidenVT, InOp, Idx);

  if (VT.isScalableVector()) {
    // Try to split the operation up into smaller extracts and concat the
    // results together, e.g.
    //    nxv6i64 extract_subvector(nxv12i64, 6)
    // <->
    //    nxv8i64 concat(
    //        nxv2i64 extract_subvector(nxv16i64, 6)
    //        nxv2i64 extract_subvector(nxv16i64, 8)
    //        nxv2i64 extract_subvector(nxv16i64, 10)
    //        undef)
    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    EVT PartVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
                                  ElementCount::getScalable(GCD));
    // Avoid recursion around e.g. nxv1i8.
    if (getTypeAction(PartVT) != TargetLowering::TypeWidenVector) {
      SmallVector<SDValue> Parts;
      unsigned I = 0;
      // Extract the VTNumElts real elements in GCD-sized chunks...
      for (; I < VTNumElts / GCD; ++I)
        Parts.push_back(
            DAG.getExtractSubvector(dl, PartVT, InOp, IdxVal + I * GCD));
      // ...then pad up to the widened element count with undef chunks.
      for (; I < WidenNumElts / GCD; ++I)
        Parts.push_back(DAG.getUNDEF(PartVT));

      return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Parts);
    }

    report_fatal_error("Don't know how to widen the result of "
                       "EXTRACT_SUBVECTOR for scalable vectors");
  }

  // We could try widening the input to the right length but for now, extract
  // the original elements, fill the rest with undefs and build a vector.
  SmallVector<SDValue, 16> Ops(WidenNumElts);
  unsigned i;
  for (i = 0; i < VTNumElts; ++i)
    Ops[i] = DAG.getExtractVectorElt(dl, EltVT, InOp, IdxVal + i);

  SDValue UndefVal = DAG.getUNDEF(EltVT);
  for (; i < WidenNumElts; ++i)
    Ops[i] = UndefVal;
  return DAG.getBuildVector(WidenVT, dl, Ops);
}
6052
WidenVecRes_AssertZext(SDNode * N)6053 SDValue DAGTypeLegalizer::WidenVecRes_AssertZext(SDNode *N) {
6054 SDValue InOp = ModifyToType(
6055 N->getOperand(0),
6056 TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)), true);
6057 return DAG.getNode(ISD::AssertZext, SDLoc(N), InOp.getValueType(), InOp,
6058 N->getOperand(1));
6059 }
6060
WidenVecRes_INSERT_VECTOR_ELT(SDNode * N)6061 SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
6062 SDValue InOp = GetWidenedVector(N->getOperand(0));
6063 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N),
6064 InOp.getValueType(), InOp,
6065 N->getOperand(1), N->getOperand(2));
6066 }
6067
// Widen the result of a vector LOAD. Non-byte-sized element loads are
// scalarized; otherwise a VP_LOAD with the original element count as EVL is
// preferred when legal, and finally the generic multi-load widening helpers
// are used. In every path the load's chain result is replaced as well.
SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();

  // A vector must always be stored in memory as-is, i.e. without any padding
  // between the elements, since various code depend on it, e.g. in the
  // handling of a bitcast of a vector type to int, which may be done with a
  // vector store followed by an integer load. A vector that does not have
  // elements that are byte-sized must therefore be stored as an integer
  // built out of the extracted vector elements.
  if (!LD->getMemoryVT().isByteSized()) {
    SDValue Value, NewChain;
    std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
    ReplaceValueWith(SDValue(LD, 0), Value);
    ReplaceValueWith(SDValue(LD, 1), NewChain);
    return SDValue();
  }

  // Generate a vector-predicated load if it is custom/legal on the target. To
  // avoid possible recursion, only do this if the widened mask type is legal.
  // FIXME: Not all targets may support EVL in VP_LOAD. These will have been
  // removed from the IR by the ExpandVectorPredication pass but we're
  // reintroducing them here.
  EVT LdVT = LD->getMemoryVT();
  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), LdVT);
  EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                    WideVT.getVectorElementCount());
  if (ExtType == ISD::NON_EXTLOAD &&
      TLI.isOperationLegalOrCustom(ISD::VP_LOAD, WideVT) &&
      TLI.isTypeLegal(WideMaskVT)) {
    SDLoc DL(N);
    // All-ones mask with the ORIGINAL element count as EVL, so only the real
    // elements are loaded; the extra widened lanes are never touched.
    SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT);
    SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(),
                                      LdVT.getVectorElementCount());
    SDValue NewLoad =
        DAG.getLoadVP(LD->getAddressingMode(), ISD::NON_EXTLOAD, WideVT, DL,
                      LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask,
                      EVL, LD->getMemoryVT(), LD->getMemOperand());

    // Modified the chain - switch anything that used the old chain to use
    // the new one.
    ReplaceValueWith(SDValue(N, 1), NewLoad.getValue(1));

    return NewLoad;
  }

  SDValue Result;
  SmallVector<SDValue, 16> LdChain;  // Chain for the series of load
  if (ExtType != ISD::NON_EXTLOAD)
    Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
  else
    Result = GenWidenVectorLoads(LdChain, LD);

  if (Result) {
    // If we generate a single load, we can use that for the chain.  Otherwise,
    // build a factor node to remember the multiple loads are independent and
    // chain to that.
    SDValue NewChain;
    if (LdChain.size() == 1)
      NewChain = LdChain[0];
    else
      NewChain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other, LdChain);

    // Modified the chain - switch anything that used the old chain to use
    // the new one.
    ReplaceValueWith(SDValue(N, 1), NewChain);

    return Result;
  }

  report_fatal_error("Unable to widen vector load");
}
6140
// Widen the result of a VP_LOAD: the mask operand is widened alongside the
// result (the EVL operand already bounds the active lanes, so it is reused
// unchanged), and the chain result is replaced with the new load's chain.
SDValue DAGTypeLegalizer::WidenVecRes_VP_LOAD(VPLoadSDNode *N) {
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  SDValue Mask = N->getMask();
  SDValue EVL = N->getVectorLength();
  ISD::LoadExtType ExtType = N->getExtensionType();
  SDLoc dl(N);

  // The mask should be widened as well
  assert(getTypeAction(Mask.getValueType()) ==
             TargetLowering::TypeWidenVector &&
         "Unable to widen binary VP op");
  Mask = GetWidenedVector(Mask);
  // The widened mask must end up with the same element count as the widened
  // result, or the operands of the new VP_LOAD would be inconsistent.
  assert(Mask.getValueType().getVectorElementCount() ==
             TLI.getTypeToTransformTo(*DAG.getContext(), Mask.getValueType())
                 .getVectorElementCount() &&
         "Unable to widen vector load");

  SDValue Res =
      DAG.getLoadVP(N->getAddressingMode(), ExtType, WidenVT, dl, N->getChain(),
                    N->getBasePtr(), N->getOffset(), Mask, EVL,
                    N->getMemoryVT(), N->getMemOperand(), N->isExpandingLoad());
  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  return Res;
}
6167
// Widen the result of an experimental VP strided load. Only the mask needs
// widening to match the widened result; the EVL operand already limits the
// active lanes and is forwarded unchanged. The chain result is replaced.
SDValue DAGTypeLegalizer::WidenVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *N) {
  SDLoc DL(N);

  // The mask should be widened as well
  SDValue Mask = N->getMask();
  assert(getTypeAction(Mask.getValueType()) ==
             TargetLowering::TypeWidenVector &&
         "Unable to widen VP strided load");
  Mask = GetWidenedVector(Mask);

  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  assert(Mask.getValueType().getVectorElementCount() ==
             WidenVT.getVectorElementCount() &&
         "Data and mask vectors should have the same number of elements");

  SDValue Res = DAG.getStridedLoadVP(
      N->getAddressingMode(), N->getExtensionType(), WidenVT, DL, N->getChain(),
      N->getBasePtr(), N->getOffset(), N->getStride(), Mask,
      N->getVectorLength(), N->getMemoryVT(), N->getMemOperand(),
      N->isExpandingLoad());

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  return Res;
}
6194
WidenVecRes_VECTOR_COMPRESS(SDNode * N)6195 SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(SDNode *N) {
6196 SDValue Vec = N->getOperand(0);
6197 SDValue Mask = N->getOperand(1);
6198 SDValue Passthru = N->getOperand(2);
6199 EVT WideVecVT =
6200 TLI.getTypeToTransformTo(*DAG.getContext(), Vec.getValueType());
6201 EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
6202 Mask.getValueType().getVectorElementType(),
6203 WideVecVT.getVectorElementCount());
6204
6205 SDValue WideVec = ModifyToType(Vec, WideVecVT);
6206 SDValue WideMask = ModifyToType(Mask, WideMaskVT, /*FillWithZeroes=*/true);
6207 SDValue WidePassthru = ModifyToType(Passthru, WideVecVT);
6208 return DAG.getNode(ISD::VECTOR_COMPRESS, SDLoc(N), WideVecVT, WideVec,
6209 WideMask, WidePassthru);
6210 }
6211
// Widen the result of a masked load. When possible this is lowered to a
// VP_LOAD whose EVL is the original element count; a non-undef passthru on
// scalable vectors is then merged back in with a VP_SELECT. Otherwise the
// mask is zero-padded and a widened masked load is emitted directly.
SDValue DAGTypeLegalizer::WidenVecRes_MLOAD(MaskedLoadSDNode *N) {
  EVT VT = N->getValueType(0);
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  SDValue Mask = N->getMask();
  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  ISD::LoadExtType ExtType = N->getExtensionType();
  SDLoc dl(N);

  EVT WideMaskVT =
      EVT::getVectorVT(*DAG.getContext(), MaskVT.getVectorElementType(),
                       WidenVT.getVectorElementCount());

  if (ExtType == ISD::NON_EXTLOAD &&
      TLI.isOperationLegalOrCustom(ISD::VP_LOAD, WidenVT) &&
      TLI.isTypeLegal(WideMaskVT) &&
      // If there is a passthru, we shouldn't use vp.load. However,
      // type legalizer will struggle on masked.load with
      // scalable vectors, so for scalable vectors, we still use vp.load
      // but manually merge the load result with the passthru using vp.select.
      (N->getPassThru()->isUndef() || VT.isScalableVector())) {
    // Pad the original mask into the widened mask; the EVL below keeps the
    // padded lanes inactive regardless of their (undef) values.
    Mask = DAG.getInsertSubvector(dl, DAG.getUNDEF(WideMaskVT), Mask, 0);
    SDValue EVL = DAG.getElementCount(dl, TLI.getVPExplicitVectorLengthTy(),
                                      VT.getVectorElementCount());
    SDValue NewLoad =
        DAG.getLoadVP(N->getAddressingMode(), ISD::NON_EXTLOAD, WidenVT, dl,
                      N->getChain(), N->getBasePtr(), N->getOffset(), Mask, EVL,
                      N->getMemoryVT(), N->getMemOperand());
    SDValue NewVal = NewLoad;

    // Manually merge with vp.select
    if (!N->getPassThru()->isUndef()) {
      assert(WidenVT.isScalableVector());
      NewVal =
          DAG.getNode(ISD::VP_SELECT, dl, WidenVT, Mask, NewVal, PassThru, EVL);
    }

    // Modified the chain - switch anything that used the old chain to use
    // the new one.
    ReplaceValueWith(SDValue(N, 1), NewLoad.getValue(1));

    return NewVal;
  }

  // The mask should be widened as well. Padding lanes are filled with zero
  // so the extra elements stay masked off.
  Mask = ModifyToType(Mask, WideMaskVT, true);

  SDValue Res = DAG.getMaskedLoad(
      WidenVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
      PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      ExtType, N->isExpandingLoad());
  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  return Res;
}
6268
// Widen the result of a masked gather: the mask (zero-padded so the new
// lanes are inactive), index vector, passthru, and memory type are all
// widened to the result's element count, and the chain result is replaced.
SDValue DAGTypeLegalizer::WidenVecRes_MGATHER(MaskedGatherSDNode *N) {

  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  SDValue Mask = N->getMask();
  EVT MaskVT = Mask.getValueType();
  SDValue PassThru = GetWidenedVector(N->getPassThru());
  SDValue Scale = N->getScale();
  unsigned NumElts = WideVT.getVectorNumElements();
  SDLoc dl(N);

  // The mask should be widened as well
  EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
                                    MaskVT.getVectorElementType(),
                                    WideVT.getVectorNumElements());
  // Zero-fill the padding lanes so no extra elements are gathered.
  Mask = ModifyToType(Mask, WideMaskVT, true);

  // Widen the Index operand
  SDValue Index = N->getIndex();
  EVT WideIndexVT = EVT::getVectorVT(*DAG.getContext(),
                                     Index.getValueType().getScalarType(),
                                     NumElts);
  Index = ModifyToType(Index, WideIndexVT);
  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
                    Scale };

  // Widen the MemoryType
  EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(),
                                   N->getMemoryVT().getScalarType(), NumElts);
  SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
                                    WideMemVT, dl, Ops, N->getMemOperand(),
                                    N->getIndexType(), N->getExtensionType());

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  return Res;
}
6306
// Widen the result of a VP_GATHER: the index and mask are widened to the
// result's element count while the EVL operand (which bounds the active
// lanes) is forwarded unchanged; the chain result is replaced.
SDValue DAGTypeLegalizer::WidenVecRes_VP_GATHER(VPGatherSDNode *N) {
  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  SDValue Mask = N->getMask();
  SDValue Scale = N->getScale();
  ElementCount WideEC = WideVT.getVectorElementCount();
  SDLoc dl(N);

  SDValue Index = GetWidenedVector(N->getIndex());
  // Widen the memory type alongside the result type.
  EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(),
                                   N->getMemoryVT().getScalarType(), WideEC);
  Mask = GetWidenedMask(Mask, WideEC);

  SDValue Ops[] = {N->getChain(), N->getBasePtr(), Index, Scale,
                   Mask,          N->getVectorLength()};
  SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
                                dl, Ops, N->getMemOperand(), N->getIndexType());

  // Legalize the chain result - switch anything that used the old chain to
  // use the new one.
  ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
  return Res;
}
6329
WidenVecRes_ScalarOp(SDNode * N)6330 SDValue DAGTypeLegalizer::WidenVecRes_ScalarOp(SDNode *N) {
6331 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
6332 if (N->isVPOpcode())
6333 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0),
6334 N->getOperand(1), N->getOperand(2));
6335 return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));
6336 }
6337
6338 // Return true is this is a SETCC node or a strict version of it.
isSETCCOp(unsigned Opcode)6339 static inline bool isSETCCOp(unsigned Opcode) {
6340 switch (Opcode) {
6341 case ISD::SETCC:
6342 case ISD::STRICT_FSETCC:
6343 case ISD::STRICT_FSETCCS:
6344 return true;
6345 }
6346 return false;
6347 }
6348
6349 // Return true if this is a node that could have two SETCCs as operands.
isLogicalMaskOp(unsigned Opcode)6350 static inline bool isLogicalMaskOp(unsigned Opcode) {
6351 switch (Opcode) {
6352 case ISD::AND:
6353 case ISD::OR:
6354 case ISD::XOR:
6355 return true;
6356 }
6357 return false;
6358 }
6359
6360 // If N is a SETCC or a strict variant of it, return the type
6361 // of the compare operands.
getSETCCOperandType(SDValue N)6362 static inline EVT getSETCCOperandType(SDValue N) {
6363 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
6364 return N->getOperand(OpNo).getValueType();
6365 }
6366
6367 // This is used just for the assert in convertMask(). Check that this either
6368 // a SETCC or a previously handled SETCC by convertMask().
6369 #ifndef NDEBUG
// This is used just for the assert in convertMask(). Check that this either
// a SETCC or a previously handled SETCC by convertMask(), i.e. the value is
// a SETCC (or logical combination of SETCCs / constant build vector) possibly
// wrapped in the resize operations convertMask() emits.
static inline bool isSETCCorConvertedSETCC(SDValue N) {
  // Strip an EXTRACT_SUBVECTOR, or a CONCAT_VECTORS whose operands past the
  // first are all undef — the two forms convertMask() uses to change the
  // element count — and continue with the surviving operand.
  if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    N = N.getOperand(0);
  else if (N.getOpcode() == ISD::CONCAT_VECTORS) {
    for (unsigned i = 1; i < N->getNumOperands(); ++i)
      if (!N->getOperand(i)->isUndef())
        return false;
    N = N.getOperand(0);
  }

  // Likewise strip a single truncate or sign-extend used by convertMask()
  // to change the element size.
  if (N.getOpcode() == ISD::TRUNCATE)
    N = N.getOperand(0);
  else if (N.getOpcode() == ISD::SIGN_EXTEND)
    N = N.getOperand(0);

  // A logical mask op is acceptable when both of its operands are
  // themselves (converted) SETCCs.
  if (isLogicalMaskOp(N.getOpcode()))
    return isSETCCorConvertedSETCC(N.getOperand(0)) &&
           isSETCCorConvertedSETCC(N.getOperand(1));

  return (isSETCCOp(N.getOpcode()) ||
          ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
}
6392 #endif
6393
6394 // Return a mask of vector type MaskVT to replace InMask. Also adjust MaskVT
6395 // to ToMaskVT if needed with vector extension or truncation.
// Return a mask of vector type MaskVT to replace InMask. Also adjust MaskVT
// to ToMaskVT if needed with vector extension or truncation.
SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT,
                                      EVT ToMaskVT) {
  // Currently a SETCC or a AND/OR/XOR with two SETCCs are handled.
  // FIXME: This code seems to be too restrictive, we might consider
  // generalizing it or dropping it.
  assert(isSETCCorConvertedSETCC(InMask) && "Unexpected mask argument.");

  // Make a new Mask node, with a legal result VT.
  SDValue Mask;
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = InMask->getNumOperands(); i < e; ++i)
    Ops.push_back(InMask->getOperand(i));
  if (InMask->isStrictFPOpcode()) {
    // Strict compares carry a chain result; re-emit with {MaskVT, Other}
    // and route the old chain's users to the new chain.
    Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask),
                       { MaskVT, MVT::Other }, Ops);
    ReplaceValueWith(InMask.getValue(1), Mask.getValue(1));
  }
  else
    Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask), MaskVT, Ops);

  // If MaskVT has smaller or bigger elements than ToMaskVT, a vector sign
  // extend or truncate is needed.
  LLVMContext &Ctx = *DAG.getContext();
  unsigned MaskScalarBits = MaskVT.getScalarSizeInBits();
  unsigned ToMaskScalBits = ToMaskVT.getScalarSizeInBits();
  if (MaskScalarBits < ToMaskScalBits) {
    EVT ExtVT = EVT::getVectorVT(Ctx, ToMaskVT.getVectorElementType(),
                                 MaskVT.getVectorNumElements());
    Mask = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Mask), ExtVT, Mask);
  } else if (MaskScalarBits > ToMaskScalBits) {
    EVT TruncVT = EVT::getVectorVT(Ctx, ToMaskVT.getVectorElementType(),
                                   MaskVT.getVectorNumElements());
    Mask = DAG.getNode(ISD::TRUNCATE, SDLoc(Mask), TruncVT, Mask);
  }

  assert(Mask->getValueType(0).getScalarSizeInBits() ==
             ToMaskVT.getScalarSizeInBits() &&
         "Mask should have the right element size by now.");

  // Adjust Mask to the right number of elements: extract a prefix subvector
  // to shrink, or concat with undef subvectors to grow.
  unsigned CurrMaskNumEls = Mask->getValueType(0).getVectorNumElements();
  if (CurrMaskNumEls > ToMaskVT.getVectorNumElements()) {
    Mask = DAG.getExtractSubvector(SDLoc(Mask), ToMaskVT, Mask, 0);
  } else if (CurrMaskNumEls < ToMaskVT.getVectorNumElements()) {
    unsigned NumSubVecs = (ToMaskVT.getVectorNumElements() / CurrMaskNumEls);
    EVT SubVT = Mask->getValueType(0);
    SmallVector<SDValue, 16> SubOps(NumSubVecs, DAG.getUNDEF(SubVT));
    SubOps[0] = Mask;
    Mask = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Mask), ToMaskVT, SubOps);
  }

  assert((Mask->getValueType(0) == ToMaskVT) &&
         "A mask of ToMaskVT should have been produced by now.");

  return Mask;
}
6452
6453 // This method tries to handle some special cases for the vselect mask
6454 // and if needed adjusting the mask vector type to match that of the VSELECT.
6455 // Without it, many cases end up with scalarization of the SETCC, with many
6456 // unnecessary instructions.
// This method tries to handle some special cases for the vselect mask
// and if needed adjusting the mask vector type to match that of the VSELECT.
// Without it, many cases end up with scalarization of the SETCC, with many
// unnecessary instructions. Returns the converted mask, or SDValue() when
// this node should be left to the normal widening path.
SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
  LLVMContext &Ctx = *DAG.getContext();
  SDValue Cond = N->getOperand(0);

  if (N->getOpcode() != ISD::VSELECT)
    return SDValue();

  // Only a SETCC condition, or a logical op combining two SETCCs, is handled.
  if (!isSETCCOp(Cond->getOpcode()) && !isLogicalMaskOp(Cond->getOpcode()))
    return SDValue();

  // If this is a splitted VSELECT that was previously already handled, do
  // nothing.
  EVT CondVT = Cond->getValueType(0);
  if (CondVT.getScalarSizeInBits() != 1)
    return SDValue();

  EVT VSelVT = N->getValueType(0);

  // This method can't handle scalable vector types.
  // FIXME: This support could be added in the future.
  if (VSelVT.isScalableVector())
    return SDValue();

  // Only handle vector types which are a power of 2.
  if (!isPowerOf2_64(VSelVT.getSizeInBits()))
    return SDValue();

  // Don't touch if this will be scalarized.
  EVT FinalVT = VSelVT;
  while (getTypeAction(FinalVT) == TargetLowering::TypeSplitVector)
    FinalVT = FinalVT.getHalfNumVectorElementsVT(Ctx);

  if (FinalVT.getVectorNumElements() == 1)
    return SDValue();

  // If there is support for an i1 vector mask, don't touch.
  if (isSETCCOp(Cond.getOpcode())) {
    // Follow the compare-operand type through legalization to find the mask
    // type the target would actually produce for this SETCC.
    EVT SetCCOpVT = getSETCCOperandType(Cond);
    while (TLI.getTypeAction(Ctx, SetCCOpVT) != TargetLowering::TypeLegal)
      SetCCOpVT = TLI.getTypeToTransformTo(Ctx, SetCCOpVT);
    EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
    if (SetCCResVT.getScalarSizeInBits() == 1)
      return SDValue();
  } else if (CondVT.getScalarType() == MVT::i1) {
    // If there is support for an i1 vector mask (or only scalar i1 conditions),
    // don't touch.
    while (TLI.getTypeAction(Ctx, CondVT) != TargetLowering::TypeLegal)
      CondVT = TLI.getTypeToTransformTo(Ctx, CondVT);

    if (CondVT.getScalarType() == MVT::i1)
      return SDValue();
  }

  // Widen the vselect result type if needed.
  if (getTypeAction(VSelVT) == TargetLowering::TypeWidenVector)
    VSelVT = TLI.getTypeToTransformTo(Ctx, VSelVT);

  // The mask of the VSELECT should have integer elements.
  EVT ToMaskVT = VSelVT;
  if (!ToMaskVT.getScalarType().isInteger())
    ToMaskVT = ToMaskVT.changeVectorElementTypeToInteger();

  SDValue Mask;
  if (isSETCCOp(Cond->getOpcode())) {
    EVT MaskVT = getSetCCResultType(getSETCCOperandType(Cond));
    Mask = convertMask(Cond, MaskVT, ToMaskVT);
  } else if (isLogicalMaskOp(Cond->getOpcode()) &&
             isSETCCOp(Cond->getOperand(0).getOpcode()) &&
             isSETCCOp(Cond->getOperand(1).getOpcode())) {
    // Cond is (AND/OR/XOR (SETCC, SETCC))
    SDValue SETCC0 = Cond->getOperand(0);
    SDValue SETCC1 = Cond->getOperand(1);
    EVT VT0 = getSetCCResultType(getSETCCOperandType(SETCC0));
    EVT VT1 = getSetCCResultType(getSETCCOperandType(SETCC1));
    unsigned ScalarBits0 = VT0.getScalarSizeInBits();
    unsigned ScalarBits1 = VT1.getScalarSizeInBits();
    unsigned ScalarBits_ToMask = ToMaskVT.getScalarSizeInBits();
    EVT MaskVT;
    // If the two SETCCs have different VTs, either extend/truncate one of
    // them to the other "towards" ToMaskVT, or truncate one and extend the
    // other to ToMaskVT.
    if (ScalarBits0 != ScalarBits1) {
      EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
      EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
      if (ScalarBits_ToMask >= WideVT.getScalarSizeInBits())
        MaskVT = WideVT;
      else if (ScalarBits_ToMask <= NarrowVT.getScalarSizeInBits())
        MaskVT = NarrowVT;
      else
        MaskVT = ToMaskVT;
    } else
      // If the two SETCCs have the same VT, don't change it.
      MaskVT = VT0;

    // Make new SETCCs and logical nodes.
    SETCC0 = convertMask(SETCC0, VT0, MaskVT);
    SETCC1 = convertMask(SETCC1, VT1, MaskVT);
    Cond = DAG.getNode(Cond->getOpcode(), SDLoc(Cond), MaskVT, SETCC0, SETCC1);

    // Convert the logical op for VSELECT if needed.
    Mask = convertMask(Cond, MaskVT, ToMaskVT);
  } else
    return SDValue();

  return Mask;
}
6563
// Widen the result of a SELECT/VSELECT/VP_SELECT/VP_MERGE node. The value
// operands are widened to the transformed type; the condition operand needs
// extra care when it is itself a vector mask.
SDValue DAGTypeLegalizer::WidenVecRes_Select(SDNode *N) {
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  ElementCount WidenEC = WidenVT.getVectorElementCount();

  SDValue Cond1 = N->getOperand(0);
  EVT CondVT = Cond1.getValueType();
  unsigned Opcode = N->getOpcode();
  if (CondVT.isVector()) {
    // First, try to build a mask whose element type matches the widened
    // result directly (see WidenVSELECTMask).
    if (SDValue WideCond = WidenVSELECTMask(N)) {
      SDValue InOp1 = GetWidenedVector(N->getOperand(1));
      SDValue InOp2 = GetWidenedVector(N->getOperand(2));
      assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
      return DAG.getNode(Opcode, SDLoc(N), WidenVT, WideCond, InOp1, InOp2);
    }

    EVT CondEltVT = CondVT.getVectorElementType();
    // Desired condition type: same element type, widened element count.
    EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(), CondEltVT, WidenEC);
    if (getTypeAction(CondVT) == TargetLowering::TypeWidenVector)
      Cond1 = GetWidenedVector(Cond1);

    // If we have to split the condition there is no point in widening the
    // select. This would result in an cycle of widening the select ->
    // widening the condition operand -> splitting the condition operand ->
    // splitting the select -> widening the select. Instead split this select
    // further and widen the resulting type.
    if (getTypeAction(CondVT) == TargetLowering::TypeSplitVector) {
      SDValue SplitSelect = SplitVecOp_VSELECT(N, 0);
      SDValue Res = ModifyToType(SplitSelect, WidenVT);
      return Res;
    }

    // Pad or trim the condition to the widened element count.
    if (Cond1.getValueType() != CondWidenVT)
      Cond1 = ModifyToType(Cond1, CondWidenVT);
  }

  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  SDValue InOp2 = GetWidenedVector(N->getOperand(2));
  assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
  // VP nodes carry an explicit vector length as operand 3 — keep it.
  if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
    return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2,
                       N->getOperand(3));
  return DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2);
}
6607
WidenVecRes_SELECT_CC(SDNode * N)6608 SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) {
6609 SDValue InOp1 = GetWidenedVector(N->getOperand(2));
6610 SDValue InOp2 = GetWidenedVector(N->getOperand(3));
6611 return DAG.getNode(ISD::SELECT_CC, SDLoc(N),
6612 InOp1.getValueType(), N->getOperand(0),
6613 N->getOperand(1), InOp1, InOp2, N->getOperand(4));
6614 }
6615
WidenVecRes_UNDEF(SDNode * N)6616 SDValue DAGTypeLegalizer::WidenVecRes_UNDEF(SDNode *N) {
6617 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
6618 return DAG.getUNDEF(WidenVT);
6619 }
6620
WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode * N)6621 SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) {
6622 EVT VT = N->getValueType(0);
6623 SDLoc dl(N);
6624
6625 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
6626 unsigned NumElts = VT.getVectorNumElements();
6627 unsigned WidenNumElts = WidenVT.getVectorNumElements();
6628
6629 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
6630 SDValue InOp2 = GetWidenedVector(N->getOperand(1));
6631
6632 // Adjust mask based on new input vector length.
6633 SmallVector<int, 16> NewMask(WidenNumElts, -1);
6634 for (unsigned i = 0; i != NumElts; ++i) {
6635 int Idx = N->getMaskElt(i);
6636 if (Idx < (int)NumElts)
6637 NewMask[i] = Idx;
6638 else
6639 NewMask[i] = Idx - NumElts + WidenNumElts;
6640 }
6641 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6642 }
6643
// Widen the result of a VECTOR_REVERSE. Reversing the widened vector puts the
// original elements in its high lanes, so the result is re-extracted starting
// at index WidenNumElts - VTNumElts.
SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_REVERSE(SDNode *N) {
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  SDValue OpValue = GetWidenedVector(N->getOperand(0));
  assert(WidenVT == OpValue.getValueType() && "Unexpected widened vector type");

  SDValue ReverseVal = DAG.getNode(ISD::VECTOR_REVERSE, dl, WidenVT, OpValue);
  unsigned WidenNumElts = WidenVT.getVectorMinNumElements();
  unsigned VTNumElts = VT.getVectorMinNumElements();
  // First lane of the reversed widened vector holding an original element.
  unsigned IdxVal = WidenNumElts - VTNumElts;

  if (VT.isScalableVector()) {
    // Try to split the 'Widen ReverseVal' into smaller extracts and concat the
    // results together, e.g.(nxv6i64 -> nxv8i64)
    // nxv8i64 vector_reverse
    // <->
    // nxv8i64 concat(
    // nxv2i64 extract_subvector(nxv8i64, 2)
    // nxv2i64 extract_subvector(nxv8i64, 4)
    // nxv2i64 extract_subvector(nxv8i64, 6)
    // nxv2i64 undef)

    // GCD of the two element counts gives the largest part size at which both
    // the valid data and the undef padding can be expressed whole.
    unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
    EVT PartVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
                                  ElementCount::getScalable(GCD));
    assert((IdxVal % GCD) == 0 && "Expected Idx to be a multiple of the broken "
                                  "down type's element count");
    SmallVector<SDValue> Parts;
    unsigned i = 0;
    // Extract the reversed original data...
    for (; i < VTNumElts / GCD; ++i)
      Parts.push_back(
          DAG.getExtractSubvector(dl, PartVT, ReverseVal, IdxVal + i * GCD));
    // ...then pad the remainder of the widened result with undef parts.
    for (; i < WidenNumElts / GCD; ++i)
      Parts.push_back(DAG.getUNDEF(PartVT));

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Parts);
  }

  // Use VECTOR_SHUFFLE to combine new vector from 'ReverseVal' for
  // fixed-vectors.
  SmallVector<int, 16> Mask(WidenNumElts, -1);
  std::iota(Mask.begin(), Mask.begin() + VTNumElts, IdxVal);

  return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
                              Mask);
}
6693
WidenVecRes_GET_ACTIVE_LANE_MASK(SDNode * N)6694 SDValue DAGTypeLegalizer::WidenVecRes_GET_ACTIVE_LANE_MASK(SDNode *N) {
6695 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
6696 return DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, SDLoc(N), NVT, N->ops());
6697 }
6698
// Widen the result of a vector SETCC / VP_SETCC. The result and operand types
// may legalize differently, so the operands are either picked up already
// widened or padded by hand; if the operands must be split instead, the whole
// compare is split and the pieces re-widened.
SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         N->getOperand(0).getValueType().isVector() &&
         "Operands must be vectors");
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  ElementCount WidenEC = WidenVT.getVectorElementCount();

  SDValue InOp1 = N->getOperand(0);
  EVT InVT = InOp1.getValueType();
  assert(InVT.isVector() && "can not widen non-vector type");
  // Operand type with the widened element count.
  EVT WidenInVT =
      EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), WidenEC);

  // The input and output types often differ here, and it could be that while
  // we'd prefer to widen the result type, the input operands have been split.
  // In this case, we also need to split the result of this node as well.
  if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) {
    SDValue SplitVSetCC = SplitVecOp_VSETCC(N);
    SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
    return Res;
  }

  // If the inputs also widen, handle them directly. Otherwise widen by hand.
  SDValue InOp2 = N->getOperand(1);
  if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
    InOp1 = GetWidenedVector(InOp1);
    InOp2 = GetWidenedVector(InOp2);
  } else {
    // Pad each operand into the low lanes of a poison vector of the widened
    // operand type.
    SDValue Poison = DAG.getPOISON(WidenInVT);
    SDValue ZeroIdx = DAG.getVectorIdxConstant(0, SDLoc(N));
    InOp1 = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), WidenInVT, Poison,
                        InOp1, ZeroIdx);
    InOp2 = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), WidenInVT, Poison,
                        InOp2, ZeroIdx);
  }

  // Assume that the input and output will be widen appropriately. If not,
  // we will have to unroll it at some point.
  assert(InOp1.getValueType() == WidenInVT &&
         InOp2.getValueType() == WidenInVT &&
         "Input not widened to expected type!");
  (void)WidenInVT;
  // VP_SETCC additionally carries a mask and an explicit vector length.
  if (N->getOpcode() == ISD::VP_SETCC) {
    SDValue Mask =
        GetWidenedMask(N->getOperand(3), WidenVT.getVectorElementCount());
    return DAG.getNode(ISD::VP_SETCC, SDLoc(N), WidenVT, InOp1, InOp2,
                       N->getOperand(2), Mask, N->getOperand(4));
  }
  return DAG.getNode(ISD::SETCC, SDLoc(N), WidenVT, InOp1, InOp2,
                     N->getOperand(2));
}
6750
// Widen the result of a STRICT_FSETCC/STRICT_FSETCCS by fully unrolling:
// each lane is compared with a scalar strict node, the per-lane chains are
// merged with a TokenFactor, and the i1 results are selected into full-width
// boolean elements before being rebuilt as a vector.
SDValue DAGTypeLegalizer::WidenVecRes_STRICT_FSETCC(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         N->getOperand(1).getValueType().isVector() &&
         "Operands must be vectors");
  EVT VT = N->getValueType(0);
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  unsigned WidenNumElts = WidenVT.getVectorNumElements();
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  SDLoc dl(N);
  SDValue Chain = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  SDValue CC = N->getOperand(3);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();

  // Fully unroll and reassemble.
  // Extra lanes of the widened result stay undef.
  SmallVector<SDValue, 8> Scalars(WidenNumElts, DAG.getUNDEF(EltVT));
  SmallVector<SDValue, 8> Chains(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    SDValue LHSElem = DAG.getExtractVectorElt(dl, TmpEltVT, LHS, i);
    SDValue RHSElem = DAG.getExtractVectorElt(dl, TmpEltVT, RHS, i);

    // Scalar strict compare produces {i1, chain}.
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    // Expand the i1 into the element type's true/false boolean encoding.
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));
  }

  // Merge all per-lane chains and replace the node's chain result.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  ReplaceValueWith(SDValue(N, 1), NewChain);

  return DAG.getBuildVector(WidenVT, dl, Scalars);
}
6788
6789 //===----------------------------------------------------------------------===//
6790 // Widen Vector Operand
6791 //===----------------------------------------------------------------------===//
// Dispatch routine for widening one vector *operand* of node N. Returns true
// if the sub-method updated N in place; returns false after registering a
// replacement (or when the target custom-lowered the node). Unknown opcodes
// are a fatal error.
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
  LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG));
  SDValue Res = SDValue();

  // See if the target wants to custom widen this node.
  if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
    return false;

  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "WidenVectorOperand op #" << OpNo << ": ";
    N->dump(&DAG);
    dbgs() << "\n";
#endif
    report_fatal_error("Do not know how to widen this operator's operand!");

  case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break;
  case ISD::FAKE_USE:
    Res = WidenVecOp_FAKE_USE(N);
    break;
  case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
  case ISD::INSERT_SUBVECTOR: Res = WidenVecOp_INSERT_SUBVECTOR(N); break;
  case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
  case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
  case ISD::STORE: Res = WidenVecOp_STORE(N); break;
  case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(N, OpNo); break;
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    Res = WidenVecOp_VP_STRIDED_STORE(N, OpNo);
    break;
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Res = WidenVecOp_EXTEND_VECTOR_INREG(N);
    break;
  case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break;
  case ISD::MGATHER: Res = WidenVecOp_MGATHER(N, OpNo); break;
  case ISD::MSCATTER: Res = WidenVecOp_MSCATTER(N, OpNo); break;
  case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(N, OpNo); break;
  case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: Res = WidenVecOp_STRICT_FSETCC(N); break;
  case ISD::VSELECT: Res = WidenVecOp_VSELECT(N); break;
  case ISD::FLDEXP:
  case ISD::FCOPYSIGN:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::LRINT:
  case ISD::LLRINT:
    Res = WidenVecOp_UnrollVectorOp(N);
    break;
  case ISD::IS_FPCLASS: Res = WidenVecOp_IS_FPCLASS(N); break;

  case ISD::ANY_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    Res = WidenVecOp_EXTEND(N);
    break;

  case ISD::SCMP:
  case ISD::UCMP:
    Res = WidenVecOp_CMP(N);
    break;

  case ISD::FP_EXTEND:
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_ROUND:
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_TO_SINT:
  case ISD::STRICT_FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
  case ISD::TRUNCATE:
    Res = WidenVecOp_Convert(N);
    break;

  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    Res = WidenVecOp_FP_TO_XINT_SAT(N);
    break;

  case ISD::EXPERIMENTAL_VP_SPLAT:
    Res = WidenVecOp_VP_SPLAT(N, OpNo);
    break;

  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAXIMUM:
  case ISD::VECREDUCE_FMINIMUM:
    Res = WidenVecOp_VECREDUCE(N);
    break;
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_SEQ_FMUL:
    Res = WidenVecOp_VECREDUCE_SEQ(N);
    break;
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_MUL:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAXIMUM:
  case ISD::VP_REDUCE_FMINIMUM:
    Res = WidenVecOp_VP_REDUCE(N);
    break;
  case ISD::VP_CTTZ_ELTS:
  case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
    Res = WidenVecOp_VP_CttzElements(N);
    break;
  }

  // If Res is null, the sub-method took care of registering the result.
  if (!Res.getNode()) return false;

  // If the result is N, the sub-method updated N in place. Tell the legalizer
  // core about this.
  if (Res.getNode() == N)
    return true;

  // Sanity-check the replacement: same first result type, and the expected
  // number of results (strict FP nodes also produce a chain).
  if (N->isStrictFPOpcode())
    assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 2 &&
           "Invalid operand expansion");
  else
    assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
           "Invalid operand expansion");

  ReplaceValueWith(SDValue(N, 0), Res);
  return false;
}
6946
// Widen the operand of an ANY/SIGN/ZERO_EXTEND whose result is already legal.
// The widened operand is resized (via insert/extract subvector) until its
// total bit width matches the result, then an *_EXTEND_VECTOR_INREG node
// extends just the low lanes. Falls back to scalarization if no suitable
// legal intermediate type exists.
SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue InOp = N->getOperand(0);
  assert(getTypeAction(InOp.getValueType()) ==
             TargetLowering::TypeWidenVector &&
         "Unexpected type action");
  InOp = GetWidenedVector(InOp);
  assert(VT.getVectorNumElements() <
             InOp.getValueType().getVectorNumElements() &&
         "Input wasn't widened!");

  // We may need to further widen the operand until it has the same total
  // vector size as the result.
  EVT InVT = InOp.getValueType();
  if (InVT.getSizeInBits() != VT.getSizeInBits()) {
    EVT InEltVT = InVT.getVectorElementType();
    // Scan all simple vector types for a legal one with the same element type
    // and the result's total size.
    for (EVT FixedVT : MVT::vector_valuetypes()) {
      EVT FixedEltVT = FixedVT.getVectorElementType();
      if (TLI.isTypeLegal(FixedVT) &&
          FixedVT.getSizeInBits() == VT.getSizeInBits() &&
          FixedEltVT == InEltVT) {
        assert(FixedVT.getVectorNumElements() >= VT.getVectorNumElements() &&
               "Not enough elements in the fixed type for the operand!");
        assert(FixedVT.getVectorNumElements() != InVT.getVectorNumElements() &&
               "We can't have the same type as we started with!");
        // Grow by inserting into undef, or shrink by extracting the low part.
        if (FixedVT.getVectorNumElements() > InVT.getVectorNumElements())
          InOp = DAG.getInsertSubvector(DL, DAG.getUNDEF(FixedVT), InOp, 0);
        else
          InOp = DAG.getExtractSubvector(DL, FixedVT, InOp, 0);
        break;
      }
    }
    InVT = InOp.getValueType();
    if (InVT.getSizeInBits() != VT.getSizeInBits())
      // We couldn't find a legal vector type that was a widening of the input
      // and could be extended in-register to the result type, so we have to
      // scalarize.
      return WidenVecOp_Convert(N);
  }

  // Use special DAG nodes to represent the operation of extending the
  // low lanes.
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Extend legalization on extend operation!");
  case ISD::ANY_EXTEND:
    return DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, InOp);
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, InOp);
  case ISD::ZERO_EXTEND:
    return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, InOp);
  }
}
7002
WidenVecOp_CMP(SDNode * N)7003 SDValue DAGTypeLegalizer::WidenVecOp_CMP(SDNode *N) {
7004 SDLoc dl(N);
7005
7006 EVT OpVT = N->getOperand(0).getValueType();
7007 EVT ResVT = N->getValueType(0);
7008 SDValue LHS = GetWidenedVector(N->getOperand(0));
7009 SDValue RHS = GetWidenedVector(N->getOperand(1));
7010
7011 // 1. EXTRACT_SUBVECTOR
7012 // 2. SIGN_EXTEND/ZERO_EXTEND
7013 // 3. CMP
7014 LHS = DAG.getExtractSubvector(dl, OpVT, LHS, 0);
7015 RHS = DAG.getExtractSubvector(dl, OpVT, RHS, 0);
7016
7017 // At this point the result type is guaranteed to be valid, so we can use it
7018 // as the operand type by extending it appropriately
7019 ISD::NodeType ExtendOpcode =
7020 N->getOpcode() == ISD::SCMP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
7021 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
7022 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
7023
7024 return DAG.getNode(N->getOpcode(), dl, ResVT, LHS, RHS);
7025 }
7026
WidenVecOp_UnrollVectorOp(SDNode * N)7027 SDValue DAGTypeLegalizer::WidenVecOp_UnrollVectorOp(SDNode *N) {
7028 // The result (and first input) is legal, but the second input is illegal.
7029 // We can't do much to fix that, so just unroll and let the extracts off of
7030 // the second input be widened as needed later.
7031 return DAG.UnrollVectorOp(N);
7032 }
7033
WidenVecOp_IS_FPCLASS(SDNode * N)7034 SDValue DAGTypeLegalizer::WidenVecOp_IS_FPCLASS(SDNode *N) {
7035 SDLoc DL(N);
7036 EVT ResultVT = N->getValueType(0);
7037 SDValue Test = N->getOperand(1);
7038 SDValue WideArg = GetWidenedVector(N->getOperand(0));
7039
7040 // Process this node similarly to SETCC.
7041 EVT WideResultVT = getSetCCResultType(WideArg.getValueType());
7042 if (ResultVT.getScalarType() == MVT::i1)
7043 WideResultVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
7044 WideResultVT.getVectorNumElements());
7045
7046 SDValue WideNode = DAG.getNode(ISD::IS_FPCLASS, DL, WideResultVT,
7047 {WideArg, Test}, N->getFlags());
7048
7049 // Extract the needed results from the result vector.
7050 EVT ResVT =
7051 EVT::getVectorVT(*DAG.getContext(), WideResultVT.getVectorElementType(),
7052 ResultVT.getVectorNumElements());
7053 SDValue CC = DAG.getExtractSubvector(DL, ResVT, WideNode, 0);
7054
7055 EVT OpVT = N->getOperand(0).getValueType();
7056 ISD::NodeType ExtendCode =
7057 TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
7058 return DAG.getNode(ExtendCode, DL, ResultVT, CC);
7059 }
7060
WidenVecOp_Convert(SDNode * N)7061 SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
7062 // Since the result is legal and the input is illegal.
7063 EVT VT = N->getValueType(0);
7064 EVT EltVT = VT.getVectorElementType();
7065 SDLoc dl(N);
7066 SDValue InOp = N->getOperand(N->isStrictFPOpcode() ? 1 : 0);
7067 assert(getTypeAction(InOp.getValueType()) ==
7068 TargetLowering::TypeWidenVector &&
7069 "Unexpected type action");
7070 InOp = GetWidenedVector(InOp);
7071 EVT InVT = InOp.getValueType();
7072 unsigned Opcode = N->getOpcode();
7073
7074 // See if a widened result type would be legal, if so widen the node.
7075 // FIXME: This isn't safe for StrictFP. Other optimization here is needed.
7076 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
7077 InVT.getVectorElementCount());
7078 if (TLI.isTypeLegal(WideVT) && !N->isStrictFPOpcode()) {
7079 SDValue Res;
7080 if (N->isStrictFPOpcode()) {
7081 if (Opcode == ISD::STRICT_FP_ROUND)
7082 Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
7083 { N->getOperand(0), InOp, N->getOperand(2) });
7084 else
7085 Res = DAG.getNode(Opcode, dl, { WideVT, MVT::Other },
7086 { N->getOperand(0), InOp });
7087 // Legalize the chain result - switch anything that used the old chain to
7088 // use the new one.
7089 ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
7090 } else {
7091 if (Opcode == ISD::FP_ROUND)
7092 Res = DAG.getNode(Opcode, dl, WideVT, InOp, N->getOperand(1));
7093 else
7094 Res = DAG.getNode(Opcode, dl, WideVT, InOp);
7095 }
7096 return DAG.getExtractSubvector(dl, VT, Res, 0);
7097 }
7098
7099 EVT InEltVT = InVT.getVectorElementType();
7100
7101 // Unroll the convert into some scalar code and create a nasty build vector.
7102 unsigned NumElts = VT.getVectorNumElements();
7103 SmallVector<SDValue, 16> Ops(NumElts);
7104 if (N->isStrictFPOpcode()) {
7105 SmallVector<SDValue, 4> NewOps(N->ops());
7106 SmallVector<SDValue, 32> OpChains;
7107 for (unsigned i=0; i < NumElts; ++i) {
7108 NewOps[1] = DAG.getExtractVectorElt(dl, InEltVT, InOp, i);
7109 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
7110 OpChains.push_back(Ops[i].getValue(1));
7111 }
7112 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);
7113 ReplaceValueWith(SDValue(N, 1), NewChain);
7114 } else {
7115 for (unsigned i = 0; i < NumElts; ++i)
7116 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
7117 DAG.getExtractVectorElt(dl, InEltVT, InOp, i));
7118 }
7119
7120 return DAG.getBuildVector(VT, dl, Ops);
7121 }
7122
WidenVecOp_FP_TO_XINT_SAT(SDNode * N)7123 SDValue DAGTypeLegalizer::WidenVecOp_FP_TO_XINT_SAT(SDNode *N) {
7124 EVT DstVT = N->getValueType(0);
7125 SDValue Src = GetWidenedVector(N->getOperand(0));
7126 EVT SrcVT = Src.getValueType();
7127 ElementCount WideNumElts = SrcVT.getVectorElementCount();
7128 SDLoc dl(N);
7129
7130 // See if a widened result type would be legal, if so widen the node.
7131 EVT WideDstVT = EVT::getVectorVT(*DAG.getContext(),
7132 DstVT.getVectorElementType(), WideNumElts);
7133 if (TLI.isTypeLegal(WideDstVT)) {
7134 SDValue Res =
7135 DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1));
7136 return DAG.getNode(
7137 ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
7138 DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
7139 }
7140
7141 // Give up and unroll.
7142 return DAG.UnrollVectorOp(N);
7143 }
7144
// Widen the operand of a BITCAST. Tries two in-register strategies before
// falling back to a store/load through the stack:
//  1) scalar result: bitcast the widened input to a legal vector of the
//     result type and extract element 0;
//  2) vector result: bitcast to a legal vector of the result's element type
//     and extract the leading subvector.
SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue InOp = GetWidenedVector(N->getOperand(0));
  EVT InWidenVT = InOp.getValueType();
  SDLoc dl(N);

  // Check if we can convert between two legal vector types and extract.
  TypeSize InWidenSize = InWidenVT.getSizeInBits();
  TypeSize Size = VT.getSizeInBits();
  // x86mmx is not an acceptable vector element type, so don't try.
  if (!VT.isVector() && VT != MVT::x86mmx &&
      InWidenSize.hasKnownScalarFactor(Size)) {
    // The widened input is an exact multiple of the scalar result size, so it
    // can be viewed as a vector of NewNumElts results.
    unsigned NewNumElts = InWidenSize.getKnownScalarFactor(Size);
    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
    if (TLI.isTypeLegal(NewVT)) {
      SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
      return DAG.getExtractVectorElt(dl, VT, BitOp, 0);
    }
  }

  // Handle a case like bitcast v12i8 -> v3i32. Normally that would get widened
  // to v16i8 -> v4i32, but for a target where v3i32 is legal but v12i8 is not,
  // we end up here. Handling the case here with EXTRACT_SUBVECTOR avoids
  // having to copy via memory.
  if (VT.isVector()) {
    EVT EltVT = VT.getVectorElementType();
    unsigned EltSize = EltVT.getFixedSizeInBits();
    if (InWidenSize.isKnownMultipleOf(EltSize)) {
      // Element count of the widened input reinterpreted at EltSize bits
      // per element.
      ElementCount NewNumElts =
          (InWidenVT.getVectorElementCount() * InWidenVT.getScalarSizeInBits())
              .divideCoefficientBy(EltSize);
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NewNumElts);
      if (TLI.isTypeLegal(NewVT)) {
        SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
        return DAG.getExtractSubvector(dl, VT, BitOp, 0);
      }
    }
  }

  // Last resort: round-trip through a stack slot.
  return CreateStackStoreLoad(InOp, VT);
}
7186
7187 // Vectors with sizes that are not powers of 2 need to be widened to the
7188 // next largest power of 2. For example, we may get a vector of 3 32-bit
7189 // integers or of 6 16-bit integers, both of which have to be widened to a
7190 // 128-bit vector.
WidenVecOp_FAKE_USE(SDNode * N)7191 SDValue DAGTypeLegalizer::WidenVecOp_FAKE_USE(SDNode *N) {
7192 SDValue WidenedOp = GetWidenedVector(N->getOperand(1));
7193 return DAG.getNode(ISD::FAKE_USE, SDLoc(), MVT::Other, N->getOperand(0),
7194 WidenedOp);
7195 }
7196
WidenVecOp_CONCAT_VECTORS(SDNode * N)7197 SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
7198 EVT VT = N->getValueType(0);
7199 EVT EltVT = VT.getVectorElementType();
7200 EVT InVT = N->getOperand(0).getValueType();
7201 SDLoc dl(N);
7202
7203 // If the widen width for this operand is the same as the width of the concat
7204 // and all but the first operand is undef, just use the widened operand.
7205 unsigned NumOperands = N->getNumOperands();
7206 if (VT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
7207 unsigned i;
7208 for (i = 1; i < NumOperands; ++i)
7209 if (!N->getOperand(i).isUndef())
7210 break;
7211
7212 if (i == NumOperands)
7213 return GetWidenedVector(N->getOperand(0));
7214 }
7215
7216 // Otherwise, fall back to a nasty build vector.
7217 unsigned NumElts = VT.getVectorNumElements();
7218 SmallVector<SDValue, 16> Ops(NumElts);
7219
7220 unsigned NumInElts = InVT.getVectorNumElements();
7221
7222 unsigned Idx = 0;
7223 for (unsigned i=0; i < NumOperands; ++i) {
7224 SDValue InOp = N->getOperand(i);
7225 assert(getTypeAction(InOp.getValueType()) ==
7226 TargetLowering::TypeWidenVector &&
7227 "Unexpected type action");
7228 InOp = GetWidenedVector(InOp);
7229 for (unsigned j = 0; j < NumInElts; ++j)
7230 Ops[Idx++] = DAG.getExtractVectorElt(dl, EltVT, InOp, j);
7231 }
7232 return DAG.getBuildVector(VT, dl, Ops);
7233 }
7234
// Widen the subvector operand of an INSERT_SUBVECTOR. The widened subvector
// may only be inserted directly when all of its (padded) lanes land on valid
// indices of the destination; otherwise fall back to per-element inserts of
// just the original lanes.
SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue SubVec = N->getOperand(1);
  SDValue InVec = N->getOperand(0);

  EVT OrigVT = SubVec.getValueType();
  if (getTypeAction(SubVec.getValueType()) == TargetLowering::TypeWidenVector)
    SubVec = GetWidenedVector(SubVec);

  EVT SubVT = SubVec.getValueType();

  // Whether or not all the elements of the widened SubVec will be inserted into
  // valid indices of VT.
  bool IndicesValid = false;
  // If we statically know that VT can fit SubVT, the indices are valid.
  if (VT.knownBitsGE(SubVT))
    IndicesValid = true;
  else if (VT.isScalableVector() && SubVT.isFixedLengthVector()) {
    // Otherwise, if we're inserting a fixed vector into a scalable vector and
    // we know the minimum vscale we can work out if it's valid ourselves.
    Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
        Attribute::VScaleRange);
    if (Attr.isValid()) {
      unsigned VScaleMin = Attr.getVScaleRangeMin();
      // Minimum runtime size of VT must cover the fixed subvector.
      if (VT.getSizeInBits().getKnownMinValue() * VScaleMin >=
          SubVT.getFixedSizeInBits())
        IndicesValid = true;
    }
  }

  SDLoc DL(N);

  // We need to make sure that the indices are still valid, otherwise we might
  // widen what was previously well-defined to something undefined.
  if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0)
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, InVec, SubVec,
                       N->getOperand(2));

  // The element-wise fallback below requires a fixed-length original
  // subvector with valid indices.
  if (!IndicesValid || OrigVT.isScalableVector())
    report_fatal_error(
        "Don't know how to widen the operands for INSERT_SUBVECTOR");

  // If the operands can't be widened legally, just replace the INSERT_SUBVECTOR
  // with a series of INSERT_VECTOR_ELT
  unsigned Idx = N->getConstantOperandVal(2);

  SDValue InsertElt = InVec;
  for (unsigned I = 0, E = OrigVT.getVectorNumElements(); I != E; ++I) {
    SDValue ExtractElt =
        DAG.getExtractVectorElt(DL, VT.getVectorElementType(), SubVec, I);
    InsertElt = DAG.getInsertVectorElt(DL, InsertElt, ExtractElt, I + Idx);
  }

  return InsertElt;
}
7290
WidenVecOp_EXTRACT_SUBVECTOR(SDNode * N)7291 SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
7292 SDValue InOp = GetWidenedVector(N->getOperand(0));
7293 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N),
7294 N->getValueType(0), InOp, N->getOperand(1));
7295 }
7296
WidenVecOp_EXTRACT_VECTOR_ELT(SDNode * N)7297 SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
7298 SDValue InOp = GetWidenedVector(N->getOperand(0));
7299 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
7300 N->getValueType(0), InOp, N->getOperand(1));
7301 }
7302
WidenVecOp_EXTEND_VECTOR_INREG(SDNode * N)7303 SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(SDNode *N) {
7304 SDValue InOp = GetWidenedVector(N->getOperand(0));
7305 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), InOp);
7306 }
7307
// Widen the value operand of a STORE while still storing only the original
// vector type. Non-byte-sized or truncating stores are scalarized; otherwise
// try a VP_STORE masked to the original element count, and finally fall back
// to breaking the store into legal-width pieces.
SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
  // We have to widen the value, but we want only to store the original
  // vector type.
  StoreSDNode *ST = cast<StoreSDNode>(N);

  // Sub-byte element stores can't be expressed with wide stores.
  if (!ST->getMemoryVT().getScalarType().isByteSized())
    return TLI.scalarizeVectorStore(ST, DAG);

  if (ST->isTruncatingStore())
    return TLI.scalarizeVectorStore(ST, DAG);

  // Generate a vector-predicated store if it is custom/legal on the target.
  // To avoid possible recursion, only do this if the widened mask type is
  // legal.
  // FIXME: Not all targets may support EVL in VP_STORE. These will have been
  // removed from the IR by the ExpandVectorPredication pass but we're
  // reintroducing them here.
  SDValue StVal = ST->getValue();
  EVT StVT = StVal.getValueType();
  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StVT);
  EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                    WideVT.getVectorElementCount());

  if (TLI.isOperationLegalOrCustom(ISD::VP_STORE, WideVT) &&
      TLI.isTypeLegal(WideMaskVT)) {
    // Widen the value.
    SDLoc DL(N);
    StVal = GetWidenedVector(StVal);
    // All-ones mask; the EVL limits the store to the original element count.
    SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT);
    SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(),
                                      StVT.getVectorElementCount());
    return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(),
                          ST->getOffset(), Mask, EVL, StVT, ST->getMemOperand(),
                          ST->getAddressingMode());
  }

  // Fall back: emit a series of narrower stores covering the original value.
  SmallVector<SDValue, 16> StChain;
  if (GenWidenVectorStores(StChain, ST)) {
    if (StChain.size() == 1)
      return StChain[0];

    // Tie the partial-store chains together.
    return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain);
  }

  report_fatal_error("Unable to widen vector store");
}
7354
WidenVecOp_VP_SPLAT(SDNode * N,unsigned OpNo)7355 SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(SDNode *N, unsigned OpNo) {
7356 assert(OpNo == 1 && "Can widen only mask operand of vp_splat");
7357 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
7358 N->getOperand(0), GetWidenedVector(N->getOperand(1)),
7359 N->getOperand(2));
7360 }
7361
WidenVecOp_VP_STORE(SDNode * N,unsigned OpNo)7362 SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo) {
7363 assert((OpNo == 1 || OpNo == 3) &&
7364 "Can widen only data or mask operand of vp_store");
7365 VPStoreSDNode *ST = cast<VPStoreSDNode>(N);
7366 SDValue Mask = ST->getMask();
7367 SDValue StVal = ST->getValue();
7368 SDLoc dl(N);
7369
7370 if (OpNo == 1) {
7371 // Widen the value.
7372 StVal = GetWidenedVector(StVal);
7373
7374 // We only handle the case where the mask needs widening to an
7375 // identically-sized type as the vector inputs.
7376 assert(getTypeAction(Mask.getValueType()) ==
7377 TargetLowering::TypeWidenVector &&
7378 "Unable to widen VP store");
7379 Mask = GetWidenedVector(Mask);
7380 } else {
7381 Mask = GetWidenedVector(Mask);
7382
7383 // We only handle the case where the stored value needs widening to an
7384 // identically-sized type as the mask.
7385 assert(getTypeAction(StVal.getValueType()) ==
7386 TargetLowering::TypeWidenVector &&
7387 "Unable to widen VP store");
7388 StVal = GetWidenedVector(StVal);
7389 }
7390
7391 assert(Mask.getValueType().getVectorElementCount() ==
7392 StVal.getValueType().getVectorElementCount() &&
7393 "Mask and data vectors should have the same number of elements");
7394 return DAG.getStoreVP(ST->getChain(), dl, StVal, ST->getBasePtr(),
7395 ST->getOffset(), Mask, ST->getVectorLength(),
7396 ST->getMemoryVT(), ST->getMemOperand(),
7397 ST->getAddressingMode(), ST->isTruncatingStore(),
7398 ST->isCompressingStore());
7399 }
7400
WidenVecOp_VP_STRIDED_STORE(SDNode * N,unsigned OpNo)7401 SDValue DAGTypeLegalizer::WidenVecOp_VP_STRIDED_STORE(SDNode *N,
7402 unsigned OpNo) {
7403 assert((OpNo == 1 || OpNo == 4) &&
7404 "Can widen only data or mask operand of vp_strided_store");
7405 VPStridedStoreSDNode *SST = cast<VPStridedStoreSDNode>(N);
7406 SDValue Mask = SST->getMask();
7407 SDValue StVal = SST->getValue();
7408 SDLoc DL(N);
7409
7410 if (OpNo == 1)
7411 assert(getTypeAction(Mask.getValueType()) ==
7412 TargetLowering::TypeWidenVector &&
7413 "Unable to widen VP strided store");
7414 else
7415 assert(getTypeAction(StVal.getValueType()) ==
7416 TargetLowering::TypeWidenVector &&
7417 "Unable to widen VP strided store");
7418
7419 StVal = GetWidenedVector(StVal);
7420 Mask = GetWidenedVector(Mask);
7421
7422 assert(StVal.getValueType().getVectorElementCount() ==
7423 Mask.getValueType().getVectorElementCount() &&
7424 "Data and mask vectors should have the same number of elements");
7425
7426 return DAG.getStridedStoreVP(
7427 SST->getChain(), DL, StVal, SST->getBasePtr(), SST->getOffset(),
7428 SST->getStride(), Mask, SST->getVectorLength(), SST->getMemoryVT(),
7429 SST->getMemOperand(), SST->getAddressingMode(), SST->isTruncatingStore(),
7430 SST->isCompressingStore());
7431 }
7432
WidenVecOp_MSTORE(SDNode * N,unsigned OpNo)7433 SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
7434 assert((OpNo == 1 || OpNo == 4) &&
7435 "Can widen only data or mask operand of mstore");
7436 MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
7437 SDValue Mask = MST->getMask();
7438 EVT MaskVT = Mask.getValueType();
7439 SDValue StVal = MST->getValue();
7440 EVT VT = StVal.getValueType();
7441 SDLoc dl(N);
7442
7443 EVT WideVT, WideMaskVT;
7444 if (OpNo == 1) {
7445 // Widen the value.
7446 StVal = GetWidenedVector(StVal);
7447
7448 WideVT = StVal.getValueType();
7449 WideMaskVT =
7450 EVT::getVectorVT(*DAG.getContext(), MaskVT.getVectorElementType(),
7451 WideVT.getVectorElementCount());
7452 } else {
7453 WideMaskVT = TLI.getTypeToTransformTo(*DAG.getContext(), MaskVT);
7454
7455 EVT ValueVT = StVal.getValueType();
7456 WideVT = EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
7457 WideMaskVT.getVectorElementCount());
7458 }
7459
7460 if (TLI.isOperationLegalOrCustom(ISD::VP_STORE, WideVT) &&
7461 TLI.isTypeLegal(WideMaskVT)) {
7462 Mask = DAG.getInsertSubvector(dl, DAG.getUNDEF(WideMaskVT), Mask, 0);
7463 SDValue EVL = DAG.getElementCount(dl, TLI.getVPExplicitVectorLengthTy(),
7464 VT.getVectorElementCount());
7465 return DAG.getStoreVP(MST->getChain(), dl, StVal, MST->getBasePtr(),
7466 MST->getOffset(), Mask, EVL, MST->getMemoryVT(),
7467 MST->getMemOperand(), MST->getAddressingMode());
7468 }
7469
7470 if (OpNo == 1) {
7471 // The mask should be widened as well.
7472 Mask = ModifyToType(Mask, WideMaskVT, true);
7473 } else {
7474 // Widen the mask.
7475 Mask = ModifyToType(Mask, WideMaskVT, true);
7476
7477 StVal = ModifyToType(StVal, WideVT);
7478 }
7479
7480 assert(Mask.getValueType().getVectorElementCount() ==
7481 StVal.getValueType().getVectorElementCount() &&
7482 "Mask and data vectors should have the same number of elements");
7483 return DAG.getMaskedStore(MST->getChain(), dl, StVal, MST->getBasePtr(),
7484 MST->getOffset(), Mask, MST->getMemoryVT(),
7485 MST->getMemOperand(), MST->getAddressingMode(),
7486 false, MST->isCompressingStore());
7487 }
7488
WidenVecOp_MGATHER(SDNode * N,unsigned OpNo)7489 SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(SDNode *N, unsigned OpNo) {
7490 assert(OpNo == 4 && "Can widen only the index of mgather");
7491 auto *MG = cast<MaskedGatherSDNode>(N);
7492 SDValue DataOp = MG->getPassThru();
7493 SDValue Mask = MG->getMask();
7494 SDValue Scale = MG->getScale();
7495
7496 // Just widen the index. It's allowed to have extra elements.
7497 SDValue Index = GetWidenedVector(MG->getIndex());
7498
7499 SDLoc dl(N);
7500 SDValue Ops[] = {MG->getChain(), DataOp, Mask, MG->getBasePtr(), Index,
7501 Scale};
7502 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
7503 MG->getMemOperand(), MG->getIndexType(),
7504 MG->getExtensionType());
7505 ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
7506 ReplaceValueWith(SDValue(N, 0), Res.getValue(0));
7507 return SDValue();
7508 }
7509
// Widen an operand of a masked scatter. When the data operand is widened, the
// index, mask and memory VT are brought up to the same element count; when
// only the index is widened, everything else is left alone.
SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
  SDValue DataOp = MSC->getValue();
  SDValue Mask = MSC->getMask();
  SDValue Index = MSC->getIndex();
  SDValue Scale = MSC->getScale();
  EVT WideMemVT = MSC->getMemoryVT();

  if (OpNo == 1) {
    DataOp = GetWidenedVector(DataOp);
    unsigned NumElts = DataOp.getValueType().getVectorNumElements();

    // Widen index.
    EVT IndexVT = Index.getValueType();
    EVT WideIndexVT = EVT::getVectorVT(*DAG.getContext(),
                                       IndexVT.getVectorElementType(), NumElts);
    Index = ModifyToType(Index, WideIndexVT);

    // The mask should be widened as well. NOTE(review): the trailing 'true'
    // asks ModifyToType to pad the new lanes (presumably with false) so the
    // extra lanes do not store anything — confirm against ModifyToType.
    EVT MaskVT = Mask.getValueType();
    EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
                                      MaskVT.getVectorElementType(), NumElts);
    Mask = ModifyToType(Mask, WideMaskVT, true);

    // Widen the MemoryType
    WideMemVT = EVT::getVectorVT(*DAG.getContext(),
                                 MSC->getMemoryVT().getScalarType(), NumElts);
  } else if (OpNo == 4) {
    // Just widen the index. It's allowed to have extra elements.
    Index = GetWidenedVector(Index);
  } else
    llvm_unreachable("Can't widen this operand of mscatter");

  SDValue Ops[] = {MSC->getChain(), DataOp, Mask, MSC->getBasePtr(), Index,
                   Scale};
  return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N),
                              Ops, MSC->getMemOperand(), MSC->getIndexType(),
                              MSC->isTruncatingStore());
}
7549
// Widen an operand of a VP scatter. As for mscatter, widening the data
// operand drags the index, mask and memory VT up to the same element count;
// the vector length operand is always passed through unchanged.
SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(SDNode *N, unsigned OpNo) {
  VPScatterSDNode *VPSC = cast<VPScatterSDNode>(N);
  SDValue DataOp = VPSC->getValue();
  SDValue Mask = VPSC->getMask();
  SDValue Index = VPSC->getIndex();
  SDValue Scale = VPSC->getScale();
  EVT WideMemVT = VPSC->getMemoryVT();

  if (OpNo == 1) {
    // Widen data, index and mask to a common element count.
    DataOp = GetWidenedVector(DataOp);
    Index = GetWidenedVector(Index);
    const auto WideEC = DataOp.getValueType().getVectorElementCount();
    Mask = GetWidenedMask(Mask, WideEC);
    WideMemVT = EVT::getVectorVT(*DAG.getContext(),
                                 VPSC->getMemoryVT().getScalarType(), WideEC);
  } else if (OpNo == 3) {
    // Just widen the index. It's allowed to have extra elements.
    Index = GetWidenedVector(Index);
  } else
    llvm_unreachable("Can't widen this operand of VP_SCATTER");

  SDValue Ops[] = {
      VPSC->getChain(), DataOp, VPSC->getBasePtr(), Index, Scale, Mask,
      VPSC->getVectorLength()};
  return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), Ops,
                          VPSC->getMemOperand(), VPSC->getIndexType());
}
7577
// Widen the operands of a SETCC whose result type is already legal: compare
// the widened operands, extract the original-width slice of the result, and
// extend it to the expected result element type.
SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
  SDValue InOp0 = GetWidenedVector(N->getOperand(0));
  SDValue InOp1 = GetWidenedVector(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // WARNING: In this code we widen the compare instruction with garbage.
  // This garbage may contain denormal floats which may be slow. Is this a real
  // concern ? Should we zero the unused lanes if this is a float compare ?

  // Get a new SETCC node to compare the newly widened operands.
  // Only some of the compared elements are legal.
  EVT SVT = getSetCCResultType(InOp0.getValueType());
  // The result type is legal, if its vXi1, keep vXi1 for the new SETCC.
  if (VT.getScalarType() == MVT::i1)
    SVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                           SVT.getVectorElementCount());

  SDValue WideSETCC = DAG.getNode(ISD::SETCC, SDLoc(N),
                                  SVT, InOp0, InOp1, N->getOperand(2));

  // Extract the needed results from the result vector (the lanes that
  // correspond to the original, pre-widening elements).
  EVT ResVT = EVT::getVectorVT(*DAG.getContext(),
                               SVT.getVectorElementType(),
                               VT.getVectorElementCount());
  SDValue CC = DAG.getExtractSubvector(dl, ResVT, WideSETCC, 0);

  // Choose sign/zero/any extension according to how the target represents
  // boolean contents for this operand type.
  EVT OpVT = N->getOperand(0).getValueType();
  ISD::NodeType ExtendCode =
      TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
  return DAG.getNode(ExtendCode, dl, VT, CC);
}
7610
// Widen the operands of a strict FP compare by unrolling: each original
// element pair is compared with a scalar strict compare node (preserving the
// chain), and the results are reassembled into a build_vector.
SDValue DAGTypeLegalizer::WidenVecOp_STRICT_FSETCC(SDNode *N) {
  SDValue Chain = N->getOperand(0);
  SDValue LHS = GetWidenedVector(N->getOperand(1));
  SDValue RHS = GetWidenedVector(N->getOperand(2));
  SDValue CC = N->getOperand(3);
  SDLoc dl(N);

  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  // NumElts is the ORIGINAL element count, so the widened garbage lanes are
  // never compared.
  unsigned NumElts = VT.getVectorNumElements();

  // Unroll into a build vector.
  SmallVector<SDValue, 8> Scalars(NumElts);
  SmallVector<SDValue, 8> Chains(NumElts);

  for (unsigned i = 0; i != NumElts; ++i) {
    SDValue LHSElem = DAG.getExtractVectorElt(dl, TmpEltVT, LHS, i);
    SDValue RHSElem = DAG.getExtractVectorElt(dl, TmpEltVT, RHS, i);

    // Each scalar compare starts from the incoming chain; the per-element
    // chains are merged with a TokenFactor below.
    Scalars[i] = DAG.getNode(N->getOpcode(), dl, {MVT::i1, MVT::Other},
                             {Chain, LHSElem, RHSElem, CC});
    Chains[i] = Scalars[i].getValue(1);
    // Map the i1 compare result onto the target's true/false constants in
    // the result element type.
    Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
                               DAG.getBoolConstant(true, dl, EltVT, VT),
                               DAG.getBoolConstant(false, dl, EltVT, VT));
  }

  // Publish the merged chain as this node's chain result.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  ReplaceValueWith(SDValue(N, 1), NewChain);

  return DAG.getBuildVector(VT, dl, Scalars);
}
7644
getExtendForIntVecReduction(unsigned Opc)7645 static unsigned getExtendForIntVecReduction(unsigned Opc) {
7646 switch (Opc) {
7647 default:
7648 llvm_unreachable("Expected integer vector reduction");
7649 case ISD::VECREDUCE_ADD:
7650 case ISD::VECREDUCE_MUL:
7651 case ISD::VECREDUCE_AND:
7652 case ISD::VECREDUCE_OR:
7653 case ISD::VECREDUCE_XOR:
7654 return ISD::ANY_EXTEND;
7655 case ISD::VECREDUCE_SMAX:
7656 case ISD::VECREDUCE_SMIN:
7657 return ISD::SIGN_EXTEND;
7658 case ISD::VECREDUCE_UMAX:
7659 case ISD::VECREDUCE_UMIN:
7660 return ISD::ZERO_EXTEND;
7661 }
7662 }
7663
// Widen the vector operand of a VECREDUCE. The lanes added by widening must
// not change the reduction result, so they are either masked off (via a VP
// reduction with an EVL of the original element count) or filled with the
// operation's neutral element.
SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
  SDLoc dl(N);
  SDValue Op = GetWidenedVector(N->getOperand(0));
  EVT VT = N->getValueType(0);
  EVT OrigVT = N->getOperand(0).getValueType();
  EVT WideVT = Op.getValueType();
  EVT ElemVT = OrigVT.getVectorElementType();
  SDNodeFlags Flags = N->getFlags();

  unsigned Opc = N->getOpcode();
  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
  assert(NeutralElem && "Neutral element must exist");

  // Pad the vector with the neutral element.
  unsigned OrigElts = OrigVT.getVectorMinNumElements();
  unsigned WideElts = WideVT.getVectorMinNumElements();

  // Generate a vp.reduce_op if it is custom/legal for the target. This avoids
  // needing to pad the source vector, because the inactive lanes can simply be
  // disabled and not contribute to the result.
  if (auto VPOpcode = ISD::getVPForBaseOpcode(Opc);
      VPOpcode && TLI.isOperationLegalOrCustom(*VPOpcode, WideVT)) {
    // The VP reduction takes a scalar start value in the result type; for
    // integer reductions the neutral element must be extended to VT first.
    SDValue Start = NeutralElem;
    if (VT.isInteger())
      Start = DAG.getNode(getExtendForIntVecReduction(Opc), dl, VT, Start);
    assert(Start.getValueType() == VT);
    EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                      WideVT.getVectorElementCount());
    // All-ones mask, EVL = original element count: only original lanes count.
    SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
    SDValue EVL = DAG.getElementCount(dl, TLI.getVPExplicitVectorLengthTy(),
                                      OrigVT.getVectorElementCount());
    return DAG.getNode(*VPOpcode, dl, VT, {Start, Op, Mask, EVL}, Flags);
  }

  if (WideVT.isScalableVector()) {
    // Scalable vectors cannot be padded one element at a time; instead splat
    // the neutral element into a GCD-sized subvector and insert it over each
    // run of padding lanes.
    unsigned GCD = std::gcd(OrigElts, WideElts);
    EVT SplatVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
                                   ElementCount::getScalable(GCD));
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
      Op = DAG.getInsertSubvector(dl, Op, SplatNeutral, Idx);
    return DAG.getNode(Opc, dl, VT, Op, Flags);
  }

  // Fixed-width: overwrite each padding lane with the neutral element.
  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
    Op = DAG.getInsertVectorElt(dl, Op, NeutralElem, Idx);

  return DAG.getNode(Opc, dl, VT, Op, Flags);
}
7714
// Widen the vector operand of a sequential (ordered) VECREDUCE, which carries
// an explicit accumulator operand. Same padding strategy as
// WidenVecOp_VECREDUCE: mask the extra lanes off via a VP reduction, or fill
// them with the operation's neutral element.
SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
  SDLoc dl(N);
  SDValue AccOp = N->getOperand(0);
  SDValue VecOp = N->getOperand(1);
  SDValue Op = GetWidenedVector(VecOp);

  EVT VT = N->getValueType(0);
  EVT OrigVT = VecOp.getValueType();
  EVT WideVT = Op.getValueType();
  EVT ElemVT = OrigVT.getVectorElementType();
  SDNodeFlags Flags = N->getFlags();

  unsigned Opc = N->getOpcode();
  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);

  // Pad the vector with the neutral element.
  unsigned OrigElts = OrigVT.getVectorMinNumElements();
  unsigned WideElts = WideVT.getVectorMinNumElements();

  // Generate a vp.reduce_op if it is custom/legal for the target. This avoids
  // needing to pad the source vector, because the inactive lanes can simply be
  // disabled and not contribute to the result.
  if (auto VPOpcode = ISD::getVPForBaseOpcode(Opc);
      VPOpcode && TLI.isOperationLegalOrCustom(*VPOpcode, WideVT)) {
    EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                      WideVT.getVectorElementCount());
    // All-ones mask, EVL = original element count: only original lanes count.
    SDValue Mask = DAG.getAllOnesConstant(dl, WideMaskVT);
    SDValue EVL = DAG.getElementCount(dl, TLI.getVPExplicitVectorLengthTy(),
                                      OrigVT.getVectorElementCount());
    // The existing accumulator operand serves as the start value.
    return DAG.getNode(*VPOpcode, dl, VT, {AccOp, Op, Mask, EVL}, Flags);
  }

  if (WideVT.isScalableVector()) {
    // Scalable vectors are padded with GCD-sized neutral-element splats, as
    // in WidenVecOp_VECREDUCE.
    unsigned GCD = std::gcd(OrigElts, WideElts);
    EVT SplatVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
                                   ElementCount::getScalable(GCD));
    SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
    for (unsigned Idx = OrigElts; Idx < WideElts; Idx = Idx + GCD)
      Op = DAG.getInsertSubvector(dl, Op, SplatNeutral, Idx);
    return DAG.getNode(Opc, dl, VT, AccOp, Op, Flags);
  }

  // Fixed-width: overwrite each padding lane with the neutral element.
  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
    Op = DAG.getInsertVectorElt(dl, Op, NeutralElem, Idx);

  return DAG.getNode(Opc, dl, VT, AccOp, Op, Flags);
}
7763
WidenVecOp_VP_REDUCE(SDNode * N)7764 SDValue DAGTypeLegalizer::WidenVecOp_VP_REDUCE(SDNode *N) {
7765 assert(N->isVPOpcode() && "Expected VP opcode");
7766
7767 SDLoc dl(N);
7768 SDValue Op = GetWidenedVector(N->getOperand(1));
7769 SDValue Mask = GetWidenedMask(N->getOperand(2),
7770 Op.getValueType().getVectorElementCount());
7771
7772 return DAG.getNode(N->getOpcode(), dl, N->getValueType(0),
7773 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7774 N->getFlags());
7775 }
7776
WidenVecOp_VSELECT(SDNode * N)7777 SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
7778 // This only gets called in the case that the left and right inputs and
7779 // result are of a legal odd vector type, and the condition is illegal i1 of
7780 // the same odd width that needs widening.
7781 EVT VT = N->getValueType(0);
7782 assert(VT.isVector() && !VT.isPow2VectorType() && isTypeLegal(VT));
7783
7784 SDValue Cond = GetWidenedVector(N->getOperand(0));
7785 SDValue LeftIn = DAG.WidenVector(N->getOperand(1), SDLoc(N));
7786 SDValue RightIn = DAG.WidenVector(N->getOperand(2), SDLoc(N));
7787 SDLoc DL(N);
7788
7789 SDValue Select = DAG.getNode(N->getOpcode(), DL, LeftIn.getValueType(), Cond,
7790 LeftIn, RightIn);
7791 return DAG.getExtractSubvector(DL, VT, Select, 0);
7792 }
7793
WidenVecOp_VP_CttzElements(SDNode * N)7794 SDValue DAGTypeLegalizer::WidenVecOp_VP_CttzElements(SDNode *N) {
7795 SDLoc DL(N);
7796 SDValue Source = GetWidenedVector(N->getOperand(0));
7797 EVT SrcVT = Source.getValueType();
7798 SDValue Mask =
7799 GetWidenedMask(N->getOperand(1), SrcVT.getVectorElementCount());
7800
7801 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0),
7802 {Source, Mask, N->getOperand(2)}, N->getFlags());
7803 }
7804
7805 //===----------------------------------------------------------------------===//
7806 // Vector Widening Utilities
7807 //===----------------------------------------------------------------------===//
7808
7809 // Utility function to find the type to chop up a widen vector for load/store
7810 // TLI: Target lowering used to determine legal types.
// Width: The number of bits remaining to load/store.
7812 // WidenVT: The widen vector type to load to/store from
7813 // Align: If 0, don't allow use of a wider type
7814 // WidenEx: If Align is not 0, the amount additional we can load/store from.
7815
findMemType(SelectionDAG & DAG,const TargetLowering & TLI,unsigned Width,EVT WidenVT,unsigned Align=0,unsigned WidenEx=0)7816 static std::optional<EVT> findMemType(SelectionDAG &DAG,
7817 const TargetLowering &TLI, unsigned Width,
7818 EVT WidenVT, unsigned Align = 0,
7819 unsigned WidenEx = 0) {
7820 EVT WidenEltVT = WidenVT.getVectorElementType();
7821 const bool Scalable = WidenVT.isScalableVector();
7822 unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinValue();
7823 unsigned WidenEltWidth = WidenEltVT.getSizeInBits();
7824 unsigned AlignInBits = Align*8;
7825
7826 EVT RetVT = WidenEltVT;
7827 // Don't bother looking for an integer type if the vector is scalable, skip
7828 // to vector types.
7829 if (!Scalable) {
7830 // If we have one element to load/store, return it.
7831 if (Width == WidenEltWidth)
7832 return RetVT;
7833
7834 // See if there is larger legal integer than the element type to load/store.
7835 for (EVT MemVT : reverse(MVT::integer_valuetypes())) {
7836 unsigned MemVTWidth = MemVT.getSizeInBits();
7837 if (MemVT.getSizeInBits() <= WidenEltWidth)
7838 break;
7839 auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);
7840 if ((Action == TargetLowering::TypeLegal ||
7841 Action == TargetLowering::TypePromoteInteger) &&
7842 (WidenWidth % MemVTWidth) == 0 &&
7843 isPowerOf2_32(WidenWidth / MemVTWidth) &&
7844 (MemVTWidth <= Width ||
7845 (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7846 if (MemVTWidth == WidenWidth)
7847 return MemVT;
7848 RetVT = MemVT;
7849 break;
7850 }
7851 }
7852 }
7853
7854 // See if there is a larger vector type to load/store that has the same vector
7855 // element type and is evenly divisible with the WidenVT.
7856 for (EVT MemVT : reverse(MVT::vector_valuetypes())) {
7857 // Skip vector MVTs which don't match the scalable property of WidenVT.
7858 if (Scalable != MemVT.isScalableVector())
7859 continue;
7860 unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinValue();
7861 auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);
7862 if ((Action == TargetLowering::TypeLegal ||
7863 Action == TargetLowering::TypePromoteInteger) &&
7864 WidenEltVT == MemVT.getVectorElementType() &&
7865 (WidenWidth % MemVTWidth) == 0 &&
7866 isPowerOf2_32(WidenWidth / MemVTWidth) &&
7867 (MemVTWidth <= Width ||
7868 (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7869 if (RetVT.getFixedSizeInBits() < MemVTWidth || MemVT == WidenVT)
7870 return MemVT;
7871 }
7872 }
7873
7874 // Using element-wise loads and stores for widening operations is not
7875 // supported for scalable vectors
7876 if (Scalable)
7877 return std::nullopt;
7878
7879 return RetVT;
7880 }
7881
// Builds a vector of type VecTy from a sequence of scalar loads.
// VecTy: The resulting vector type.
// LdOps: The scalar load operations used to build the vector.
// [Start,End): The half-open range of loads in LdOps to use.
static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
                                     SmallVectorImpl<SDValue> &LdOps,
                                     unsigned Start, unsigned End) {
  SDLoc dl(LdOps[Start]);
  // The index math below assumes every load's width divides VecTy's width.
  EVT LdTy = LdOps[Start].getValueType();
  unsigned Width = VecTy.getSizeInBits();
  unsigned NumElts = Width / LdTy.getSizeInBits();
  EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), LdTy, NumElts);

  // Seed the vector with the first load in lane 0; subsequent loads are
  // inserted starting at lane 1.
  unsigned Idx = 1;
  SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT,LdOps[Start]);

  for (unsigned i = Start + 1; i != End; ++i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
      // The load type changed: reinterpret what has been built so far as a
      // vector of the new element type.
      NumElts = Width / NewLdTy.getSizeInBits();
      NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts);
      VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp);
      // Readjust position and vector position based on new load type.
      Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
      LdTy = NewLdTy;
    }
    VecOp = DAG.getInsertVectorElt(dl, VecOp, LdOps[i], Idx++);
  }
  // Reinterpret the assembled vector as the requested result type.
  return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
7912
SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
                                              LoadSDNode *LD) {
  // The strategy assumes that we can efficiently load power-of-two widths.
  // The routine chops the vector into the largest vector loads with the same
  // element type or scalar loads and then recombines it to the widen vector
  // type.
  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0));
  EVT LdVT = LD->getMemoryVT();
  SDLoc dl(LD);
  assert(LdVT.isVector() && WidenVT.isVector());
  assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
  assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType());

  // Load information
  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  TypeSize LdWidth = LdVT.getSizeInBits();
  TypeSize WidenWidth = WidenVT.getSizeInBits();
  TypeSize WidthDiff = WidenWidth - LdWidth;
  // Allow wider loads if they are sufficiently aligned to avoid memory faults
  // and if the original load is simple.
  unsigned LdAlign =
      (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value();

  // Find the vector type that can load from.
  std::optional<EVT> FirstVT =
      findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
                  WidthDiff.getKnownMinValue());

  // No usable memory type found (possible for scalable vectors): bail out and
  // let the caller handle the load differently.
  if (!FirstVT)
    return SDValue();

  SmallVector<EVT, 8> MemVTs;
  TypeSize FirstVTWidth = FirstVT->getSizeInBits();

  // Unless we're able to load in one instruction we must work out how to load
  // the remainder. MemVTs collects the (progressively narrower) types used
  // for the pieces after the first load.
  if (!TypeSize::isKnownLE(LdWidth, FirstVTWidth)) {
    std::optional<EVT> NewVT = FirstVT;
    TypeSize RemainingWidth = LdWidth;
    TypeSize NewVTWidth = FirstVTWidth;
    do {
      RemainingWidth -= NewVTWidth;
      if (TypeSize::isKnownLT(RemainingWidth, NewVTWidth)) {
        // The current type we are using is too large. Find a better size.
        NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinValue(),
                            WidenVT, LdAlign, WidthDiff.getKnownMinValue());
        if (!NewVT)
          return SDValue();
        NewVTWidth = NewVT->getSizeInBits();
      }
      MemVTs.push_back(*NewVT);
    } while (TypeSize::isKnownGT(RemainingWidth, NewVTWidth));
  }

  SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr, LD->getPointerInfo(),
                             LD->getBaseAlign(), MMOFlags, AAInfo);
  LdChain.push_back(LdOp.getValue(1));

  // Check if we can load the element with one instruction.
  if (MemVTs.empty()) {
    assert(TypeSize::isKnownLE(LdWidth, FirstVTWidth));
    if (!FirstVT->isVector()) {
      // A single scalar load: put it into a one-load vector and bitcast to
      // the widened vector type.
      unsigned NumElts =
          WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), *FirstVT, NumElts);
      SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
      return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
    }
    if (FirstVT == WidenVT)
      return LdOp;

    // TODO: We don't currently have any tests that exercise this code path.
    assert(WidenWidth.getFixedValue() % FirstVTWidth.getFixedValue() == 0);
    unsigned NumConcat =
        WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
    SmallVector<SDValue, 16> ConcatOps(NumConcat);
    SDValue UndefVal = DAG.getUNDEF(*FirstVT);
    ConcatOps[0] = LdOp;
    for (unsigned i = 1; i != NumConcat; ++i)
      ConcatOps[i] = UndefVal;
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
  }

  // Load vector by using multiple loads from largest vector to scalar.
  SmallVector<SDValue, 16> LdOps;
  LdOps.push_back(LdOp);

  uint64_t ScaledOffset = 0;
  MachinePointerInfo MPI = LD->getPointerInfo();

  // First increment past the first load.
  IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
                   &ScaledOffset);

  for (EVT MemVT : MemVTs) {
    // Loads after the first can only rely on the alignment implied by their
    // offset from the base pointer.
    Align NewAlign = ScaledOffset == 0
                         ? LD->getBaseAlign()
                         : commonAlignment(LD->getAlign(), ScaledOffset);
    SDValue L =
        DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);

    LdOps.push_back(L);
    LdChain.push_back(L.getValue(1));
    IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
  }

  // Build the vector from the load operations.
  unsigned End = LdOps.size();
  if (!LdOps[0].getValueType().isVector())
    // All the loads are scalar loads.
    return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);

  // If the load contains vectors, build the vector using concat vector.
  // All of the vectors used to load are power-of-2, and the scalar loads can be
  // combined to make a power-of-2 vector.
  // ConcatOps is filled back-to-front; [Idx, End) holds the pieces collected
  // so far, all of the current type LdTy.
  SmallVector<SDValue, 16> ConcatOps(End);
  int i = End - 1;
  int Idx = End;
  EVT LdTy = LdOps[i].getValueType();
  // First, combine the scalar loads to a vector.
  if (!LdTy.isVector()) {
    // Walk backwards to the last vector load; the trailing scalar loads are
    // merged into one small vector of that load's type.
    for (--i; i >= 0; --i) {
      LdTy = LdOps[i].getValueType();
      if (LdTy.isVector())
        break;
    }
    ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i + 1, End);
  }

  ConcatOps[--Idx] = LdOps[i];
  for (--i; i >= 0; --i) {
    EVT NewLdTy = LdOps[i].getValueType();
    if (NewLdTy != LdTy) {
      // Create a larger vector: concatenate the pieces gathered so far,
      // padding with undef up to the next load's (wider) type.
      TypeSize LdTySize = LdTy.getSizeInBits();
      TypeSize NewLdTySize = NewLdTy.getSizeInBits();
      assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
             NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinValue()));
      unsigned NumOps =
          NewLdTySize.getKnownMinValue() / LdTySize.getKnownMinValue();
      SmallVector<SDValue, 16> WidenOps(NumOps);
      unsigned j = 0;
      for (; j != End-Idx; ++j)
        WidenOps[j] = ConcatOps[Idx+j];
      for (; j != NumOps; ++j)
        WidenOps[j] = DAG.getUNDEF(LdTy);

      ConcatOps[End-1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy,
                                     WidenOps);
      Idx = End - 1;
      LdTy = NewLdTy;
    }
    ConcatOps[--Idx] = LdOps[i];
  }

  // If the collected pieces exactly fill the widened type, concatenate them.
  if (WidenWidth == LdTy.getSizeInBits() * (End - Idx))
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
                       ArrayRef(&ConcatOps[Idx], End - Idx));

  // We need to fill the rest with undefs to build the vector.
  unsigned NumOps =
      WidenWidth.getKnownMinValue() / LdTy.getSizeInBits().getKnownMinValue();
  SmallVector<SDValue, 16> WidenOps(NumOps);
  SDValue UndefVal = DAG.getUNDEF(LdTy);
  {
    unsigned i = 0;
    for (; i != End-Idx; ++i)
      WidenOps[i] = ConcatOps[Idx+i];
    for (; i != NumOps; ++i)
      WidenOps[i] = UndefVal;
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, WidenOps);
}
8090
8091 SDValue
GenWidenVectorExtLoads(SmallVectorImpl<SDValue> & LdChain,LoadSDNode * LD,ISD::LoadExtType ExtType)8092 DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
8093 LoadSDNode *LD,
8094 ISD::LoadExtType ExtType) {
8095 // For extension loads, it may not be more efficient to chop up the vector
8096 // and then extend it. Instead, we unroll the load and build a new vector.
8097 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0));
8098 EVT LdVT = LD->getMemoryVT();
8099 SDLoc dl(LD);
8100 assert(LdVT.isVector() && WidenVT.isVector());
8101 assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
8102
8103 // Load information
8104 SDValue Chain = LD->getChain();
8105 SDValue BasePtr = LD->getBasePtr();
8106 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
8107 AAMDNodes AAInfo = LD->getAAInfo();
8108
8109 if (LdVT.isScalableVector())
8110 report_fatal_error("Generating widen scalable extending vector loads is "
8111 "not yet supported");
8112
8113 EVT EltVT = WidenVT.getVectorElementType();
8114 EVT LdEltVT = LdVT.getVectorElementType();
8115 unsigned NumElts = LdVT.getVectorNumElements();
8116
8117 // Load each element and widen.
8118 unsigned WidenNumElts = WidenVT.getVectorNumElements();
8119 SmallVector<SDValue, 16> Ops(WidenNumElts);
8120 unsigned Increment = LdEltVT.getSizeInBits() / 8;
8121 Ops[0] =
8122 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, LD->getPointerInfo(),
8123 LdEltVT, LD->getBaseAlign(), MMOFlags, AAInfo);
8124 LdChain.push_back(Ops[0].getValue(1));
8125 unsigned i = 0, Offset = Increment;
8126 for (i=1; i < NumElts; ++i, Offset += Increment) {
8127 SDValue NewBasePtr =
8128 DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::getFixed(Offset));
8129 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
8130 LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
8131 LD->getBaseAlign(), MMOFlags, AAInfo);
8132 LdChain.push_back(Ops[i].getValue(1));
8133 }
8134
8135 // Fill the rest with undefs.
8136 SDValue UndefVal = DAG.getUNDEF(EltVT);
8137 for (; i != WidenNumElts; ++i)
8138 Ops[i] = UndefVal;
8139
8140 return DAG.getBuildVector(WidenVT, dl, Ops);
8141 }
8142
/// Helper to widen a vector store: emits a sequence of component stores that
/// together write exactly ST's memory VT, reading from the widened value.
/// The chain of each emitted store is appended to \p StChain. Returns false
/// if no suitable legal memory type could be found for some remainder, in
/// which case the caller must fall back to another strategy.
bool DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
                                            StoreSDNode *ST) {
  // The strategy assumes that we can efficiently store power-of-two widths.
  // The routine chops the vector into the largest vector stores with the same
  // element type or scalar stores.
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();
  // The value being stored, already widened to its legal type.
  SDValue ValOp = GetWidenedVector(ST->getValue());
  SDLoc dl(ST);

  EVT StVT = ST->getMemoryVT();
  // Remaining number of bits still to be stored; counts down to zero below.
  TypeSize StWidth = StVT.getSizeInBits();
  EVT ValVT = ValOp.getValueType();
  TypeSize ValWidth = ValVT.getSizeInBits();
  EVT ValEltVT = ValVT.getVectorElementType();
  unsigned ValEltWidth = ValEltVT.getFixedSizeInBits();
  assert(StVT.getVectorElementType() == ValEltVT);
  assert(StVT.isScalableVector() == ValVT.isScalableVector() &&
         "Mismatch between store and value types");

  int Idx = 0;          // current index to store

  // MPI and BasePtr are advanced in place by IncrementPointer after each
  // partial store.
  MachinePointerInfo MPI = ST->getPointerInfo();
  // Byte offset from the original base pointer, in vscale-scaled units for
  // scalable types; used to compute the alignment of later partial stores.
  uint64_t ScaledOffset = 0;

  // A breakdown of how to widen this vector store. Each element of the vector
  // is a memory VT combined with the number of times it is to be stored to,
  // e,g., v5i32 -> {{v2i32,2},{i32,1}}
  SmallVector<std::pair<EVT, unsigned>, 4> MemVTs;

  // First pass: plan the breakdown without creating any nodes, so we can
  // return false cleanly if some remainder has no usable memory type.
  while (StWidth.isNonZero()) {
    // Find the largest vector type we can store with.
    std::optional<EVT> NewVT =
        findMemType(DAG, TLI, StWidth.getKnownMinValue(), ValVT);
    if (!NewVT)
      return false;
    MemVTs.push_back({*NewVT, 0});
    TypeSize NewVTWidth = NewVT->getSizeInBits();

    // Repeat this VT as long as another full copy of it still fits in the
    // remaining width.
    do {
      StWidth -= NewVTWidth;
      MemVTs.back().second++;
    } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
  }

  // Second pass: emit the stores according to the plan.
  for (const auto &Pair : MemVTs) {
    EVT NewVT = Pair.first;
    unsigned Count = Pair.second;
    TypeSize NewVTWidth = NewVT.getSizeInBits();

    if (NewVT.isVector()) {
      // Store whole subvectors extracted at element index Idx.
      unsigned NumVTElts = NewVT.getVectorMinNumElements();
      do {
        // The first piece keeps the original alignment; later pieces get the
        // best alignment implied by their byte offset from the base.
        Align NewAlign = ScaledOffset == 0
                             ? ST->getBaseAlign()
                             : commonAlignment(ST->getAlign(), ScaledOffset);
        SDValue EOp = DAG.getExtractSubvector(dl, NewVT, ValOp, Idx);
        SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
                                         MMOFlags, AAInfo);
        StChain.push_back(PartStore);

        Idx += NumVTElts;
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
                         &ScaledOffset);
      } while (--Count);
    } else {
      // Cast the vector to the scalar type we can store.
      unsigned NumElts = ValWidth.getFixedValue() / NewVTWidth.getFixedValue();
      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
      SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
      // Readjust index position based on new vector type.
      Idx = Idx * ValEltWidth / NewVTWidth.getFixedValue();
      do {
        SDValue EOp = DAG.getExtractVectorElt(dl, NewVT, VecOp, Idx++);
        SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
                                         ST->getBaseAlign(), MMOFlags, AAInfo);
        StChain.push_back(PartStore);

        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
      } while (--Count);
      // Restore index back to be relative to the original widen element type.
      Idx = Idx * NewVTWidth.getFixedValue() / ValEltWidth;
    }
  }

  return true;
}
8232
8233 /// Modifies a vector input (widen or narrows) to a vector of NVT. The
8234 /// input vector must have the same element type as NVT.
8235 /// FillWithZeroes specifies that the vector should be widened with zeroes.
ModifyToType(SDValue InOp,EVT NVT,bool FillWithZeroes)8236 SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT,
8237 bool FillWithZeroes) {
8238 // Note that InOp might have been widened so it might already have
8239 // the right width or it might need be narrowed.
8240 EVT InVT = InOp.getValueType();
8241 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
8242 "input and widen element type must match");
8243 assert(InVT.isScalableVector() == NVT.isScalableVector() &&
8244 "cannot modify scalable vectors in this way");
8245 SDLoc dl(InOp);
8246
8247 // Check if InOp already has the right width.
8248 if (InVT == NVT)
8249 return InOp;
8250
8251 ElementCount InEC = InVT.getVectorElementCount();
8252 ElementCount WidenEC = NVT.getVectorElementCount();
8253 if (WidenEC.hasKnownScalarFactor(InEC)) {
8254 unsigned NumConcat = WidenEC.getKnownScalarFactor(InEC);
8255 SmallVector<SDValue, 16> Ops(NumConcat);
8256 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
8257 DAG.getUNDEF(InVT);
8258 Ops[0] = InOp;
8259 for (unsigned i = 1; i != NumConcat; ++i)
8260 Ops[i] = FillVal;
8261
8262 return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, Ops);
8263 }
8264
8265 if (InEC.hasKnownScalarFactor(WidenEC))
8266 return DAG.getExtractSubvector(dl, NVT, InOp, 0);
8267
8268 assert(!InVT.isScalableVector() && !NVT.isScalableVector() &&
8269 "Scalable vectors should have been handled already.");
8270
8271 unsigned InNumElts = InEC.getFixedValue();
8272 unsigned WidenNumElts = WidenEC.getFixedValue();
8273
8274 // Fall back to extract and build (+ mask, if padding with zeros).
8275 SmallVector<SDValue, 16> Ops(WidenNumElts);
8276 EVT EltVT = NVT.getVectorElementType();
8277 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
8278 unsigned Idx;
8279 for (Idx = 0; Idx < MinNumElts; ++Idx)
8280 Ops[Idx] = DAG.getExtractVectorElt(dl, EltVT, InOp, Idx);
8281
8282 SDValue UndefVal = DAG.getUNDEF(EltVT);
8283 for (; Idx < WidenNumElts; ++Idx)
8284 Ops[Idx] = UndefVal;
8285
8286 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
8287 if (!FillWithZeroes)
8288 return Widened;
8289
8290 assert(NVT.isInteger() &&
8291 "We expect to never want to FillWithZeroes for non-integral types.");
8292
8293 SmallVector<SDValue, 16> MaskOps;
8294 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
8295 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
8296
8297 return DAG.getNode(ISD::AND, dl, NVT, Widened,
8298 DAG.getBuildVector(NVT, dl, MaskOps));
8299 }
8300