//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64LegalizerInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"
#include <initializer_list>

#define DEBUG_TYPE "aarch64-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;
using namespace MIPatternMatch;

AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
    : ST(&ST) {
  using namespace TargetOpcode;
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s8 = LLT::fixed_vector(8, 8);
  const LLT v4s8 = LLT::fixed_vector(4, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s16 = LLT::fixed_vector(4, 16);
  const LLT v2s16 = LLT::fixed_vector(2, 16);
  const LLT v2s32 = LLT::fixed_vector(2, 32);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT v2p0 = LLT::fixed_vector(2, p0);
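  // LLT naming convention: sN is an N-bit scalar, vMsN is a fixed vector of M
  // N-bit elements, and p0 is a 64-bit pointer in address space 0; e.g. v4s32
  // is one 128-bit (Q) register holding four 32-bit lanes.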

  std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */
                                                        v16s8, v8s16, v4s32,
                                                        v2s64, v2p0,
                                                        /* End 128bit types */
                                                        /* Begin 64bit types */
                                                        v8s8, v4s16, v2s32};
  std::initializer_list<LLT> ScalarAndPtrTypesList = {s8, s16, s32, s64, p0};
  SmallVector<LLT, 8> PackedVectorAllTypesVec(PackedVectorAllTypeList);
  SmallVector<LLT, 8> ScalarAndPtrTypesVec(ScalarAndPtrTypesList);

  const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();

  // FIXME: support subtargets which have neon/fp-armv8 disabled.
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    getLegacyLegalizerInfo().computeTables();
    return;
  }

  // Some instructions only support s16 if the subtarget has full 16-bit FP
  // support.
  const bool HasFP16 = ST.hasFullFP16();
  const LLT &MinFPScalar = HasFP16 ? s16 : s32;

  const bool HasCSSC = ST.hasCSSC();
  const bool HasRCPC3 = ST.hasRCPC3();

  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
      .legalFor({p0, s8, s16, s32, s64})
      .legalFor(PackedVectorAllTypeList)
      .widenScalarToNextPow2(0)
      .clampScalar(0, s8, s64)
      .fewerElementsIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].isVector() &&
                   (Query.Types[0].getElementType() != s64 ||
                    Query.Types[0].getNumElements() != 2);
          },
          [=](const LegalityQuery &Query) {
            LLT EltTy = Query.Types[0].getElementType();
            if (EltTy == s64)
              return std::make_pair(0, LLT::fixed_vector(2, 64));
            return std::make_pair(0, EltTy);
          });

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s16, s32, s64})
      .legalFor(PackedVectorAllTypeList)
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, s64)
      // Maximum: sN * k = 128
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .clampMaxNumElements(0, p0, 2);

  getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({s32, s64, v4s32, v2s32, v2s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32, s64, v2s32, v2s64, v4s32, v4s16, v8s16, v16s8, v8s8})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getNumElements() <= 2;
          },
          0, s32)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getNumElements() <= 4;
          },
          0, s16)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getNumElements() <= 16;
          },
          0, s8)
      .moreElementsToNextPow2(0);
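
  // The s32/s32 scalar shift forms are custom so that a constant shift amount
  // can be promoted to a 64-bit immediate, letting the imported selection
  // patterns (which expect a 64-bit amount) match; see legalizeShlAshrLshr.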
  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .customIf([=](const LegalityQuery &Query) {
        const auto &SrcTy = Query.Types[0];
        const auto &AmtTy = Query.Types[1];
        return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
               AmtTy.getSizeInBits() == 32;
      })
      .legalFor({
          {s32, s32},
          {s32, s64},
          {s64, s64},
          {v8s8, v8s8},
          {v16s8, v16s8},
          {v4s16, v4s16},
          {v8s16, v8s16},
          {v2s32, v2s32},
          {v4s32, v4s32},
          {v2s64, v2s64},
      })
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s64)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0)
      .minScalarSameAs(1, 0);

  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s64}, {v2p0, v2s64}})
      .clampScalar(1, s64, s64);

  getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}});

  getActionDefinitionsBuilder({G_SDIV, G_UDIV})
      .legalFor({s32, s64})
      .libcallFor({s128})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .scalarize(0);

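  // AArch64 has no integer remainder instruction, so remainders are always
  // lowered: e.g. an s64 G_SREM becomes G_SDIV + G_MUL + G_SUB, which
  // selection can typically fold into sdiv + msub.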
  getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM})
      .lowerFor({s8, s16, s32, s64, v2s64, v4s32, v2s32})
      .widenScalarOrEltToNextPow2(0)
      .clampScalarOrElt(0, s32, s64)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);

  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
      .widenScalarToNextPow2(0, /*Min = */ 32)
      .clampScalar(0, s32, s64)
      .lower();

  getActionDefinitionsBuilder({G_SMULH, G_UMULH})
      .legalFor({s64, v8s16, v16s8, v4s32})
      .lower();

  auto &MinMaxActions = getActionDefinitionsBuilder(
      {G_SMIN, G_SMAX, G_UMIN, G_UMAX});
  if (HasCSSC)
    MinMaxActions
        .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
        // Clamping is made conditional on the CSSC extension: without legal
        // types we lower to CMP, which can fold one of the two sxtb's we'd
        // otherwise need if we detect a type smaller than 32-bit.
        .minScalar(0, s32);
  else
    MinMaxActions
        .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32});
  MinMaxActions
      .clampNumElements(0, v8s8, v16s8)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      // FIXME: This shouldn't be needed as v2s64 types are going to
      // be expanded anyway, but G_ICMP doesn't support splitting vectors yet
      .clampNumElements(0, v2s64, v2s64)
      .lower();

  getActionDefinitionsBuilder(
      {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
      .legalFor({{s32, s32}, {s64, s32}})
      .clampScalar(0, s32, s64)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FNEG,
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM,
                               G_FMAXIMUM, G_FMINIMUM, G_FCEIL, G_FFLOOR,
                               G_FRINT, G_FNEARBYINT, G_INTRINSIC_TRUNC,
                               G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
      .legalFor({MinFPScalar, s32, s64, v2s32, v4s32, v2s64})
      .legalIf([=](const LegalityQuery &Query) {
        const auto &Ty = Query.Types[0];
        return (Ty == v8s16 || Ty == v4s16) && HasFP16;
      })
      .libcallFor({s128})
      .minScalarOrElt(0, MinFPScalar)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);

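  // There is no FP remainder instruction either; G_FREM is always a libcall
  // (fmodf for s32, fmod for s64).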
  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .minScalar(0, s32)
      .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_LRINT)
      // If we don't have full FP16 support, then scalarize the elements of
      // vectors containing fp16 types.
      .fewerElementsIf(
          [=, &ST](const LegalityQuery &Query) {
            const auto &Ty = Query.Types[0];
            return Ty.isVector() && Ty.getElementType() == s16 &&
                   !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
      // If we don't have full FP16 support, then widen s16 to s32 if we
      // encounter it.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0] == s16 && !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
      .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});

  getActionDefinitionsBuilder(
      {G_FCOS, G_FSIN, G_FPOW, G_FLOG, G_FLOG2, G_FLOG10,
       G_FEXP, G_FEXP2, G_FEXP10})
      // We need a call for these, so we always need to scalarize.
      .scalarize(0)
      // Regardless of FP16 support, widen 16-bit elements to 32-bits.
      .minScalar(0, s32)
      .libcallFor({s32, s64});
  getActionDefinitionsBuilder(G_FPOWI)
      .scalarize(0)
      .minScalar(0, s32)
      .libcallFor({{s32, s32}, {s64, s32}});

  getActionDefinitionsBuilder(G_INSERT)
      .legalIf(all(typeInSet(0, {s32, s64, p0}),
                   typeInSet(1, {s8, s16, s32}), smallerThan(1, 0)))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(1)
      .minScalar(1, s8)
      .maxScalarIf(typeInSet(0, {s32}), 1, s16)
      .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32);

  getActionDefinitionsBuilder(G_EXTRACT)
      .legalIf(all(typeInSet(0, {s16, s32, s64, p0}),
                   typeInSet(1, {s32, s64, s128, p0}), smallerThan(0, 1)))
      .widenScalarToNextPow2(1)
      .clampScalar(1, s32, s128)
      .widenScalarToNextPow2(0)
      .minScalar(0, s16)
      .maxScalarIf(typeInSet(1, {s32}), 0, s16)
      .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32)
      .maxScalarIf(typeInSet(1, {s128}), 0, s64);

  for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
    auto &Actions = getActionDefinitionsBuilder(Op);

    if (Op == G_SEXTLOAD)
      Actions.lowerIf(
          atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Unordered));

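    // A legalForTypesWithMemDesc entry reads {result type, pointer type,
    // memory type, minimum alignment in bits}; e.g. {s32, p0, s8, 8} is a
    // byte-aligned extending load of an 8-bit value into an s32 register.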
    // Atomics have zero extending behavior.
    Actions
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8},
                                 {s32, p0, s32, 8},
                                 {s64, p0, s8, 2},
                                 {s64, p0, s16, 2},
                                 {s64, p0, s32, 4},
                                 {s64, p0, s64, 8},
                                 {p0, p0, s64, 8},
                                 {v2s32, p0, s64, 8}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      //       how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower anything left over into G_*EXT and G_LOAD
      .lower();
  }

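  // Loads and stores of vectors of address-space-0 pointers are handled
  // custom: roughly, the access is rewritten over an equivalent vector of s64
  // with bitcasts on either side; see legalizeLoadStore.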
  auto IsPtrVecPred = [=](const LegalityQuery &Query) {
    const LLT &ValTy = Query.Types[0];
    if (!ValTy.isVector())
      return false;
    const LLT EltTy = ValTy.getElementType();
    return EltTy.isPointer() && EltTy.getAddressSpace() == 0;
  };

  getActionDefinitionsBuilder(G_LOAD)
      .customIf([=](const LegalityQuery &Query) {
        return HasRCPC3 && Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering == AtomicOrdering::Acquire;
      })
      .customIf([=](const LegalityQuery &Query) {
        return Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
      })
      .legalForTypesWithMemDesc({{s8, p0, s8, 8},
                                 {s16, p0, s16, 8},
                                 {s32, p0, s32, 8},
                                 {s64, p0, s64, 8},
                                 {p0, p0, s64, 8},
                                 {s128, p0, s128, 8},
                                 {v8s8, p0, s64, 8},
                                 {v16s8, p0, s128, 8},
                                 {v4s16, p0, s64, 8},
                                 {v8s16, p0, s128, 8},
                                 {v2s32, p0, s64, 8},
                                 {v4s32, p0, s128, 8},
                                 {v2s64, p0, s128, 8}})
      // These extends are also legal
      .legalForTypesWithMemDesc(
          {{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s64, p0, s32, 8}})
      .widenScalarToNextPow2(0, /* MinSize = */ 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s8, s64)
      .narrowScalarIf(
          [=](const LegalityQuery &Query) {
            // Clamp extending load results to 32-bits.
            return Query.Types[0].isScalar() &&
                   Query.Types[0] != Query.MMODescrs[0].MemoryTy &&
                   Query.Types[0].getSizeInBits() > 32;
          },
          changeTo(0, s32))
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .clampMaxNumElements(0, p0, 2)
      .customIf(IsPtrVecPred)
      .scalarizeIf(typeIs(0, v2s16), 0);

  getActionDefinitionsBuilder(G_STORE)
      .customIf([=](const LegalityQuery &Query) {
        return HasRCPC3 && Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering == AtomicOrdering::Release;
      })
      .customIf([=](const LegalityQuery &Query) {
        return Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
      })
      .legalForTypesWithMemDesc(
          {{s8, p0, s8, 8},     {s16, p0, s8, 8},  // truncstorei8 from s16
           {s32, p0, s8, 8},                       // truncstorei8 from s32
           {s64, p0, s8, 8},                       // truncstorei8 from s64
           {s16, p0, s16, 8},   {s32, p0, s16, 8}, // truncstorei16 from s32
           {s64, p0, s16, 8},                      // truncstorei16 from s64
           {s32, p0, s8, 8},    {s32, p0, s16, 8},    {s32, p0, s32, 8},
           {s64, p0, s64, 8},   {s64, p0, s32, 8}, // truncstorei32 from s64
           {p0, p0, s64, 8},    {s128, p0, s128, 8},  {v16s8, p0, s128, 8},
           {v8s8, p0, s64, 8},  {v4s16, p0, s64, 8},  {v8s16, p0, s128, 8},
           {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}})
      .clampScalar(0, s8, s64)
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].isScalar() &&
               Query.Types[0] != Query.MMODescrs[0].MemoryTy;
      })
      // Maximum: sN * k = 128
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .clampMaxNumElements(0, p0, 2)
      .lowerIfMemSizeNotPow2()
      .customIf(IsPtrVecPred)
      .scalarizeIf(typeIs(0, v2s16), 0);

  getActionDefinitionsBuilder(G_INDEXED_STORE)
      // Idx 0 == Ptr, Idx 1 == Val
      // TODO: we can implement legalizations but as of now these are
      // generated in a very specific way.
      .legalForTypesWithMemDesc({
          {p0, s8, s8, 8},
          {p0, s16, s16, 8},
          {p0, s32, s8, 8},
          {p0, s32, s16, 8},
          {p0, s32, s32, 8},
          {p0, s64, s64, 8},
          {p0, p0, p0, 8},
          {p0, v8s8, v8s8, 8},
          {p0, v16s8, v16s8, 8},
          {p0, v4s16, v4s16, 8},
          {p0, v8s16, v8s16, 8},
          {p0, v2s32, v2s32, 8},
          {p0, v4s32, v4s32, 8},
          {p0, v2s64, v2s64, 8},
          {p0, v2p0, v2p0, 8},
          {p0, s128, s128, 8},
      })
      .unsupported();

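  // Pre/post-indexed loads are legal only for the usual scalar, pointer, and
  // 64/128-bit vector types with a p0 base; atomic orderings are not
  // supported by the indexed forms.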
  auto IndexedLoadBasicPred = [=](const LegalityQuery &Query) {
    LLT LdTy = Query.Types[0];
    LLT PtrTy = Query.Types[1];
    if (!llvm::is_contained(PackedVectorAllTypesVec, LdTy) &&
        !llvm::is_contained(ScalarAndPtrTypesVec, LdTy) && LdTy != s128)
      return false;
    if (PtrTy != p0)
      return false;
    return true;
  };
  getActionDefinitionsBuilder(G_INDEXED_LOAD)
      .unsupportedIf(
          atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Unordered))
      .legalIf(IndexedLoadBasicPred)
      .unsupported();
  getActionDefinitionsBuilder({G_INDEXED_SEXTLOAD, G_INDEXED_ZEXTLOAD})
      .unsupportedIf(
          atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Unordered))
      .legalIf(all(typeInSet(0, {s16, s32, s64}),
                   LegalityPredicate([=](const LegalityQuery &Q) {
                     LLT LdTy = Q.Types[0];
                     LLT PtrTy = Q.Types[1];
                     LLT MemTy = Q.MMODescrs[0].MemoryTy;
                     if (PtrTy != p0)
                       return false;
                     if (LdTy == s16)
                       return MemTy == s8;
                     if (LdTy == s32)
                       return MemTy == s8 || MemTy == s16;
                     if (LdTy == s64)
                       return MemTy == s8 || MemTy == s16 || MemTy == s32;
                     return false;
                   })))
      .unsupported();

  // Constants
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s8, s16, s32, s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s8, s64);
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalIf([=](const LegalityQuery &Query) {
        const auto &Ty = Query.Types[0];
        if (HasFP16 && Ty == s16)
          return true;
        return Ty == s32 || Ty == s64 || Ty == s128;
      })
      .clampScalar(0, MinFPScalar, s128);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{s32, s32},
                 {s32, s64},
                 {s32, p0},
                 {v4s32, v4s32},
                 {v2s32, v2s32},
                 {v2s64, v2s64},
                 {v2s64, v2p0},
                 {v4s16, v4s16},
                 {v8s16, v8s16},
                 {v8s8, v8s8},
                 {v16s8, v16s8}})
      .widenScalarOrEltToNextPow2(1)
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s32)
      .minScalarEltSameAsIf(
          [=](const LegalityQuery &Query) {
            const LLT &Ty = Query.Types[0];
            const LLT &SrcTy = Query.Types[1];
            return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
                   Ty.getElementType() != SrcTy.getElementType();
          },
          0, 1)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; },
          1, s32)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0,
          s64)
      .clampNumElements(0, v2s32, v4s32);

  getActionDefinitionsBuilder(G_FCMP)
      // If we don't have full FP16 support, then scalarize the elements of
      // vectors containing fp16 types.
      .fewerElementsIf(
          [=](const LegalityQuery &Query) {
            const auto &Ty = Query.Types[0];
            return Ty.isVector() && Ty.getElementType() == s16 && !HasFP16;
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
      // If we don't have full FP16 support, then widen s16 to s32 if we
      // encounter it.
      .widenScalarIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0] == s16 && !HasFP16;
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
      .legalFor({{s16, s16},
                 {s32, s32},
                 {s32, s64},
                 {v4s32, v4s32},
                 {v2s32, v2s32},
                 {v2s64, v2s64},
                 {v4s16, v4s16},
                 {v8s16, v8s16}})
      .widenScalarOrEltToNextPow2(1)
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s32)
      .minScalarEltSameAsIf(
          [=](const LegalityQuery &Query) {
            const LLT &Ty = Query.Types[0];
            const LLT &SrcTy = Query.Types[1];
            return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
                   Ty.getElementType() != SrcTy.getElementType();
          },
          0, 1)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(1, s64, 2);

  // Extensions
  auto ExtLegalFunc = [=](const LegalityQuery &Query) {
    unsigned DstSize = Query.Types[0].getSizeInBits();

    // Handle legal vectors using legalFor
    if (Query.Types[0].isVector())
      return false;

    if (DstSize < 8 || DstSize >= 128 || !isPowerOf2_32(DstSize))
      return false; // Extending to a scalar s128 needs narrowing.

    const LLT &SrcTy = Query.Types[1];

    // Make sure we fit in a register otherwise. Don't bother checking that
    // the source type is below 128 bits. We shouldn't be allowing anything
    // through which is wider than the destination in the first place.
    unsigned SrcSize = SrcTy.getSizeInBits();
    if (SrcSize < 8 || !isPowerOf2_32(SrcSize))
      return false;

    return true;
  };
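  // Under this predicate, e.g. a G_ZEXT of s32 to s64 is legal as-is, while
  // an extension to s128 is rejected (DstSize >= 128) and narrowed by the
  // clampScalar below.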
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf(ExtLegalFunc)
      .legalFor({{v2s64, v2s32}, {v4s32, v4s16}, {v8s16, v8s8}})
      .clampScalar(0, s64, s64) // Just for s128, others are handled above.
      .moreElementsToNextPow2(1)
      .clampMaxNumElements(1, s8, 8)
      .clampMaxNumElements(1, s16, 4)
      .clampMaxNumElements(1, s32, 2)
      // Tries to convert a large EXTEND into two smaller EXTENDs
      .lowerIf([=](const LegalityQuery &Query) {
        return (Query.Types[0].getScalarSizeInBits() >
                Query.Types[1].getScalarSizeInBits() * 2) &&
               Query.Types[0].isVector() &&
               (Query.Types[1].getScalarSizeInBits() == 8 ||
                Query.Types[1].getScalarSizeInBits() == 16);
      });

  getActionDefinitionsBuilder(G_TRUNC)
      .legalFor({{v2s32, v2s64}, {v4s16, v4s32}, {v8s8, v8s16}})
      .moreElementsToNextPow2(0)
      .clampMaxNumElements(0, s8, 8)
      .clampMaxNumElements(0, s16, 4)
      .clampMaxNumElements(0, s32, 2)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
          0, s8)
      .lowerIf([=](const LegalityQuery &Query) {
        LLT DstTy = Query.Types[0];
        LLT SrcTy = Query.Types[1];
        return DstTy.isVector() && (SrcTy.getSizeInBits() > 128 ||
                                    (DstTy.getScalarSizeInBits() * 2 <
                                     SrcTy.getScalarSizeInBits()));
      })
      .alwaysLegal();

  getActionDefinitionsBuilder(G_SEXT_INREG)
      .legalFor({s32, s64})
      .legalFor(PackedVectorAllTypeList)
      .maxScalar(0, s64)
      .clampNumElements(0, v8s8, v16s8)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(0, s64, 2)
      .lower();

  // FP conversions
  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(
          {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
      .clampNumElements(0, v4s16, v4s16)
      .clampNumElements(0, v2s32, v2s32)
      .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(
          {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .scalarize(0);

  // Conversions
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .legalIf([=](const LegalityQuery &Query) {
        return HasFP16 &&
               (Query.Types[1] == s16 || Query.Types[1] == v4s16 ||
                Query.Types[1] == v8s16) &&
               (Query.Types[0] == s32 || Query.Types[0] == s64 ||
                Query.Types[0] == v4s16 || Query.Types[0] == v8s16);
      })
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalarOrElt(1, MinFPScalar, s64)
      .moreElementsToNextPow2(0)
      .widenScalarIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();
          },
          LegalizeMutations::changeElementSizeTo(1, 0))
      .widenScalarIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();
          },
          LegalizeMutations::changeElementSizeTo(0, 1))
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(0, s64, 2);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .legalIf([=](const LegalityQuery &Query) {
        return HasFP16 &&
               (Query.Types[0] == s16 || Query.Types[0] == v4s16 ||
                Query.Types[0] == v8s16) &&
               (Query.Types[1] == s32 || Query.Types[1] == s64 ||
                Query.Types[1] == v4s16 || Query.Types[1] == v8s16);
      })
      .widenScalarToNextPow2(1)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalarOrElt(0, MinFPScalar, s64)
      .moreElementsToNextPow2(0)
      .widenScalarIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();
          },
          LegalizeMutations::changeElementSizeTo(0, 1))
      .widenScalarIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();
          },
          LegalizeMutations::changeElementSizeTo(1, 0))
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(0, s64, 2);

  // Control-flow
  getActionDefinitionsBuilder(G_BRCOND)
    .legalFor({s32})
    .clampScalar(0, s32, s32);
  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .clampScalar(1, s32, s32)
      .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0)
      .lowerIf(isVector(0));

  // Pointer-handling
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  if (TM.getCodeModel() == CodeModel::Small)
    getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom();
  else
    getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s64, p0}, {v2s64, v2p0}})
      .widenScalarToNextPow2(0, 64)
      .clampScalar(0, s64, s64);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .unsupportedIf([&](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
      })
      .legalFor({{p0, s64}, {v2p0, v2s64}});

  // Casts for 32 and 64-bit width types are just copies.
  // Same for 128-bit width types, except they are on the FPR bank.
  getActionDefinitionsBuilder(G_BITCAST)
      // FIXME: This is wrong since G_BITCAST is not allowed to change the
      // number of bits but it's what the previous code described and fixing
      // it breaks tests.
      .legalForCartesianProduct({s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
                                 v8s16, v4s16, v2s16, v4s32, v2s32, v2s64,
                                 v2p0});

  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .lowerIf(
          all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(2, p0)));

  LegalityPredicate UseOutlineAtomics = [&ST](const LegalityQuery &Query) {
    return ST.outlineAtomics() && !ST.hasLSE();
  };

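  // When compiling for outlined atomics (-moutline-atomics) without LSE,
  // sized compare-and-swap and RMW operations become libcalls into the
  // __aarch64_* outlined helpers rather than inline LL/SC loops.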
  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
      .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0),
                   predNot(UseOutlineAtomics)))
      .customIf([UseOutlineAtomics](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() == 128 &&
               !UseOutlineAtomics(Query);
      })
      .libcallIf(all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(1, p0),
                     UseOutlineAtomics))
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder({G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD,
                               G_ATOMICRMW_SUB, G_ATOMICRMW_AND, G_ATOMICRMW_OR,
                               G_ATOMICRMW_XOR})
      .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0),
                   predNot(UseOutlineAtomics)))
      .libcallIf(all(typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
                     UseOutlineAtomics))
      .clampScalar(0, s32, s64);

  // Do not outline these atomic operations, as per the comment in
  // AArch64ISelLowering.cpp's shouldExpandAtomicRMWInIR().
  getActionDefinitionsBuilder(
      {G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
      .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0)))
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    getActionDefinitionsBuilder(Op)
        .widenScalarToNextPow2(LitTyIdx, 8)
        .widenScalarToNextPow2(BigTyIdx, 32)
        .clampScalar(LitTyIdx, s8, s64)
        .clampScalar(BigTyIdx, s32, s128)
        .legalIf([=](const LegalityQuery &Q) {
          switch (Q.Types[BigTyIdx].getSizeInBits()) {
          case 32:
          case 64:
          case 128:
            break;
          default:
            return false;
          }
          switch (Q.Types[LitTyIdx].getSizeInBits()) {
          case 8:
          case 16:
          case 32:
          case 64:
            return true;
          default:
            return false;
          }
        });
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      })
      .minScalar(2, s64)
      .customIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[1];
        return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 ||
               VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 ||
               VecTy == v8s8 || VecTy == v16s8 || VecTy == v2p0;
      })
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            // We want to promote the element type to s64 if that wouldn't
            // cause the total vector size to exceed 128b.
            return Query.Types[1].getNumElements() <= 2;
          },
          0, s64)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 4;
          },
          0, s32)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 8;
          },
          0, s16)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 16;
          },
          0, s8)
      .minScalarOrElt(0, s8) // Worst case, we need at least s8.
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .clampMaxNumElements(1, p0, 2);

  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
      .legalIf(typeInSet(0, {v16s8, v8s8, v8s16, v4s16, v4s32, v2s32, v2s64}))
      .widenVectorEltsToVectorMinSize(0, 64);

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalFor({{v8s8, s8},
                 {v16s8, s8},
                 {v4s16, s16},
                 {v8s16, s16},
                 {v2s32, s32},
                 {v4s32, s32},
                 {v2p0, p0},
                 {v2s64, s64}})
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .minScalarOrElt(0, s8)
      .widenVectorEltsToVectorMinSize(0, 64)
      .minScalarSameAs(1, 0);

  getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower();

  getActionDefinitionsBuilder(G_CTLZ)
      .legalForCartesianProduct(
          {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
      .scalarize(1)
      .widenScalarToNextPow2(1, /*Min=*/32)
      .clampScalar(1, s32, s64)
      .scalarSameSizeAs(0, 1);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower();

  // TODO: Custom lowering for v2s32, v4s32, v2s64.
  getActionDefinitionsBuilder(G_BITREVERSE)
      .legalFor({s32, s64, v8s8, v16s8})
      .widenScalarToNextPow2(0, /*Min = */ 32)
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower();

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerIf(isVector(0))
      .widenScalarToNextPow2(1, /*Min=*/32)
      .clampScalar(1, s32, s64)
      .scalarSameSizeAs(0, 1)
      .legalIf([=](const LegalityQuery &Query) {
        return (HasCSSC && typeInSet(0, {s32, s64})(Query));
      })
      .customIf([=](const LegalityQuery &Query) {
        return (!HasCSSC && typeInSet(0, {s32, s64})(Query));
      });

  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &DstTy = Query.Types[0];
        const LLT &SrcTy = Query.Types[1];
        // For now just support the TBL2 variant which needs the source vectors
        // to be the same size as the dest.
        if (DstTy != SrcTy)
          return false;
        return llvm::is_contained(
            {v2s64, v2p0, v2s32, v4s32, v4s16, v16s8, v8s8, v8s16}, DstTy);
      })
      // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors); we
      // just want those lowered into G_BUILD_VECTOR.
      .lowerIf([=](const LegalityQuery &Query) {
        return !Query.Types[1].isVector();
      })
      .moreElementsIf(
          [](const LegalityQuery &Query) {
            return Query.Types[0].isVector() && Query.Types[1].isVector() &&
                   Query.Types[0].getNumElements() >
                       Query.Types[1].getNumElements();
          },
          changeTo(1, 0))
      .moreElementsToNextPow2(0)
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsIf(
          [](const LegalityQuery &Query) {
            return Query.Types[0].isVector() && Query.Types[1].isVector() &&
                   Query.Types[0].getNumElements() <
                       Query.Types[1].getNumElements();
          },
          changeTo(0, 1));

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
      .legalFor({{v4s32, v2s32}, {v8s16, v4s16}, {v16s8, v8s8}});

  getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({p0});

  getActionDefinitionsBuilder(G_BRJT).legalFor({{p0, s64}});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC).custom();

  getActionDefinitionsBuilder({G_STACKSAVE, G_STACKRESTORE}).lower();

  if (ST.hasMOPS()) {
    // G_BZERO is not supported. Currently it is only emitted by
    // PreLegalizerCombiner for G_MEMSET with zero constant.
    getActionDefinitionsBuilder(G_BZERO).unsupported();

    getActionDefinitionsBuilder(G_MEMSET)
        .legalForCartesianProduct({p0}, {s64}, {s64})
        .customForCartesianProduct({p0}, {s8}, {s64})
        .immIdx(0); // Inform verifier imm idx 0 is handled.

    getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE})
        .legalForCartesianProduct({p0}, {p0}, {s64})
        .immIdx(0); // Inform verifier imm idx 0 is handled.

    // G_MEMCPY_INLINE does not have a tailcall immediate
    getActionDefinitionsBuilder(G_MEMCPY_INLINE)
        .legalForCartesianProduct({p0}, {p0}, {s64});

  } else {
    getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET})
        .libcall();
  }

  // FIXME: Legal vector types are only legal with NEON.
  auto &ABSActions = getActionDefinitionsBuilder(G_ABS);
  if (HasCSSC)
    ABSActions
        .legalFor({s32, s64});
  ABSActions
      .legalFor(PackedVectorAllTypeList)
      .lowerIf(isScalar(0));

  // For fadd reductions we have pairwise operations available. We treat the
  // usual legal types as legal and handle the lowering to pairwise instructions
  // later.
  getActionDefinitionsBuilder(G_VECREDUCE_FADD)
      .legalFor({{s32, v2s32}, {s32, v4s32}, {s64, v2s64}})
      .legalIf([=](const LegalityQuery &Query) {
        const auto &Ty = Query.Types[1];
        return (Ty == v4s16 || Ty == v8s16) && HasFP16;
      })
      .minScalarOrElt(0, MinFPScalar)
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .lower();

  // For fmul reductions we need to split up into individual operations. We
  // clamp to 128-bit vectors, then to 64-bit vectors to produce a cascade of
  // smaller types, followed by scalarizing what remains.
  getActionDefinitionsBuilder(G_VECREDUCE_FMUL)
      .minScalarOrElt(0, MinFPScalar)
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .clampMaxNumElements(1, s32, 2)
      .clampMaxNumElements(1, s16, 4)
      .scalarize(1)
      .lower();

  getActionDefinitionsBuilder({G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
      .scalarize(2)
      .lower();

  getActionDefinitionsBuilder(G_VECREDUCE_ADD)
      .legalFor({{s8, v16s8},
                 {s8, v8s8},
                 {s16, v8s16},
                 {s16, v4s16},
                 {s32, v4s32},
                 {s32, v2s32},
                 {s64, v2s64}})
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .clampMaxNumElements(1, s8, 16)
      .lower();

  getActionDefinitionsBuilder({G_VECREDUCE_FMIN, G_VECREDUCE_FMAX,
                               G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM})
      .legalFor({{s32, v4s32}, {s32, v2s32}, {s64, v2s64}})
      .legalIf([=](const LegalityQuery &Query) {
        const auto &Ty = Query.Types[1];
        return Query.Types[0] == s16 && (Ty == v8s16 || Ty == v4s16) && HasFP16;
      })
      .minScalarOrElt(0, MinFPScalar)
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .lower();

  getActionDefinitionsBuilder(G_VECREDUCE_MUL)
      .clampMaxNumElements(1, s32, 2)
      .clampMaxNumElements(1, s16, 4)
      .clampMaxNumElements(1, s8, 8)
      .scalarize(1)
      .lower();

  getActionDefinitionsBuilder(
      {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX})
      .legalFor({{s8, v8s8},
                 {s8, v16s8},
                 {s16, v4s16},
                 {s16, v8s16},
                 {s32, v2s32},
                 {s32, v4s32}})
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .clampMaxNumElements(1, s16, 8)
      .clampMaxNumElements(1, s8, 16)
      .scalarize(1)
      .lower();

  getActionDefinitionsBuilder(
      {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
      // Try to break down into smaller vectors as long as they're at least 64
      // bits. This lets us use vector operations for some parts of the
      // reduction.
      .fewerElementsIf(
          [=](const LegalityQuery &Q) {
            LLT SrcTy = Q.Types[1];
            if (SrcTy.isScalar())
              return false;
            if (!isPowerOf2_32(SrcTy.getNumElements()))
              return false;
            // We can usually perform 64b vector operations.
            return SrcTy.getSizeInBits() > 64;
          },
          [=](const LegalityQuery &Q) {
            LLT SrcTy = Q.Types[1];
            return std::make_pair(1, SrcTy.divide(2));
          })
      .scalarize(1)
      .lower();

  getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
      .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });

  getActionDefinitionsBuilder({G_FSHL, G_FSHR})
      .customFor({{s32, s32}, {s32, s64}, {s64, s64}})
      .lower();

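  // Only rotate-right has an encoding (ROR/EXTR); G_ROTL is lowered, which
  // generically becomes a G_ROTR with a negated amount.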
  getActionDefinitionsBuilder(G_ROTR)
      .legalFor({{s32, s64}, {s64, s64}})
      .customIf([=](const LegalityQuery &Q) {
        return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
      })
      .lower();
  getActionDefinitionsBuilder(G_ROTL).lower();

  getActionDefinitionsBuilder({G_SBFX, G_UBFX})
      .customFor({{s32, s32}, {s64, s64}});

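  // Without CSSC there is no scalar popcount instruction; scalar G_CTPOP is
  // custom-lowered through the vector path (roughly: bitcast to v8s8/v16s8,
  // CNT, then sum the lanes); see legalizeCTPOP.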
  auto always = [=](const LegalityQuery &Q) { return true; };
  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (HasCSSC)
    CTPOPActions
        .legalFor({{s32, s32},
                   {s64, s64},
                   {v8s8, v8s8},
                   {v16s8, v16s8}})
        .customFor({{s128, s128},
                    {v2s64, v2s64},
                    {v2s32, v2s32},
                    {v4s32, v4s32},
                    {v4s16, v4s16},
                    {v8s16, v8s16}});
  else
    CTPOPActions
        .legalFor({{v8s8, v8s8},
                   {v16s8, v16s8}})
        .customFor({{s32, s32},
                    {s64, s64},
                    {s128, s128},
                    {v2s64, v2s64},
                    {v2s32, v2s32},
                    {v4s32, v4s32},
                    {v4s16, v4s16},
                    {v8s16, v8s16}});
  CTPOPActions
      .clampScalar(0, s32, s128)
      .widenScalarToNextPow2(0)
      .minScalarEltSameAsIf(always, 1, 0)
      .maxScalarEltSameAsIf(always, 1, 0);

  // TODO: Vector types.
  getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT}).lowerIf(isScalar(0));

  // TODO: Libcall support for s128.
  // TODO: s16 should be legal with full FP16 support.
  getActionDefinitionsBuilder({G_LROUND, G_LLROUND})
      .legalFor({{s64, s32}, {s64, s64}});

  // TODO: Custom legalization for vector types.
  // TODO: Custom legalization for mismatched types.
  // TODO: s16 support.
  getActionDefinitionsBuilder(G_FCOPYSIGN).customFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_FMAD).lower();

  // Access to floating-point environment.
  getActionDefinitionsBuilder({G_GET_FPMODE, G_SET_FPMODE, G_RESET_FPMODE})
      .libcall();

  getActionDefinitionsBuilder(G_IS_FPCLASS).lower();

  getActionDefinitionsBuilder(G_PREFETCH).custom();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}

bool AArch64LegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  GISelChangeObserver &Observer = Helper.Observer;
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_GLOBAL_VALUE:
    return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX:
    return legalizeBitfieldExtract(MI, MRI, Helper);
  case TargetOpcode::G_FSHL:
  case TargetOpcode::G_FSHR:
    return legalizeFunnelShift(MI, MRI, MIRBuilder, Observer, Helper);
  case TargetOpcode::G_ROTR:
    return legalizeRotate(MI, MRI, Helper);
  case TargetOpcode::G_CTPOP:
    return legalizeCTPOP(MI, MRI, Helper);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return legalizeAtomicCmpxchg128(MI, MRI, Helper);
  case TargetOpcode::G_CTTZ:
    return legalizeCTTZ(MI, Helper);
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET:
    return legalizeMemOps(MI, Helper);
  case TargetOpcode::G_FCOPYSIGN:
    return legalizeFCopySign(MI, Helper);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, Helper);
  case TargetOpcode::G_DYN_STACKALLOC:
    return legalizeDynStackAlloc(MI, Helper);
  case TargetOpcode::G_PREFETCH:
    return legalizePrefetch(MI, Helper);
  }

  llvm_unreachable("expected switch to return");
}

bool AArch64LegalizerInfo::legalizeFunnelShift(MachineInstr &MI,
                                               MachineRegisterInfo &MRI,
                                               MachineIRBuilder &MIRBuilder,
                                               GISelChangeObserver &Observer,
                                               LegalizerHelper &Helper) const {
  assert(MI.getOpcode() == TargetOpcode::G_FSHL ||
         MI.getOpcode() == TargetOpcode::G_FSHR);

  // Keep as G_FSHR if shift amount is a G_CONSTANT, else use generic
  // lowering
  Register ShiftNo = MI.getOperand(3).getReg();
  LLT ShiftTy = MRI.getType(ShiftNo);
  auto VRegAndVal = getIConstantVRegValWithLookThrough(ShiftNo, MRI);

  // Adjust shift amount according to Opcode (FSHL/FSHR)
  // Convert FSHL to FSHR
  LLT OperationTy = MRI.getType(MI.getOperand(0).getReg());
  APInt BitWidth(ShiftTy.getSizeInBits(), OperationTy.getSizeInBits(), false);

  // Lower non-constant shifts and leave zero shifts to the optimizer.
  if (!VRegAndVal || VRegAndVal->Value.urem(BitWidth) == 0)
    return (Helper.lowerFunnelShiftAsShifts(MI) ==
            LegalizerHelper::LegalizeResult::Legalized);

  APInt Amount = VRegAndVal->Value.urem(BitWidth);

  Amount = MI.getOpcode() == TargetOpcode::G_FSHL ? BitWidth - Amount : Amount;

  // If the instruction is a G_FSHR with a 64-bit G_CONSTANT shift amount in
  // the range [0, BitWidth), it is already legal.
  if (ShiftTy.getSizeInBits() == 64 && MI.getOpcode() == TargetOpcode::G_FSHR &&
      VRegAndVal->Value.ult(BitWidth))
    return true;

  // Materialize the (adjusted) shift amount as a 64-bit constant.
  auto Cast64 = MIRBuilder.buildConstant(LLT::scalar(64), Amount.zext(64));

  if (MI.getOpcode() == TargetOpcode::G_FSHR) {
    Observer.changingInstr(MI);
    MI.getOperand(3).setReg(Cast64.getReg(0));
    Observer.changedInstr(MI);
  }
  // If Opcode is FSHL, remove the FSHL instruction and create a FSHR
  // instruction
  else if (MI.getOpcode() == TargetOpcode::G_FSHL) {
    MIRBuilder.buildInstr(TargetOpcode::G_FSHR, {MI.getOperand(0).getReg()},
                          {MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),
                           Cast64.getReg(0)});
    MI.eraseFromParent();
  }
  return true;
}

bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          LegalizerHelper &Helper) const {
  // To allow for imported patterns to match, we ensure that the rotate amount
  // is 64b with an extension.
  Register AmtReg = MI.getOperand(2).getReg();
  LLT AmtTy = MRI.getType(AmtReg);
  (void)AmtTy;
  assert(AmtTy.isScalar() && "Expected a scalar rotate");
  assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
  auto NewAmt = Helper.MIRBuilder.buildZExt(LLT::scalar(64), AmtReg);
  Helper.Observer.changingInstr(MI);
  MI.getOperand(2).setReg(NewAmt.getReg(0));
  Helper.Observer.changedInstr(MI);
  return true;
}

bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
  // G_ADD_LOW instructions.
  // By splitting this here, we can optimize accesses in the small code model by
  // folding in the G_ADD_LOW into the load/store offset.
  auto &GlobalOp = MI.getOperand(1);
  const auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    return true; // Don't want to modify TLS vars.

  auto &TM = ST->getTargetLowering()->getTargetMachine();
  unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);

  if (OpFlags & AArch64II::MO_GOT)
    return true;

  auto Offset = GlobalOp.getOffset();
  Register DstReg = MI.getOperand(0).getReg();
  auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {})
                  .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE);
  // Set the regclass on the dest reg too.
  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);

  // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
  // by creating a MOVK that sets bits 48-63 of the register to (global address
  // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
  // prevent an incorrect tag being generated during relocation when the
  // global appears before the code section. Without the offset, a global at
  // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
  // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
  // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe`
  // instead of `0xf`.
  // This assumes that we're in the small code model so we can assume a binary
  // size of <= 4GB, which makes the untagged PC relative offset positive. The
  // binary must also be loaded into address range [0, 2^48). Both of these
  // properties need to be ensured at runtime when using tagged addresses.
  if (OpFlags & AArch64II::MO_TAGGED) {
    assert(!Offset &&
           "Should not have folded in an offset for a tagged global!");
    ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP})
               .addGlobalAddress(GV, 0x100000000,
                                 AArch64II::MO_PREL | AArch64II::MO_G3)
               .addImm(48);
    MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
  }

  MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
      .addGlobalAddress(GV, Offset,
                        OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MI.eraseFromParent();
  return true;
}
1358 
1359 bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
1360                                              MachineInstr &MI) const {
1361   Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
1362   switch (IntrinsicID) {
1363   case Intrinsic::vacopy: {
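    // On Darwin and Windows a va_list is a single pointer; in the AAPCS64 ABI
    // it is a 32-byte struct (20 bytes on ILP32), so copy it as one opaque
    // scalar of the appropriate size.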
1364     unsigned PtrSize = ST->isTargetILP32() ? 4 : 8;
1365     unsigned VaListSize =
1366       (ST->isTargetDarwin() || ST->isTargetWindows())
1367           ? PtrSize
1368           : ST->isTargetILP32() ? 20 : 32;
1369 
1370     MachineFunction &MF = *MI.getMF();
1371     auto Val = MF.getRegInfo().createGenericVirtualRegister(
1372         LLT::scalar(VaListSize * 8));
1373     MachineIRBuilder MIB(MI);
1374     MIB.buildLoad(Val, MI.getOperand(2),
1375                   *MF.getMachineMemOperand(MachinePointerInfo(),
1376                                            MachineMemOperand::MOLoad,
1377                                            VaListSize, Align(PtrSize)));
1378     MIB.buildStore(Val, MI.getOperand(1),
1379                    *MF.getMachineMemOperand(MachinePointerInfo(),
1380                                             MachineMemOperand::MOStore,
1381                                             VaListSize, Align(PtrSize)));
1382     MI.eraseFromParent();
1383     return true;
1384   }
1385   case Intrinsic::get_dynamic_area_offset: {
1386     MachineIRBuilder &MIB = Helper.MIRBuilder;
1387     MIB.buildConstant(MI.getOperand(0).getReg(), 0);
1388     MI.eraseFromParent();
1389     return true;
1390   }
1391   case Intrinsic::aarch64_mops_memset_tag: {
1392     assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1393   // Anyext the value being set to 64 bits (only the bottom 8 bits are read by
1394     // the instruction).
1395     MachineIRBuilder MIB(MI);
1396     auto &Value = MI.getOperand(3);
1397     Register ExtValueReg = MIB.buildAnyExt(LLT::scalar(64), Value).getReg(0);
1398     Value.setReg(ExtValueReg);
1399     return true;
1400   }
1401   case Intrinsic::aarch64_prefetch: {
1402     MachineIRBuilder MIB(MI);
1403     auto &AddrVal = MI.getOperand(1);
1404 
1405     int64_t IsWrite = MI.getOperand(2).getImm();
1406     int64_t Target = MI.getOperand(3).getImm();
1407     int64_t IsStream = MI.getOperand(4).getImm();
1408     int64_t IsData = MI.getOperand(5).getImm();
1409 
1410     unsigned PrfOp = (IsWrite << 4) |    // Load/Store bit
1411                      (!IsData << 3) |    // IsDataCache bit
1412                      (Target << 1) |     // Cache level bits
1413                      (unsigned)IsStream; // Stream bit
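    // For example, IsWrite=1, IsData=1, Target=0, IsStream=0 gives
    // PrfOp = 0b10000 = 16, i.e. PSTL1KEEP (prefetch for store, data cache,
    // level 1, temporal).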
1414 
1415     MIB.buildInstr(AArch64::G_AARCH64_PREFETCH).addImm(PrfOp).add(AddrVal);
1416     MI.eraseFromParent();
1417     return true;
1418   }
1419   case Intrinsic::aarch64_neon_uaddv:
1420   case Intrinsic::aarch64_neon_saddv:
1421   case Intrinsic::aarch64_neon_umaxv:
1422   case Intrinsic::aarch64_neon_smaxv:
1423   case Intrinsic::aarch64_neon_uminv:
1424   case Intrinsic::aarch64_neon_sminv: {
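    // These across-vector reductions return a scalar that may be wider than
    // the vector element type (e.g. i32 for a v8i8 source). Rewrite the
    // intrinsic to produce the element type and extend the result back
    // afterwards so the imported patterns can match.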
1425     MachineIRBuilder MIB(MI);
1426     MachineRegisterInfo &MRI = *MIB.getMRI();
1427     bool IsSigned = IntrinsicID == Intrinsic::aarch64_neon_saddv ||
1428                     IntrinsicID == Intrinsic::aarch64_neon_smaxv ||
1429                     IntrinsicID == Intrinsic::aarch64_neon_sminv;
1430 
1431     auto OldDst = MI.getOperand(0).getReg();
1432     auto OldDstTy = MRI.getType(OldDst);
1433     LLT NewDstTy = MRI.getType(MI.getOperand(2).getReg()).getElementType();
1434     if (OldDstTy == NewDstTy)
1435       return true;
1436 
1437     auto NewDst = MRI.createGenericVirtualRegister(NewDstTy);
1438 
1439     Helper.Observer.changingInstr(MI);
1440     MI.getOperand(0).setReg(NewDst);
1441     Helper.Observer.changedInstr(MI);
1442 
1443     MIB.setInsertPt(MIB.getMBB(), ++MIB.getInsertPt());
1444     MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT,
1445                         OldDst, NewDst);
1446 
1447     return true;
1448   }
1449   case Intrinsic::aarch64_neon_smax:
1450   case Intrinsic::aarch64_neon_smin:
1451   case Intrinsic::aarch64_neon_umax:
1452   case Intrinsic::aarch64_neon_umin:
1453   case Intrinsic::aarch64_neon_fmax:
1454   case Intrinsic::aarch64_neon_fmin:
1455   case Intrinsic::aarch64_neon_fmaxnm:
1456   case Intrinsic::aarch64_neon_fminnm: {
1457     MachineIRBuilder MIB(MI);
1458     if (IntrinsicID == Intrinsic::aarch64_neon_smax)
1459       MIB.buildSMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
1460     else if (IntrinsicID == Intrinsic::aarch64_neon_smin)
1461       MIB.buildSMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
1462     else if (IntrinsicID == Intrinsic::aarch64_neon_umax)
1463       MIB.buildUMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
1464     else if (IntrinsicID == Intrinsic::aarch64_neon_umin)
1465       MIB.buildUMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
1466     else if (IntrinsicID == Intrinsic::aarch64_neon_fmax)
1467       MIB.buildInstr(TargetOpcode::G_FMAXIMUM, {MI.getOperand(0)},
1468                      {MI.getOperand(2), MI.getOperand(3)});
1469     else if (IntrinsicID == Intrinsic::aarch64_neon_fmin)
1470       MIB.buildInstr(TargetOpcode::G_FMINIMUM, {MI.getOperand(0)},
1471                      {MI.getOperand(2), MI.getOperand(3)});
1472     else if (IntrinsicID == Intrinsic::aarch64_neon_fmaxnm)
1473       MIB.buildInstr(TargetOpcode::G_FMAXNUM, {MI.getOperand(0)},
1474                      {MI.getOperand(2), MI.getOperand(3)});
1475     else if (IntrinsicID == Intrinsic::aarch64_neon_fminnm)
1476       MIB.buildInstr(TargetOpcode::G_FMINNUM, {MI.getOperand(0)},
1477                      {MI.getOperand(2), MI.getOperand(3)});
1478     MI.eraseFromParent();
1479     return true;
1480   }
1481   case Intrinsic::experimental_vector_reverse:
1482     // TODO: Add support for vector_reverse
1483     return false;
1484   }
1485 
1486   return true;
1487 }
1488 
1489 bool AArch64LegalizerInfo::legalizeShlAshrLshr(
1490     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
1491     GISelChangeObserver &Observer) const {
1492   assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
1493          MI.getOpcode() == TargetOpcode::G_LSHR ||
1494          MI.getOpcode() == TargetOpcode::G_SHL);
1495   // If the shift amount is a G_CONSTANT, promote it to a 64-bit type so the
1496   // imported patterns can select it later. Either way, it will be legal.
1497   Register AmtReg = MI.getOperand(2).getReg();
1498   auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
1499   if (!VRegAndVal)
1500     return true;
1501   // Check the shift amount is in range for an immediate form.
1502   int64_t Amount = VRegAndVal->Value.getSExtValue();
1503   if (Amount > 31)
1504     return true; // This will have to remain a register variant.
1505   auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
1506   Observer.changingInstr(MI);
1507   MI.getOperand(2).setReg(ExtCst.getReg(0));
1508   Observer.changedInstr(MI);
1509   return true;
1510 }
1511 
1512 static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset,
1513                                 MachineRegisterInfo &MRI) {
1514   Base = Root;
1515   Offset = 0;
1516 
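  // Fold a G_PTR_ADD with a constant offset into the addressing mode when the
  // offset is a multiple of 8 in [-512, 504], i.e. a valid scaled imm7 for
  // 64-bit LDP/STP.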
1517   Register NewBase;
1518   int64_t NewOffset;
1519   if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) &&
1520       isShiftedInt<7, 3>(NewOffset)) {
1521     Base = NewBase;
1522     Offset = NewOffset;
1523   }
1524 }
1525 
1526 // FIXME: This should be removed and replaced with the generic bitcast legalize
1527 // action.
1528 bool AArch64LegalizerInfo::legalizeLoadStore(
1529     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
1530     GISelChangeObserver &Observer) const {
1531   assert(MI.getOpcode() == TargetOpcode::G_STORE ||
1532          MI.getOpcode() == TargetOpcode::G_LOAD);
1533   // Here we just try to handle vector loads/stores where our value type might
1534   // have pointer elements, which the SelectionDAG importer can't handle. To
1535   // allow the existing patterns for s64 to fire for p0, we just try to bitcast
1536   // the value to use s64 types.
1537 
1538   // Custom legalization requires that the instruction, if not deleted, be fully
1539   // legal after this callback. To allow further legalization of the result, we
1540   // create a new instruction and erase the existing one.
1541 
1542   Register ValReg = MI.getOperand(0).getReg();
1543   const LLT ValTy = MRI.getType(ValReg);
1544 
1545   if (ValTy == LLT::scalar(128)) {
1547     AtomicOrdering Ordering = (*MI.memoperands_begin())->getSuccessOrdering();
1548     bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
1549     bool IsLoadAcquire = IsLoad && Ordering == AtomicOrdering::Acquire;
1550     bool IsStoreRelease = !IsLoad && Ordering == AtomicOrdering::Release;
1551     bool IsRcpC3 =
1552         ST->hasLSE2() && ST->hasRCPC3() && (IsLoadAcquire || IsStoreRelease);
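    // FEAT_LSE2 makes aligned 16-byte LDP/STP single-copy atomic; FEAT_LRCPC3
    // additionally provides LDIAPP/STILP for the acquire/release cases.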
1553 
1554     LLT s64 = LLT::scalar(64);
1555 
1556     unsigned Opcode;
1557     if (IsRcpC3) {
1558       Opcode = IsLoad ? AArch64::LDIAPPX : AArch64::STILPX;
1559     } else {
1560       // For LSE2, loads/stores should have been converted to monotonic and had
1561       // a fence inserted after them.
1562       assert(Ordering == AtomicOrdering::Monotonic ||
1563              Ordering == AtomicOrdering::Unordered);
1564       assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");
1565 
1566       Opcode = IsLoad ? AArch64::LDPXi : AArch64::STPXi;
1567     }
1568 
1569     MachineInstrBuilder NewI;
1570     if (IsLoad) {
1571       NewI = MIRBuilder.buildInstr(Opcode, {s64, s64}, {});
1572       MIRBuilder.buildMergeLikeInstr(
1573           ValReg, {NewI->getOperand(0), NewI->getOperand(1)});
1574     } else {
1575       auto Split = MIRBuilder.buildUnmerge(s64, MI.getOperand(0));
1576       NewI = MIRBuilder.buildInstr(
1577           Opcode, {}, {Split->getOperand(0), Split->getOperand(1)});
1578     }
1579 
1580     if (IsRcpC3) {
1581       NewI.addUse(MI.getOperand(1).getReg());
1582     } else {
1583       Register Base;
1584       int Offset;
1585       matchLDPSTPAddrMode(MI.getOperand(1).getReg(), Base, Offset, MRI);
1586       NewI.addUse(Base);
1587       NewI.addImm(Offset / 8);
1588     }
1589 
1590     NewI.cloneMemRefs(MI);
1591     constrainSelectedInstRegOperands(*NewI, *ST->getInstrInfo(),
1592                                      *MRI.getTargetRegisterInfo(),
1593                                      *ST->getRegBankInfo());
1594     MI.eraseFromParent();
1595     return true;
1596   }
1597 
1598   if (!ValTy.isVector() || !ValTy.getElementType().isPointer() ||
1599       ValTy.getElementType().getAddressSpace() != 0) {
1600     LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store\n");
1601     return false;
1602   }
1603 
1604   unsigned PtrSize = ValTy.getElementType().getSizeInBits();
1605   const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize);
1606   auto &MMO = **MI.memoperands_begin();
1607   MMO.setType(NewTy);
1608 
1609   if (MI.getOpcode() == TargetOpcode::G_STORE) {
1610     auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
1611     MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO);
1612   } else {
1613     auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
1614     MIRBuilder.buildBitcast(ValReg, NewLoad);
1615   }
1616   MI.eraseFromParent();
1617   return true;
1618 }
1619 
1620 bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
1621                                          MachineRegisterInfo &MRI,
1622                                          MachineIRBuilder &MIRBuilder) const {
1623   MachineFunction &MF = MIRBuilder.getMF();
1624   Align Alignment(MI.getOperand(2).getImm());
1625   Register Dst = MI.getOperand(0).getReg();
1626   Register ListPtr = MI.getOperand(1).getReg();
1627 
1628   LLT PtrTy = MRI.getType(ListPtr);
1629   LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
1630 
1631   const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
1632   const Align PtrAlign = Align(PtrSize);
1633   auto List = MIRBuilder.buildLoad(
1634       PtrTy, ListPtr,
1635       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
1636                                PtrTy, PtrAlign));
1637 
1638   MachineInstrBuilder DstPtr;
1639   if (Alignment > PtrAlign) {
1640     // Realign the list to the actual required alignment.
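    // e.g. for a 16-byte alignment: DstPtr = (List + 15) & ~15.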
1641     auto AlignMinus1 =
1642         MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
1643     auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
1644     DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment));
1645   } else
1646     DstPtr = List;
1647 
1648   LLT ValTy = MRI.getType(Dst);
1649   uint64_t ValSize = ValTy.getSizeInBits() / 8;
1650   MIRBuilder.buildLoad(
1651       Dst, DstPtr,
1652       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
1653                                ValTy, std::max(Alignment, PtrAlign)));
1654 
1655   auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign));
1656 
1657   auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));
1658 
1659   MIRBuilder.buildStore(NewList, ListPtr,
1660                         *MF.getMachineMemOperand(MachinePointerInfo(),
1661                                                  MachineMemOperand::MOStore,
1662                                                  PtrTy, PtrAlign));
1663 
1664   MI.eraseFromParent();
1665   return true;
1666 }
1667 
1668 bool AArch64LegalizerInfo::legalizeBitfieldExtract(
1669     MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
1670   // Only legal if we can select immediate forms.
1671   // TODO: Lower this otherwise.
1672   return getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
1673          getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
1674 }
1675 
1676 bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
1677                                          MachineRegisterInfo &MRI,
1678                                          LegalizerHelper &Helper) const {
1679   // When there is no integer popcount instruction (FEAT_CSSC isn't available),
1680   // it can be more efficiently lowered to the following sequence that uses
1681   // AdvSIMD registers/instructions as long as the copies to/from the AdvSIMD
1682   // registers are cheap.
1683   //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zeroed
1684   //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
1685   //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
1686   //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
1687   //
1688   // For 128 bit vector popcounts, we lower to the following sequence:
1689   //  cnt.16b   v0, v0  // v8s16, v4s32, v2s64
1690   //  uaddlp.8h v0, v0  // v8s16, v4s32, v2s64
1691   //  uaddlp.4s v0, v0  //        v4s32, v2s64
1692   //  uaddlp.2d v0, v0  //               v2s64
1693   //
1694   // For 64 bit vector popcounts, we lower to the following sequence:
1695   //  cnt.8b    v0, v0  // v4s16, v2s32
1696   //  uaddlp.4h v0, v0  // v4s16, v2s32
1697   //  uaddlp.2s v0, v0  //        v2s32
1698 
1699   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1700   Register Dst = MI.getOperand(0).getReg();
1701   Register Val = MI.getOperand(1).getReg();
1702   LLT Ty = MRI.getType(Val);
1703   unsigned Size = Ty.getSizeInBits();
1704 
1705   assert(Ty == MRI.getType(Dst) &&
1706          "Expected src and dst to have the same type!");
1707 
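  // With FEAT_CSSC there is a scalar CNT instruction on 64-bit GPRs: split the
  // 128-bit value, popcount each half, and add. The sum is at most 128, so
  // widening back to s128 is a simple zero-extension.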
1708   if (ST->hasCSSC() && Ty.isScalar() && Size == 128) {
1709     LLT s64 = LLT::scalar(64);
1710 
1711     auto Split = MIRBuilder.buildUnmerge(s64, Val);
1712     auto CTPOP1 = MIRBuilder.buildCTPOP(s64, Split->getOperand(0));
1713     auto CTPOP2 = MIRBuilder.buildCTPOP(s64, Split->getOperand(1));
1714     auto Add = MIRBuilder.buildAdd(s64, CTPOP1, CTPOP2);
1715 
1716     MIRBuilder.buildZExt(Dst, Add);
1717     MI.eraseFromParent();
1718     return true;
1719   }
1720 
1721   if (!ST->hasNEON() ||
1722       MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) {
1723     // Use generic lowering when custom lowering is not possible.
1724     return Ty.isScalar() && (Size == 32 || Size == 64) &&
1725            Helper.lowerBitCount(MI) ==
1726                LegalizerHelper::LegalizeResult::Legalized;
1727   }
1728 
1729   // Pre-conditioning: widen Val up to the nearest vector type.
1730   // s32, s64, v4s16, v2s32 -> v8s8
1731   // s128, v8s16, v4s32, v2s64 -> v16s8
1732   LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8);
1733   if (Ty.isScalar()) {
1734     assert((Size == 32 || Size == 64 || Size == 128) &&
             "Expected only 32, 64, or 128 bit scalars!");
1735     if (Size == 32) {
1736       Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
1737     }
1738   }
1739   Val = MIRBuilder.buildBitcast(VTy, Val).getReg(0);
1740 
1741   // Count bits in each byte-sized lane.
1742   auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val);
1743 
1744   // Sum across lanes.
1745   Register HSum = CTPOP.getReg(0);
1746   unsigned Opc;
1747   SmallVector<LLT> HAddTys;
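  // Scalar results use a single UADDLV to sum across the whole vector; vector
  // results use a chain of UADDLP steps, pairwise-widening the lanes until the
  // element size matches the original type.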
1748   if (Ty.isScalar()) {
1749     Opc = Intrinsic::aarch64_neon_uaddlv;
1750     HAddTys.push_back(LLT::scalar(32));
1751   } else if (Ty == LLT::fixed_vector(8, 16)) {
1752     Opc = Intrinsic::aarch64_neon_uaddlp;
1753     HAddTys.push_back(LLT::fixed_vector(8, 16));
1754   } else if (Ty == LLT::fixed_vector(4, 32)) {
1755     Opc = Intrinsic::aarch64_neon_uaddlp;
1756     HAddTys.push_back(LLT::fixed_vector(8, 16));
1757     HAddTys.push_back(LLT::fixed_vector(4, 32));
1758   } else if (Ty == LLT::fixed_vector(2, 64)) {
1759     Opc = Intrinsic::aarch64_neon_uaddlp;
1760     HAddTys.push_back(LLT::fixed_vector(8, 16));
1761     HAddTys.push_back(LLT::fixed_vector(4, 32));
1762     HAddTys.push_back(LLT::fixed_vector(2, 64));
1763   } else if (Ty == LLT::fixed_vector(4, 16)) {
1764     Opc = Intrinsic::aarch64_neon_uaddlp;
1765     HAddTys.push_back(LLT::fixed_vector(4, 16));
1766   } else if (Ty == LLT::fixed_vector(2, 32)) {
1767     Opc = Intrinsic::aarch64_neon_uaddlp;
1768     HAddTys.push_back(LLT::fixed_vector(4, 16));
1769     HAddTys.push_back(LLT::fixed_vector(2, 32));
1770   } else
1771     llvm_unreachable("unexpected vector shape");
1772   MachineInstrBuilder UADD;
1773   for (LLT HTy : HAddTys) {
1774     UADD = MIRBuilder.buildIntrinsic(Opc, {HTy}).addUse(HSum);
1775     HSum = UADD.getReg(0);
1776   }
1777 
1778   // Post-conditioning.
1779   if (Ty.isScalar() && (Size == 64 || Size == 128))
1780     MIRBuilder.buildZExt(Dst, UADD);
1781   else
1782     UADD->getOperand(0).setReg(Dst);
1783   MI.eraseFromParent();
1784   return true;
1785 }
1786 
1787 bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
1788     MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
1789   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1790   LLT s64 = LLT::scalar(64);
1791   auto Addr = MI.getOperand(1).getReg();
1792   auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
1793   auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
1794   auto DstLo = MRI.createGenericVirtualRegister(s64);
1795   auto DstHi = MRI.createGenericVirtualRegister(s64);
1796 
1797   MachineInstrBuilder CAS;
1798   if (ST->hasLSE()) {
1799     // We have 128-bit CASP instructions taking XSeqPair registers, which are
1800     // s128. We need the merge/unmerge to bracket the expansion and pair up with
1801     // the rest of the MIR, so we must reassemble the extracted registers into a
1802     // 128-bit value of a known register class with code like this:
1803     //
1804     //     %in1 = REG_SEQUENCE Lo, Hi    ; One for each input
1805     //     %out = CASP %in1, ...
1806     //     %OldLo = G_EXTRACT %out, 0
1807     //     %OldHi = G_EXTRACT %out, 64
1808     auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
1809     unsigned Opcode;
1810     switch (Ordering) {
1811     case AtomicOrdering::Acquire:
1812       Opcode = AArch64::CASPAX;
1813       break;
1814     case AtomicOrdering::Release:
1815       Opcode = AArch64::CASPLX;
1816       break;
1817     case AtomicOrdering::AcquireRelease:
1818     case AtomicOrdering::SequentiallyConsistent:
1819       Opcode = AArch64::CASPALX;
1820       break;
1821     default:
1822       Opcode = AArch64::CASPX;
1823       break;
1824     }
1825 
1826     LLT s128 = LLT::scalar(128);
1827     auto CASDst = MRI.createGenericVirtualRegister(s128);
1828     auto CASDesired = MRI.createGenericVirtualRegister(s128);
1829     auto CASNew = MRI.createGenericVirtualRegister(s128);
1830     MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
1831         .addUse(DesiredI->getOperand(0).getReg())
1832         .addImm(AArch64::sube64)
1833         .addUse(DesiredI->getOperand(1).getReg())
1834         .addImm(AArch64::subo64);
1835     MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
1836         .addUse(NewI->getOperand(0).getReg())
1837         .addImm(AArch64::sube64)
1838         .addUse(NewI->getOperand(1).getReg())
1839         .addImm(AArch64::subo64);
1840 
1841     CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});
1842 
1843     MIRBuilder.buildExtract({DstLo}, {CASDst}, 0);
1844     MIRBuilder.buildExtract({DstHi}, {CASDst}, 64);
1845   } else {
1846     // The -O0 CMP_SWAP_128 is friendlier to generate code for because LDXP/STXP
1847     // can take arbitrary registers, so it just has the normal GPR64 operands
1848     // that the rest of AArch64 is expecting.
1849     auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
1850     unsigned Opcode;
1851     switch (Ordering) {
1852     case AtomicOrdering::Acquire:
1853       Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
1854       break;
1855     case AtomicOrdering::Release:
1856       Opcode = AArch64::CMP_SWAP_128_RELEASE;
1857       break;
1858     case AtomicOrdering::AcquireRelease:
1859     case AtomicOrdering::SequentiallyConsistent:
1860       Opcode = AArch64::CMP_SWAP_128;
1861       break;
1862     default:
1863       Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
1864       break;
1865     }
1866 
1867     auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1868     CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
1869                                 {Addr, DesiredI->getOperand(0),
1870                                  DesiredI->getOperand(1), NewI->getOperand(0),
1871                                  NewI->getOperand(1)});
1872   }
1873 
1874   CAS.cloneMemRefs(MI);
1875   constrainSelectedInstRegOperands(*CAS, *ST->getInstrInfo(),
1876                                    *MRI.getTargetRegisterInfo(),
1877                                    *ST->getRegBankInfo());
1878 
1879   MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {DstLo, DstHi});
1880   MI.eraseFromParent();
1881   return true;
1882 }
1883 
1884 bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI,
1885                                         LegalizerHelper &Helper) const {
1886   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1887   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1888   LLT Ty = MRI.getType(MI.getOperand(1).getReg());
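  // There is no scalar count-trailing-zeros instruction; use the identity
  // cttz(x) == ctlz(bitreverse(x)), which selects to RBIT + CLZ.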
1889   auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1));
1890   MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
1891   MI.eraseFromParent();
1892   return true;
1893 }
1894 
1895 bool AArch64LegalizerInfo::legalizeMemOps(MachineInstr &MI,
1896                                           LegalizerHelper &Helper) const {
1897   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1898 
1899   // The tagged version (aarch64_mops_memset_tag) is legalized in legalizeIntrinsic.
1900   if (MI.getOpcode() == TargetOpcode::G_MEMSET) {
1901     // Anyext the value being set to 64 bits (only the bottom 8 bits are read by
1902     // the instruction).
1903     auto &Value = MI.getOperand(1);
1904     Register ExtValueReg =
1905         MIRBuilder.buildAnyExt(LLT::scalar(64), Value).getReg(0);
1906     Value.setReg(ExtValueReg);
1907     return true;
1908   }
1909 
1910   return false;
1911 }
1912 
1913 bool AArch64LegalizerInfo::legalizeFCopySign(MachineInstr &MI,
1914                                              LegalizerHelper &Helper) const {
1915   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1916   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1917   Register Dst = MI.getOperand(0).getReg();
1918   LLT DstTy = MRI.getType(Dst);
1919   assert(DstTy.isScalar() && "Only expected scalars right now!");
1920   const unsigned DstSize = DstTy.getSizeInBits();
1921   assert((DstSize == 32 || DstSize == 64) && "Unexpected dst type!");
1922   assert(MRI.getType(MI.getOperand(2).getReg()) == DstTy &&
1923          "Expected homogeneous types!");
1924 
1925   // We want to materialize a mask with the high bit set.
1926   uint64_t EltMask;
1927   LLT VecTy;
1928 
1929   // TODO: s16 support.
1930   switch (DstSize) {
1931   default:
1932     llvm_unreachable("Unexpected type for G_FCOPYSIGN!");
1933   case 64: {
1934     // AdvSIMD immediate moves cannot materialize out mask in a single
1935     // instruction for 64-bit elements. Instead, materialize zero and then
1936     // negate it.
1937     EltMask = 0;
1938     VecTy = LLT::fixed_vector(2, DstTy);
1939     break;
1940   }
1941   case 32:
1942     EltMask = 0x80000000ULL;
1943     VecTy = LLT::fixed_vector(4, DstTy);
1944     break;
1945   }
1946 
1947   // Widen In1 and In2 to 128 bits. We want these to eventually become
1948   // INSERT_SUBREGs.
1949   auto Undef = MIRBuilder.buildUndef(VecTy);
1950   auto Zero = MIRBuilder.buildConstant(DstTy, 0);
1951   auto Ins1 = MIRBuilder.buildInsertVectorElement(
1952       VecTy, Undef, MI.getOperand(1).getReg(), Zero);
1953   auto Ins2 = MIRBuilder.buildInsertVectorElement(
1954       VecTy, Undef, MI.getOperand(2).getReg(), Zero);
1955 
1956   // Construct the mask.
1957   auto Mask = MIRBuilder.buildConstant(VecTy, EltMask);
1958   if (DstSize == 64)
1959     Mask = MIRBuilder.buildFNeg(VecTy, Mask);
1960 
1961   auto Sel = MIRBuilder.buildInstr(AArch64::G_BSP, {VecTy}, {Mask, Ins2, Ins1});
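  // G_BSP computes (Mask & Ins2) | (~Mask & Ins1): the result takes its sign
  // bit from In2 and all remaining bits from In1, which is exactly fcopysign.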
1962 
1963   // Build an unmerge whose 0th elt is the original G_FCOPYSIGN destination. We
1964   // want this to eventually become an EXTRACT_SUBREG.
1965   SmallVector<Register, 2> DstRegs(1, Dst);
1966   for (unsigned I = 1, E = VecTy.getNumElements(); I < E; ++I)
1967     DstRegs.push_back(MRI.createGenericVirtualRegister(DstTy));
1968   MIRBuilder.buildUnmerge(DstRegs, Sel);
1969   MI.eraseFromParent();
1970   return true;
1971 }
1972 
1973 bool AArch64LegalizerInfo::legalizeExtractVectorElt(
1974     MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
1975   assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
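  // A constant index is directly selectable; only a variable index needs the
  // generic lowering.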
1976   auto VRegAndVal =
1977       getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1978   if (VRegAndVal)
1979     return true;
1980   return Helper.lowerExtractInsertVectorElt(MI) !=
1981          LegalizerHelper::LegalizeResult::UnableToLegalize;
1982 }
1983 
1984 bool AArch64LegalizerInfo::legalizeDynStackAlloc(
1985     MachineInstr &MI, LegalizerHelper &Helper) const {
1986   MachineFunction &MF = *MI.getParent()->getParent();
1987   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1988   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1989 
1990   // If stack probing is not enabled for this function, use the default
1991   // lowering.
1992   if (!MF.getFunction().hasFnAttribute("probe-stack") ||
1993       MF.getFunction().getFnAttribute("probe-stack").getValueAsString() !=
1994           "inline-asm") {
1995     Helper.lowerDynStackAlloc(MI);
1996     return true;
1997   }
1998 
1999   Register Dst = MI.getOperand(0).getReg();
2000   Register AllocSize = MI.getOperand(1).getReg();
2001   Align Alignment = assumeAligned(MI.getOperand(2).getImm());
2002 
2003   assert(MRI.getType(Dst) == LLT::pointer(0, 64) &&
2004          "Unexpected type for dynamic alloca");
2005   assert(MRI.getType(AllocSize) == LLT::scalar(64) &&
2006          "Unexpected type for dynamic alloca");
2007 
2008   LLT PtrTy = MRI.getType(Dst);
2009   Register SPReg =
2010       Helper.getTargetLowering().getStackPointerRegisterToSaveRestore();
2011   Register SPTmp =
2012       Helper.getDynStackAllocTargetPtr(SPReg, AllocSize, Alignment, PtrTy);
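  // PROBED_STACKALLOC_DYN is expanded by a later pass into a loop that moves
  // SP down to SPTmp one guard-region-sized step at a time, probing each step,
  // so the allocation cannot skip over a guard page.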
2013   auto NewMI =
2014       MIRBuilder.buildInstr(AArch64::PROBED_STACKALLOC_DYN, {}, {SPTmp});
2015   MRI.setRegClass(NewMI.getReg(0), &AArch64::GPR64commonRegClass);
2016   MIRBuilder.setInsertPt(*NewMI->getParent(), NewMI);
2017   MIRBuilder.buildCopy(Dst, SPTmp);
2018 
2019   MI.eraseFromParent();
2020   return true;
2021 }
2022 
2023 bool AArch64LegalizerInfo::legalizePrefetch(MachineInstr &MI,
2024                                             LegalizerHelper &Helper) const {
2025   MachineIRBuilder &MIB = Helper.MIRBuilder;
2026   auto &AddrVal = MI.getOperand(0);
2027 
2028   int64_t IsWrite = MI.getOperand(1).getImm();
2029   int64_t Locality = MI.getOperand(2).getImm();
2030   int64_t IsData = MI.getOperand(3).getImm();
2031 
2032   bool IsStream = Locality == 0;
2033   if (Locality != 0) {
2034     assert(Locality <= 3 && "Prefetch locality out-of-range");
2035     // The IR locality degree is inversely related to the cache level: locality
2036     // 3 means the innermost (fastest) cache, for which the PRFM encoding uses
2037     // level 0, so invert the value.
2038     Locality = 3 - Locality;
2039   }
2040 
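  // For example, __builtin_prefetch(p, /*rw=*/0, /*locality=*/3) arrives here
  // as IsWrite=0, Locality=3, IsData=1 and encodes PrfOp = 0, i.e. PLDL1KEEP.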
2041   unsigned PrfOp = (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream;
2042 
2043   MIB.buildInstr(AArch64::G_AARCH64_PREFETCH).addImm(PrfOp).add(AddrVal);
2044   MI.eraseFromParent();
2045   return true;
2046 }
2047