xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp (revision cb14a3fe5122c879eae1fb480ed7ce82a699ddb6)
1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the ARM target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ARM.h"
14 #include "ARMBaseInstrInfo.h"
15 #include "ARMTargetMachine.h"
16 #include "MCTargetDesc/ARMAddressingModes.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APSInt.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/SelectionDAG.h"
25 #include "llvm/CodeGen/SelectionDAGISel.h"
26 #include "llvm/CodeGen/TargetLowering.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/IntrinsicsARM.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include <optional>
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "arm-isel"
43 #define PASS_NAME "ARM Instruction Selection"
44 
// Debugging escape hatch: when set, SelectImmShifterOperand and
// SelectRegShifterOperand refuse to fold shifts into shifter operands.
static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));
49 
50 //===--------------------------------------------------------------------===//
51 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
52 /// instructions for SelectionDAG operations.
53 ///
54 namespace {
55 
class ARMDAGToDAGISel : public SelectionDAGISel {
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  // Legacy pass identification.
  static char ID;

  ARMDAGToDAGISel() = delete;

  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOptLevel OptLevel)
      : SelectionDAGISel(ID, tm, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &MF.getSubtarget<ARMSubtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }

  // Pre-isel DAG rewrites (see the definition for the add/and/srl reshaping).
  void PreprocessISelDAG() override;

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  void Select(SDNode *N) override;

  /// Return true as some complex patterns, like those that call
  /// canExtractShiftFromMul can modify the DAG inplace.
  bool ComplexPatternFuncMutatesDAG() const override { return true; }

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A, SDValue &B,
                                    SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A, SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }
  bool SelectShiftImmShifterOperandOneUse(SDValue N, SDValue &A, SDValue &B) {
    // Same as above, but additionally require the shifted value to have a
    // single use.
    if (!N.hasOneUse())
      return false;
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out);

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  // Wrap a condition-code constant plus an implicit CPSR use as the two
  // predicate operands of a conditional move.
  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }

  // ARM addressing modes (addrmode2/3/5/6 and PC-relative):
  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset, bool FP16);
  bool SelectAddrMode5(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectAddrMode5FP16(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
  template <unsigned Shift>
  bool SelectTAddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  template <unsigned Shift>
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                 SDValue &OffImm);
  template <unsigned Shift>
  bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm);
  bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm,
                                  unsigned Shift);
  template <unsigned Shift>
  bool SelectT2AddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);

  template<int Min, int Max>
  bool SelectImmediateInRange(SDValue N, SDValue &OffImm);

  // Immediate predicates referenced from TableGen patterns: test whether Imm
  // (or its bitwise complement) is encodable as an ARM / Thumb-2 modified
  // immediate.
  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  void transferMemOperands(SDNode *Src, SDNode *Dst);

  /// Indexed (pre/post inc/dec) load matching code for ARM.
  bool tryARMIndexedLoad(SDNode *N);
  bool tryT1IndexedLoad(SDNode *N);
  bool tryT2IndexedLoad(SDNode *N);
  bool tryMVEIndexedLoad(SDNode *N);
  bool tryFMULFixed(SDNode *N, SDLoc dl);
  bool tryFP_TO_INT(SDNode *N, SDLoc dl);
  bool transformFixedFloatingPointConversion(SDNode *N, SDNode *FMul,
                                             bool IsUnsigned,
                                             bool FixedToFloat);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  void SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
                 const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  void SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
                 const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  void SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
                       unsigned NumVecs, const uint16_t *DOpcodes,
                       const uint16_t *QOpcodes);

  /// Helper functions for setting up clusters of MVE predication operands.
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask);
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask, SDValue Inactive);

  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc);
  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc, EVT InactiveTy);

  /// SelectMVE_WB - Select MVE writeback load/store intrinsics.
  void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated);

  /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics.
  void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate,
                           bool HasSaturationOperand);

  /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics.
  void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                         uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);

  /// SelectMVE_VSHLC - Select MVE intrinsics for a shift that carries between
  /// vector lanes.
  void SelectMVE_VSHLC(SDNode *N, bool Predicated);

  /// Select long MVE vector reductions with two vector operands
  /// Stride is the number of vector element widths the instruction can operate
  /// on:
  /// 2 for long non-rounding variants, vml{a,s}ldav[a][x]: [i16, i32]
  /// 1 for long rounding variants: vrml{a,s}ldavh[a][x]: [i32]
  /// Stride is used when addressing the OpcodesS array which contains multiple
  /// opcodes for each element width.
  /// TySize is the index into the list of element types listed above
  void SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
                             const uint16_t *OpcodesS, const uint16_t *OpcodesU,
                             size_t Stride, size_t TySize);

  /// Select a 64-bit MVE vector reduction with two vector operands
  /// arm_mve_vmlldava_[predicated]
  void SelectMVE_VMLLDAV(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
                         const uint16_t *OpcodesU);
  /// Select a 72-bit MVE vector rounding reduction with two vector operands
  /// int_arm_mve_vrmlldavha[_predicated]
  void SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
                           const uint16_t *OpcodesU);

  /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
  /// should be 2 or 4. The opcode array specifies the instructions
  /// used for 8, 16 and 32-bit lane sizes respectively, and each
  /// pointer points to a set of NumVecs sub-opcodes used for the
  /// different stages (e.g. VLD20 versus VLD21) of each load family.
  void SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                     const uint16_t *const *Opcodes, bool HasWriteback);

  /// SelectMVE_VxDUP - Select MVE incrementing-dup instructions. Opcodes is an
  /// array of 3 elements for the 8, 16 and 32-bit lane sizes.
  void SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
                       bool Wrapping, bool Predicated);

  /// SelectCDE_CXxD - Select CDE dual-GPR instruction (one of CX1D,
  /// CX1DA, CX2D, CX2DA, CX3, CX3DA).
  /// \arg \c NumExtraOps number of extra operands besides the coprocessor,
  ///                     the accumulator and the immediate operand, i.e. 0
  ///                     for CX1*, 1 for CX2*, 2 for CX3*
  /// \arg \c HasAccum whether the instruction has an accumulator operand
  void SelectCDE_CXxD(SDNode *N, uint16_t Opcode, size_t NumExtraOps,
                      bool HasAccum);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 1, 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.
  void SelectVLDDup(SDNode *N, bool IsIntrinsic, bool isUpdating,
                    unsigned NumVecs, const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0 = nullptr,
                    const uint16_t *QOpcodes1 = nullptr);

  /// Try to select SBFX/UBFX instructions for ARM.
  bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  bool tryInsertVectorElt(SDNode *N);

  // Select special operations if node forms integer ABS pattern
  bool tryABSOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

  bool tryInlineAsm(SDNode *N);

  void SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI);

  void SelectCMP_SWAP(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    InlineAsm::ConstraintCode ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
                        bool is64BitVector);

  /// Checks if N is a multiplication by a constant where we can extract out a
  /// power of two from the constant so that it can be used in a shift, but only
  /// if it simplifies the materialization of the constant. Returns true if it
  /// is, and assigns to PowerOfTwo the power of two that should be extracted
  /// out and to NewMulConst the new constant to be multiplied by.
  bool canExtractShiftFromMul(const SDValue &N, unsigned MaxShift,
                              unsigned &PowerOfTwo, SDValue &NewMulConst) const;

  /// Replace N with M in CurDAG, in a way that also ensures that M gets
  /// selected when N would have been selected.
  void replaceDAGValue(const SDValue &N, SDValue M);
};
365 }
366 
// Legacy pass ID definition; its address identifies the pass.
char ARMDAGToDAGISel::ID = 0;

// Register the pass with the legacy pass manager.
INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
370 
371 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
372 /// operand. If so Imm will receive the 32-bit value.
373 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
374   if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
375     Imm = cast<ConstantSDNode>(N)->getZExtValue();
376     return true;
377   }
378   return false;
379 }
380 
381 // isInt32Immediate - This method tests to see if a constant operand.
382 // If so Imm will receive the 32 bit value.
383 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
384   return isInt32Immediate(N.getNode(), Imm);
385 }
386 
387 // isOpcWithIntImmediate - This method tests to see if the node is a specific
388 // opcode and that it has a immediate integer right operand.
389 // If so Imm will receive the 32 bit value.
390 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
391   return N->getOpcode() == Opc &&
392          isInt32Immediate(N->getOperand(1).getNode(), Imm);
393 }
394 
395 /// Check whether a particular node is a constant value representable as
396 /// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
397 ///
398 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
399 static bool isScaledConstantInRange(SDValue Node, int Scale,
400                                     int RangeMin, int RangeMax,
401                                     int &ScaledConstant) {
402   assert(Scale > 0 && "Invalid scale!");
403 
404   // Check that this is a constant.
405   const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
406   if (!C)
407     return false;
408 
409   ScaledConstant = (int) C->getZExtValue();
410   if ((ScaledConstant % Scale) != 0)
411     return false;
412 
413   ScaledConstant /= Scale;
414   return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
415 }
416 
/// Pre-isel rewrite: reshape (add X1, (and (srl X2, c1), c2)) so that the
/// and/srl pair can become a UBFX bit-extraction and the scaling folds into
/// the add as a shifter operand. Requires V6T2 (where UBFX exists).
void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  // V6T2 was required above, so any Thumb mode reaching here is Thumb2.
  bool isThumb2 = Subtarget->isThumb();
  // We use make_early_inc_range to avoid invalidation issues.
  for (SDNode &N : llvm::make_early_inc_range(CurDAG->allnodes())) {
    if (N.getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as an shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).

    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned And_imm = 0;
    // Canonicalize so that the (and X, imm) operand, if any, ends up in N1.
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    // And_imm == 0 means neither operand was an AND with an immediate.
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = llvm::countr_zero(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, left shifter operand of 1 / 2 for free but others are not.
      // e.g.
      //  ubfx   r3, r1, #16, #8
      //  ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //  mov.w  r9, #1020
      //  and.w  r2, r9, r1, lsr #14
      //  ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    // After dropping the trailing zeros the mask must be contiguous ones.
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      // ARM mode additionally supports register-controlled shifter operands.
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation: srl by (c1 + tz), mask with (c2 >> tz),
    // then shl by tz, and splice the result back into the add.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
                                              MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl,
                         CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
    CurDAG->UpdateNodeOperands(&N, N0, N1);
  }
}
497 
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  // At -O0 don't bother working around the hazard.
  if (OptLevel == CodeGenOptLevel::None)
    return true;

  // Only relevant on subtargets that actually exhibit VMLx hazards.
  if (!Subtarget->hasVMLxHazards())
    return true;

  // The reasoning below inspects N's single consumer; with multiple uses we
  // conservatively report a hazard.
  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
        CurDAG->getSubtarget().getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}
542 
543 bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
544                                             ARM_AM::ShiftOpc ShOpcVal,
545                                             unsigned ShAmt) {
546   if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
547     return true;
548   if (Shift.hasOneUse())
549     return true;
550   // R << 2 is free.
551   return ShOpcVal == ARM_AM::lsl &&
552          (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
553 }
554 
555 bool ARMDAGToDAGISel::canExtractShiftFromMul(const SDValue &N,
556                                              unsigned MaxShift,
557                                              unsigned &PowerOfTwo,
558                                              SDValue &NewMulConst) const {
559   assert(N.getOpcode() == ISD::MUL);
560   assert(MaxShift > 0);
561 
562   // If the multiply is used in more than one place then changing the constant
563   // will make other uses incorrect, so don't.
564   if (!N.hasOneUse()) return false;
565   // Check if the multiply is by a constant
566   ConstantSDNode *MulConst = dyn_cast<ConstantSDNode>(N.getOperand(1));
567   if (!MulConst) return false;
568   // If the constant is used in more than one place then modifying it will mean
569   // we need to materialize two constants instead of one, which is a bad idea.
570   if (!MulConst->hasOneUse()) return false;
571   unsigned MulConstVal = MulConst->getZExtValue();
572   if (MulConstVal == 0) return false;
573 
574   // Find the largest power of 2 that MulConstVal is a multiple of
575   PowerOfTwo = MaxShift;
576   while ((MulConstVal % (1 << PowerOfTwo)) != 0) {
577     --PowerOfTwo;
578     if (PowerOfTwo == 0) return false;
579   }
580 
581   // Only optimise if the new cost is better
582   unsigned NewMulConstVal = MulConstVal / (1 << PowerOfTwo);
583   NewMulConst = CurDAG->getConstant(NewMulConstVal, SDLoc(N), MVT::i32);
584   unsigned OldCost = ConstantMaterializationCost(MulConstVal, Subtarget);
585   unsigned NewCost = ConstantMaterializationCost(NewMulConstVal, Subtarget);
586   return NewCost < OldCost;
587 }
588 
/// Replace N with M in CurDAG, in a way that also ensures that M gets
/// selected when N would have been selected.
void ARMDAGToDAGISel::replaceDAGValue(const SDValue &N, SDValue M) {
  // Move M to N's position in the node list so the selector still visits it
  // in topological order before replacing all uses of N with M.
  CurDAG->RepositionNode(N.getNode()->getIterator(), M.getNode());
  ReplaceUses(N, M);
}
593 
/// SelectImmShifterOperand - Match N as a register shifted by a constant
/// amount (a "shifter operand"). On success BaseReg receives the shifted
/// register and Opc the encoded shift-op + immediate.
/// NOTE(review): CheckProfitability is not consulted in this overload —
/// immediate shifts are always accepted.
bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  // If N is a multiply-by-constant and it's profitable to extract a shift and
  // use it in a shifted operand do so.
  if (N.getOpcode() == ISD::MUL) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
      // Handle keeps N alive across the DAG mutation performed by
      // replaceDAGValue below.
      HandleSDNode Handle(N);
      SDLoc Loc(N);
      replaceDAGValue(N.getOperand(1), NewMulConst);
      BaseReg = Handle.getValue();
      // The extracted power of two becomes an LSL shifter operand.
      Opc = CurDAG->getTargetConstant(
          ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32);
      return true;
    }
  }

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  // Only constant shift amounts are handled here; register-controlled shifts
  // are matched by SelectRegShifterOperand.
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
632 
/// SelectRegShifterOperand - Match N as a register shifted by another
/// register. On success BaseReg/ShReg receive the two registers and Opc the
/// encoded shift opcode (with a zero immediate field).
bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  // ShImmVal stays 0: register-shifted operands encode a zero immediate.
  unsigned ShImmVal = 0;
  // A constant shift amount belongs to SelectImmShifterOperand, so reject it
  // here and only match a register-controlled shift.
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
659 
660 // Determine whether an ISD::OR's operands are suitable to turn the operation
661 // into an addition, which often has more compact encodings.
662 bool ARMDAGToDAGISel::SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out) {
663   assert(Parent->getOpcode() == ISD::OR && "unexpected parent");
664   Out = N;
665   return CurDAG->haveNoCommonBitsSet(N, Parent->getOperand(1));
666 }
667 
668 
/// SelectAddrModeImm12 - Match N as a base register plus a signed 12-bit
/// immediate offset. Always succeeds: anything that doesn't fit the pattern
/// is returned as "base only" with a zero offset.
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    // Look through ARMISD::Wrapper, but not around target addresses, which
    // must remain wrapped.
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    // A subtracted constant is an added negative constant.
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
      Base   = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
719 
720 
721 
/// SelectLdStSOReg - Match an ARM addrmode2 scaled-register address:
/// base register +/- (possibly shifted) offset register. On success, Base and
/// Offset hold the two registers and Opc encodes add/sub direction, shift
/// amount and shift kind via ARM_AM::getAM2Opc. Returns false for addresses
/// better served by the immediate (LDRi12) form.
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  // Fold an odd-constant multiply into reg + (reg << n) so the barrel shifter
  // performs the multiply. On A9-like/Swift cores only do this when the mul
  // has a single use, since shifted-register addressing is slower there.
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        // Clear the low bit: X*(2^n+1) == X + (X << n).
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  // First try to fold a shift on the RHS operand into the addressing mode.
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        // Not profitable: keep the un-shifted RHS as the offset register.
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  // (Only for ADD — the shifted operand of a SUB must be on the RHS.)
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          // Commute: the shifted LHS becomes the offset operand.
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  // If Offset is a multiply-by-constant and it's profitable to extract a shift
  // and use it in a shifted operand do so.
  if (Offset.getOpcode() == ISD::MUL && N.hasOneUse()) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(Offset, 31, PowerOfTwo, NewMulConst)) {
      // Rewrite the multiply in the DAG, then shift the (smaller) product
      // through the addressing mode. Handle keeps Offset alive across RAUW.
      HandleSDNode Handle(Offset);
      replaceDAGValue(Offset.getOperand(1), NewMulConst);
      Offset = Handle.getValue();
      ShAmt = PowerOfTwo;
      ShOpcVal = ARM_AM::lsl;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
829 
/// SelectAddrMode2OffsetReg - Select the register-offset operand for a
/// pre/post-indexed load or store (Op). N is the increment/decrement value.
/// Fails (returns false) if N is a small constant, which the immediate-offset
/// variants handle instead.
bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  // Op is the indexed memory node; read its addressing mode to decide the
  // add/sub direction of the update.
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  // Constants that fit the 12-bit immediate form are left for the imm variant.
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  // Try to fold a shift of the offset register into the addressing mode.
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
865 
866 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
867                                             SDValue &Offset, SDValue &Opc) {
868   unsigned Opcode = Op->getOpcode();
869   ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
870     ? cast<LoadSDNode>(Op)->getAddressingMode()
871     : cast<StoreSDNode>(Op)->getAddressingMode();
872   ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
873     ? ARM_AM::add : ARM_AM::sub;
874   int Val;
875   if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
876     if (AddSub == ARM_AM::sub) Val *= -1;
877     Offset = CurDAG->getRegister(0, MVT::i32);
878     Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
879     return true;
880   }
881 
882   return false;
883 }
884 
885 
886 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
887                                             SDValue &Offset, SDValue &Opc) {
888   unsigned Opcode = Op->getOpcode();
889   ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
890     ? cast<LoadSDNode>(Op)->getAddressingMode()
891     : cast<StoreSDNode>(Op)->getAddressingMode();
892   ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
893     ? ARM_AM::add : ARM_AM::sub;
894   int Val;
895   if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
896     Offset = CurDAG->getRegister(0, MVT::i32);
897     Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
898                                                       ARM_AM::no_shift),
899                                     SDLoc(Op), MVT::i32);
900     return true;
901   }
902 
903   return false;
904 }
905 
/// SelectAddrOffsetNone - Accept any address as a bare base register with no
/// offset operand. Never fails.
bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}
910 
/// SelectAddrMode3 - Match an ARM addrmode3 address (halfword/doubleword
/// loads/stores): [Rn, +/-Rm] or [Rn, #+/-imm8]. Always succeeds; the
/// fall-back is register+register with no immediate.
bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C  is canonicalize to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    // No foldable offset: base-only, with a frame index lowered to its
    // target form if applicable.
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    // Register 0 marks "no offset register"; the immediate goes in Opc.
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // Offset too large for imm8: use register + register.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                  MVT::i32);
  return true;
}
964 
/// SelectAddrMode3Offset - Select the offset operand for a pre/post-indexed
/// addrmode3 load/store (Op). A small constant becomes an immediate (with
/// register 0 as the offset); anything else is used as the offset register.
/// Always succeeds.
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
                                    MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
                                  MVT::i32);
  return true;
}
986 
/// IsAddressingMode5 - Shared matcher for VFP addrmode5 addresses:
/// [Rn, #+/-(imm8 << 2)], or #+/-(imm8 << 1) when FP16 is true. Always
/// succeeds; the fall-back is base-only with a zero offset.
bool ARMDAGToDAGISel::IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset,
                                        bool FP16) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      // Lower a frame index to its target form.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
               N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      // Strip the wrapper around constant pools etc.; the excluded target
      // nodes need their own lowering.
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  // The hardware offset is scaled: by 2 for FP16 accesses, by 4 otherwise.
  const int Scale = FP16 ? 2 : 4;

  if (isScaledConstantInRange(N.getOperand(1), Scale, -255, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }

    if (FP16)
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);
    else
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);

    return true;
  }

  // Offset not foldable: base-only with a zero immediate.
  Base = N;

  if (FP16)
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
  else
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);

  return true;
}
1045 
/// SelectAddrMode5 - Addrmode5 for 32/64-bit VFP loads/stores (imm8 << 2).
bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ false);
}
1050 
/// SelectAddrMode5FP16 - Addrmode5 variant for FP16 loads/stores (imm8 << 1).
bool ARMDAGToDAGISel::SelectAddrMode5FP16(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ true);
}
1055 
/// SelectAddrMode6 - Select a NEON addrmode6 address: the raw address plus an
/// alignment operand derived from the parent memory node. Always succeeds.
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;

  MemSDNode *MemN = cast<MemSDNode>(Parent);

  if (isa<LSBaseSDNode>(MemN) ||
      ((MemN->getOpcode() == ARMISD::VST1_UPD ||
        MemN->getOpcode() == ARMISD::VLD1_UPD) &&
       MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    llvm::Align MMOAlign = MemN->getAlign();
    unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
    if (MMOAlign.value() >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = MemN->getAlign().value();
  }

  Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
  return true;
}
1084 
/// SelectAddrMode6Offset - Select the post-increment operand of an addrmode6
/// load/store. A constant increment equal to the access size is represented
/// by register 0 (the implicit "step by access size" form); any other value
/// is used as the increment register. Fails unless Op is post-indexed.
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    // Increment (in bytes) matches the memory size: use the reg-0 sentinel.
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}
1098 
1099 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1100                                        SDValue &Offset, SDValue &Label) {
1101   if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1102     Offset = N.getOperand(0);
1103     SDValue N1 = N.getOperand(1);
1104     Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1105                                       SDLoc(N), MVT::i32);
1106     return true;
1107   }
1108 
1109   return false;
1110 }
1111 
1112 
1113 //===----------------------------------------------------------------------===//
1114 //                         Thumb Addressing Modes
1115 //===----------------------------------------------------------------------===//
1116 
1117 static bool shouldUseZeroOffsetLdSt(SDValue N) {
1118   // Negative numbers are difficult to materialise in thumb1. If we are
1119   // selecting the add of a negative, instead try to select ri with a zero
1120   // offset, so create the add node directly which will become a sub.
1121   if (N.getOpcode() != ISD::ADD)
1122     return false;
1123 
1124   // Look for an imm which is not legal for ld/st, but is legal for sub.
1125   if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1126     return C->getSExtValue() < 0 && C->getSExtValue() >= -255;
1127 
1128   return false;
1129 }
1130 
1131 bool ARMDAGToDAGISel::SelectThumbAddrModeRRSext(SDValue N, SDValue &Base,
1132                                                 SDValue &Offset) {
1133   if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1134     if (!isNullConstant(N))
1135       return false;
1136 
1137     Base = Offset = N;
1138     return true;
1139   }
1140 
1141   Base = N.getOperand(0);
1142   Offset = N.getOperand(1);
1143   return true;
1144 }
1145 
1146 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base,
1147                                             SDValue &Offset) {
1148   if (shouldUseZeroOffsetLdSt(N))
1149     return false; // Select ri instead
1150   return SelectThumbAddrModeRRSext(N, Base, Offset);
1151 }
1152 
/// SelectThumbAddrModeImm5S - Match a Thumb base + (imm5 * Scale) address.
/// Fails when a register-offset or zero-offset form would be preferable.
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (shouldUseZeroOffsetLdSt(N)) {
    // Select the whole add as the base; the negative immediate becomes a sub.
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::ADD) {
      return false; // We want to select register offset instead
    } else if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetConstantPool &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      // Strip the wrapper; the excluded target nodes have their own lowering.
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  // Offset is too large, so use register offset instead.
  return false;
}
1190 
/// Thumb imm5 address with 4-byte scaling (word loads/stores).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}
1196 
/// Thumb imm5 address with 2-byte scaling (halfword loads/stores).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}
1202 
/// Thumb imm5 address with no scaling (byte loads/stores).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}
1208 
/// SelectThumbAddrModeSP - Match a Thumb SP-relative address: a frame index,
/// or frame index + (imm8 * 4). May raise the frame object's alignment to 4
/// so that the word-scaled offset stays valid.
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    // Only multiples of 4 are allowed for the offset, so the frame object
    // alignment must be at least 4.
    MachineFrameInfo &MFI = MF->getFrameInfo();
    if (MFI.getObjectAlign(FI) < Align(4))
      MFI.setObjectAlignment(FI, Align(4));
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (N.getOperand(0).getOpcode() == ISD::FrameIndex) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      // Make sure the offset is inside the object, or we might fail to
      // allocate an emergency spill slot. (An out-of-range access is UB, but
      // it could show up anyway.)
      MachineFrameInfo &MFI = MF->getFrameInfo();
      if (RHSC * 4 < MFI.getObjectSize(FI)) {
        // For LHS+RHS to result in an offset that's a multiple of 4 the object
        // indexed by the LHS must be 4-byte aligned.
        if (!MFI.isFixedObjectIndex(FI) && MFI.getObjectAlign(FI) < Align(4))
          MFI.setObjectAlignment(FI, Align(4));
        if (MFI.getObjectAlign(FI) >= Align(4)) {
          Base = CurDAG->getTargetFrameIndex(
              FI, TLI->getPointerTy(CurDAG->getDataLayout()));
          OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  return false;
}
1254 
/// SelectTAddrModeImm7 - Match a Thumb base + signed (imm7 << Shift) address.
/// Always succeeds; the fall-back is base-only with a zero offset.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectTAddrModeImm7(SDValue N, SDValue &Base,
                                          SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      // SUB stores a positive constant; negate to get the real offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      // The instruction takes the unscaled byte offset.
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1276 
1277 
1278 //===----------------------------------------------------------------------===//
1279 //                        Thumb 2 Addressing Modes
1280 //===----------------------------------------------------------------------===//
1281 
1282 
/// SelectT2AddrModeImm12 - Match a Thumb2 base + unsigned imm12 address
/// (t2LDRi12 form). Fails when the t2LDRi8 negative-offset form or a
/// pc-relative t2LDRpci is preferable.
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base   = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm  = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1338 
/// SelectT2AddrModeImm8 (templated) - Match a Thumb2 base + signed
/// (imm8 << Shift) address. Always succeeds; the fall-back is base-only.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -255, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        // Lower a frame index base to its target form.
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      // SUB stores a positive constant; negate to get the real offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      // The instruction takes the unscaled byte offset.
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1365 
/// SelectT2AddrModeImm8 - Match a Thumb2 base with a strictly negative imm8
/// offset (the t2LDRi8 form). Positive offsets are left for t2LDRi12.
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        // Lower a frame index base to its target form.
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}
1392 
/// SelectT2AddrModeImm8Offset - Select the imm8 offset operand of a Thumb2
/// pre/post-indexed load/store (Op); negated for the decrementing forms.
bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}
1409 
/// SelectT2AddrModeImm7 - Match a Thumb2/MVE base + signed (imm7 << Shift)
/// address. Always succeeds; the fall-back is base-only with a zero offset.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        // Lower a frame index base to its target form.
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      // SUB stores a positive constant; negate to get the real offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      // The instruction takes the unscaled byte offset.
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1437 
/// Template wrapper exposing the runtime-Shift overload to TableGen patterns.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  return SelectT2AddrModeImm7Offset(Op, N, OffImm, Shift);
}
1443 
/// SelectT2AddrModeImm7Offset - Select the (imm7 << Shift) offset operand of
/// a pre/post-indexed (possibly masked) load/store; negated for the
/// decrementing forms.
bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm,
                                                 unsigned Shift) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM;
  switch (Opcode) {
  case ISD::LOAD:
    AM = cast<LoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::STORE:
    AM = cast<StoreSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MLOAD:
    AM = cast<MaskedLoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MSTORE:
    AM = cast<MaskedStoreSDNode>(Op)->getAddressingMode();
    break;
  default:
    llvm_unreachable("Unexpected Opcode for Imm7Offset");
  }

  int RHSC;
  // 7 bit constant, shifted by Shift.
  if (isScaledConstantInRange(N, 1 << Shift, 0, 0x80, RHSC)) {
    // The instruction takes the unscaled byte offset, signed by direction.
    OffImm =
        ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
            ? CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32)
            : CurDAG->getTargetConstant(-RHSC * (1 << Shift), SDLoc(N),
                                        MVT::i32);
    return true;
  }
  return false;
}
1478 
1479 template <int Min, int Max>
1480 bool ARMDAGToDAGISel::SelectImmediateInRange(SDValue N, SDValue &OffImm) {
1481   int Val;
1482   if (isScaledConstantInRange(N, 1, Min, Max, Val)) {
1483     OffImm = CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
1484     return true;
1485   }
1486   return false;
1487 }
1488 
/// SelectT2AddrModeSoReg - Match a Thumb2 register + shifted-register address:
/// (R + R) or (R + (R << [1,2,3])). Fails for addresses the immediate forms
/// (t2LDRi12/t2LDRi8) should handle.
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      // Only shift amounts 1-3 are encodable in this addressing mode.
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
      }
    }
  }

  // If OffReg is a multiply-by-constant and it's profitable to extract a shift
  // and use it in a shifted operand do so.
  if (OffReg.getOpcode() == ISD::MUL && N.hasOneUse()) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(OffReg, 3, PowerOfTwo, NewMulConst)) {
      // Rewrite the multiply in the DAG; Handle keeps OffReg alive across
      // the replacement.
      HandleSDNode Handle(OffReg);
      replaceDAGValue(OffReg.getOperand(1), NewMulConst);
      OffReg = Handle.getValue();
      ShAmt = PowerOfTwo;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);

  return true;
}
1548 
1549 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
1550                                                 SDValue &OffImm) {
1551   // This *must* succeed since it's used for the irreplaceable ldrex and strex
1552   // instructions.
1553   Base = N;
1554   OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1555 
1556   if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1557     return true;
1558 
1559   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1560   if (!RHS)
1561     return true;
1562 
1563   uint32_t RHSC = (int)RHS->getZExtValue();
1564   if (RHSC > 1020 || RHSC % 4 != 0)
1565     return true;
1566 
1567   Base = N.getOperand(0);
1568   if (Base.getOpcode() == ISD::FrameIndex) {
1569     int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1570     Base = CurDAG->getTargetFrameIndex(
1571         FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1572   }
1573 
1574   OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
1575   return true;
1576 }
1577 
1578 //===--------------------------------------------------------------------===//
1579 
1580 /// getAL - Returns a ARMCC::AL immediate node.
1581 static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
1582   return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
1583 }
1584 
1585 void ARMDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
1586   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1587   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
1588 }
1589 
1590 bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) {
1591   LoadSDNode *LD = cast<LoadSDNode>(N);
1592   ISD::MemIndexedMode AM = LD->getAddressingMode();
1593   if (AM == ISD::UNINDEXED)
1594     return false;
1595 
1596   EVT LoadedVT = LD->getMemoryVT();
1597   SDValue Offset, AMOpc;
1598   bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1599   unsigned Opcode = 0;
1600   bool Match = false;
1601   if (LoadedVT == MVT::i32 && isPre &&
1602       SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1603     Opcode = ARM::LDR_PRE_IMM;
1604     Match = true;
1605   } else if (LoadedVT == MVT::i32 && !isPre &&
1606       SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1607     Opcode = ARM::LDR_POST_IMM;
1608     Match = true;
1609   } else if (LoadedVT == MVT::i32 &&
1610       SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1611     Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1612     Match = true;
1613 
1614   } else if (LoadedVT == MVT::i16 &&
1615              SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1616     Match = true;
1617     Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1618       ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1619       : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1620   } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1621     if (LD->getExtensionType() == ISD::SEXTLOAD) {
1622       if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1623         Match = true;
1624         Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1625       }
1626     } else {
1627       if (isPre &&
1628           SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1629         Match = true;
1630         Opcode = ARM::LDRB_PRE_IMM;
1631       } else if (!isPre &&
1632                   SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1633         Match = true;
1634         Opcode = ARM::LDRB_POST_IMM;
1635       } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1636         Match = true;
1637         Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1638       }
1639     }
1640   }
1641 
1642   if (Match) {
1643     if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1644       SDValue Chain = LD->getChain();
1645       SDValue Base = LD->getBasePtr();
1646       SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
1647                        CurDAG->getRegister(0, MVT::i32), Chain };
1648       SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1649                                            MVT::Other, Ops);
1650       transferMemOperands(N, New);
1651       ReplaceNode(N, New);
1652       return true;
1653     } else {
1654       SDValue Chain = LD->getChain();
1655       SDValue Base = LD->getBasePtr();
1656       SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
1657                        CurDAG->getRegister(0, MVT::i32), Chain };
1658       SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1659                                            MVT::Other, Ops);
1660       transferMemOperands(N, New);
1661       ReplaceNode(N, New);
1662       return true;
1663     }
1664   }
1665 
1666   return false;
1667 }
1668 
1669 bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) {
1670   LoadSDNode *LD = cast<LoadSDNode>(N);
1671   EVT LoadedVT = LD->getMemoryVT();
1672   ISD::MemIndexedMode AM = LD->getAddressingMode();
1673   if (AM != ISD::POST_INC || LD->getExtensionType() != ISD::NON_EXTLOAD ||
1674       LoadedVT.getSimpleVT().SimpleTy != MVT::i32)
1675     return false;
1676 
1677   auto *COffs = dyn_cast<ConstantSDNode>(LD->getOffset());
1678   if (!COffs || COffs->getZExtValue() != 4)
1679     return false;
1680 
1681   // A T1 post-indexed load is just a single register LDM: LDM r0!, {r1}.
1682   // The encoding of LDM is not how the rest of ISel expects a post-inc load to
1683   // look however, so we use a pseudo here and switch it for a tLDMIA_UPD after
1684   // ISel.
1685   SDValue Chain = LD->getChain();
1686   SDValue Base = LD->getBasePtr();
1687   SDValue Ops[]= { Base, getAL(CurDAG, SDLoc(N)),
1688                    CurDAG->getRegister(0, MVT::i32), Chain };
1689   SDNode *New = CurDAG->getMachineNode(ARM::tLDR_postidx, SDLoc(N), MVT::i32,
1690                                        MVT::i32, MVT::Other, Ops);
1691   transferMemOperands(N, New);
1692   ReplaceNode(N, New);
1693   return true;
1694 }
1695 
1696 bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) {
1697   LoadSDNode *LD = cast<LoadSDNode>(N);
1698   ISD::MemIndexedMode AM = LD->getAddressingMode();
1699   if (AM == ISD::UNINDEXED)
1700     return false;
1701 
1702   EVT LoadedVT = LD->getMemoryVT();
1703   bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1704   SDValue Offset;
1705   bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1706   unsigned Opcode = 0;
1707   bool Match = false;
1708   if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1709     switch (LoadedVT.getSimpleVT().SimpleTy) {
1710     case MVT::i32:
1711       Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1712       break;
1713     case MVT::i16:
1714       if (isSExtLd)
1715         Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1716       else
1717         Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1718       break;
1719     case MVT::i8:
1720     case MVT::i1:
1721       if (isSExtLd)
1722         Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1723       else
1724         Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1725       break;
1726     default:
1727       return false;
1728     }
1729     Match = true;
1730   }
1731 
1732   if (Match) {
1733     SDValue Chain = LD->getChain();
1734     SDValue Base = LD->getBasePtr();
1735     SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
1736                      CurDAG->getRegister(0, MVT::i32), Chain };
1737     SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1738                                          MVT::Other, Ops);
1739     transferMemOperands(N, New);
1740     ReplaceNode(N, New);
1741     return true;
1742   }
1743 
1744   return false;
1745 }
1746 
/// Try to select an MVE pre/post-indexed vector load (plain or masked) for
/// \p N. Returns true and replaces the node on success; the cases are tried
/// in order, so earlier, more specific forms win.
bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
  EVT LoadedVT;
  unsigned Opcode = 0;
  bool isSExtLd, isPre;
  Align Alignment;
  ARMVCC::VPTCodes Pred;
  SDValue PredReg;
  SDValue Chain, Base, Offset;

  // Pull the common fields out of either a plain indexed load or a masked
  // indexed load; only the predication differs between the two.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    ISD::MemIndexedMode AM = LD->getAddressingMode();
    if (AM == ISD::UNINDEXED)
      return false;
    LoadedVT = LD->getMemoryVT();
    if (!LoadedVT.isVector())
      return false;

    Chain = LD->getChain();
    Base = LD->getBasePtr();
    Offset = LD->getOffset();
    Alignment = LD->getAlign();
    isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
    isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
    // Unpredicated: condition "none" with a dummy predicate register.
    Pred = ARMVCC::None;
    PredReg = CurDAG->getRegister(0, MVT::i32);
  } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
    ISD::MemIndexedMode AM = LD->getAddressingMode();
    if (AM == ISD::UNINDEXED)
      return false;
    LoadedVT = LD->getMemoryVT();
    if (!LoadedVT.isVector())
      return false;

    Chain = LD->getChain();
    Base = LD->getBasePtr();
    Offset = LD->getOffset();
    Alignment = LD->getAlign();
    isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
    isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
    // Masked: predicate with the load's mask under a "then" condition.
    Pred = ARMVCC::Then;
    PredReg = LD->getMask();
  } else
    llvm_unreachable("Expected a Load or a Masked Load!");

  // We allow LE non-masked loads to change the type (for example use a vldrb.8
  // as opposed to a vldrw.32). This can allow extra addressing modes or
  // alignments for what is otherwise an equivalent instruction.
  bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);

  // Match the memory type (and alignment) against the VLDR variants. The
  // last argument of SelectT2AddrModeImm7Offset is the shift applied to the
  // 7-bit offset: 0 for byte, 1 for half, 2 for word accesses.
  SDValue NewOffset;
  if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
      SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
    if (isSExtLd)
      Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
    else
      Opcode = isPre ? ARM::MVE_VLDRHU32_pre : ARM::MVE_VLDRHU32_post;
  } else if (LoadedVT == MVT::v8i8 &&
             SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
    if (isSExtLd)
      Opcode = isPre ? ARM::MVE_VLDRBS16_pre : ARM::MVE_VLDRBS16_post;
    else
      Opcode = isPre ? ARM::MVE_VLDRBU16_pre : ARM::MVE_VLDRBU16_post;
  } else if (LoadedVT == MVT::v4i8 &&
             SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
    if (isSExtLd)
      Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
    else
      Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
  } else if (Alignment >= Align(4) &&
             (CanChangeType || LoadedVT == MVT::v4i32 ||
              LoadedVT == MVT::v4f32) &&
             SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
    Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
  else if (Alignment >= Align(2) &&
           (CanChangeType || LoadedVT == MVT::v8i16 ||
            LoadedVT == MVT::v8f16) &&
           SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
    Opcode = isPre ? ARM::MVE_VLDRHU16_pre : ARM::MVE_VLDRHU16_post;
  else if ((CanChangeType || LoadedVT == MVT::v16i8) &&
           SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0))
    Opcode = isPre ? ARM::MVE_VLDRBU8_pre : ARM::MVE_VLDRBU8_post;
  else
    return false;

  SDValue Ops[] = {Base,
                   NewOffset,
                   CurDAG->getTargetConstant(Pred, SDLoc(N), MVT::i32),
                   PredReg,
                   CurDAG->getRegister(0, MVT::i32), // tp_reg
                   Chain};
  // The machine node's results are (writeback i32, loaded vector, chain),
  // while the load node's results are (loaded value, new pointer, chain) —
  // hence results 0 and 1 are swapped in the replacements below.
  SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                       N->getValueType(0), MVT::Other, Ops);
  transferMemOperands(N, New);
  ReplaceUses(SDValue(N, 0), SDValue(New, 1));
  ReplaceUses(SDValue(N, 1), SDValue(New, 0));
  ReplaceUses(SDValue(N, 2), SDValue(New, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}
1846 
1847 /// Form a GPRPair pseudo register from a pair of GPR regs.
1848 SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1849   SDLoc dl(V0.getNode());
1850   SDValue RegClass =
1851     CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
1852   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
1853   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
1854   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1855   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1856 }
1857 
1858 /// Form a D register from a pair of S registers.
1859 SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1860   SDLoc dl(V0.getNode());
1861   SDValue RegClass =
1862     CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
1863   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1864   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1865   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1866   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1867 }
1868 
1869 /// Form a quad register from a pair of D registers.
1870 SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1871   SDLoc dl(V0.getNode());
1872   SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
1873                                                MVT::i32);
1874   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1875   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1876   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1877   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1878 }
1879 
1880 /// Form 4 consecutive D registers from a pair of Q registers.
1881 SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1882   SDLoc dl(V0.getNode());
1883   SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1884                                                MVT::i32);
1885   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1886   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1887   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1888   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1889 }
1890 
1891 /// Form 4 consecutive S registers.
1892 SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1893                                    SDValue V2, SDValue V3) {
1894   SDLoc dl(V0.getNode());
1895   SDValue RegClass =
1896     CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
1897   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1898   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1899   SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
1900   SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
1901   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1902                                     V2, SubReg2, V3, SubReg3 };
1903   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1904 }
1905 
1906 /// Form 4 consecutive D registers.
1907 SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1908                                    SDValue V2, SDValue V3) {
1909   SDLoc dl(V0.getNode());
1910   SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1911                                                MVT::i32);
1912   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1913   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1914   SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
1915   SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
1916   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1917                                     V2, SubReg2, V3, SubReg3 };
1918   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1919 }
1920 
1921 /// Form 4 consecutive Q registers.
1922 SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1923                                    SDValue V2, SDValue V3) {
1924   SDLoc dl(V0.getNode());
1925   SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
1926                                                MVT::i32);
1927   SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1928   SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1929   SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
1930   SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
1931   const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1932                                     V2, SubReg2, V3, SubReg3 };
1933   return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1934 }
1935 
1936 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1937 /// of a NEON VLD or VST instruction.  The supported values depend on the
1938 /// number of registers being loaded.
1939 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
1940                                        unsigned NumVecs, bool is64BitVector) {
1941   unsigned NumRegs = NumVecs;
1942   if (!is64BitVector && NumVecs < 3)
1943     NumRegs *= 2;
1944 
1945   unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1946   if (Alignment >= 32 && NumRegs == 4)
1947     Alignment = 32;
1948   else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1949     Alignment = 16;
1950   else if (Alignment >= 8)
1951     Alignment = 8;
1952   else
1953     Alignment = 0;
1954 
1955   return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
1956 }
1957 
1958 static bool isVLDfixed(unsigned Opc)
1959 {
1960   switch (Opc) {
1961   default: return false;
1962   case ARM::VLD1d8wb_fixed : return true;
1963   case ARM::VLD1d16wb_fixed : return true;
1964   case ARM::VLD1d64Qwb_fixed : return true;
1965   case ARM::VLD1d32wb_fixed : return true;
1966   case ARM::VLD1d64wb_fixed : return true;
1967   case ARM::VLD1d8TPseudoWB_fixed : return true;
1968   case ARM::VLD1d16TPseudoWB_fixed : return true;
1969   case ARM::VLD1d32TPseudoWB_fixed : return true;
1970   case ARM::VLD1d64TPseudoWB_fixed : return true;
1971   case ARM::VLD1d8QPseudoWB_fixed : return true;
1972   case ARM::VLD1d16QPseudoWB_fixed : return true;
1973   case ARM::VLD1d32QPseudoWB_fixed : return true;
1974   case ARM::VLD1d64QPseudoWB_fixed : return true;
1975   case ARM::VLD1q8wb_fixed : return true;
1976   case ARM::VLD1q16wb_fixed : return true;
1977   case ARM::VLD1q32wb_fixed : return true;
1978   case ARM::VLD1q64wb_fixed : return true;
1979   case ARM::VLD1DUPd8wb_fixed : return true;
1980   case ARM::VLD1DUPd16wb_fixed : return true;
1981   case ARM::VLD1DUPd32wb_fixed : return true;
1982   case ARM::VLD1DUPq8wb_fixed : return true;
1983   case ARM::VLD1DUPq16wb_fixed : return true;
1984   case ARM::VLD1DUPq32wb_fixed : return true;
1985   case ARM::VLD2d8wb_fixed : return true;
1986   case ARM::VLD2d16wb_fixed : return true;
1987   case ARM::VLD2d32wb_fixed : return true;
1988   case ARM::VLD2q8PseudoWB_fixed : return true;
1989   case ARM::VLD2q16PseudoWB_fixed : return true;
1990   case ARM::VLD2q32PseudoWB_fixed : return true;
1991   case ARM::VLD2DUPd8wb_fixed : return true;
1992   case ARM::VLD2DUPd16wb_fixed : return true;
1993   case ARM::VLD2DUPd32wb_fixed : return true;
1994   case ARM::VLD2DUPq8OddPseudoWB_fixed: return true;
1995   case ARM::VLD2DUPq16OddPseudoWB_fixed: return true;
1996   case ARM::VLD2DUPq32OddPseudoWB_fixed: return true;
1997   }
1998 }
1999 
2000 static bool isVSTfixed(unsigned Opc)
2001 {
2002   switch (Opc) {
2003   default: return false;
2004   case ARM::VST1d8wb_fixed : return true;
2005   case ARM::VST1d16wb_fixed : return true;
2006   case ARM::VST1d32wb_fixed : return true;
2007   case ARM::VST1d64wb_fixed : return true;
2008   case ARM::VST1q8wb_fixed : return true;
2009   case ARM::VST1q16wb_fixed : return true;
2010   case ARM::VST1q32wb_fixed : return true;
2011   case ARM::VST1q64wb_fixed : return true;
2012   case ARM::VST1d8TPseudoWB_fixed : return true;
2013   case ARM::VST1d16TPseudoWB_fixed : return true;
2014   case ARM::VST1d32TPseudoWB_fixed : return true;
2015   case ARM::VST1d64TPseudoWB_fixed : return true;
2016   case ARM::VST1d8QPseudoWB_fixed : return true;
2017   case ARM::VST1d16QPseudoWB_fixed : return true;
2018   case ARM::VST1d32QPseudoWB_fixed : return true;
2019   case ARM::VST1d64QPseudoWB_fixed : return true;
2020   case ARM::VST2d8wb_fixed : return true;
2021   case ARM::VST2d16wb_fixed : return true;
2022   case ARM::VST2d32wb_fixed : return true;
2023   case ARM::VST2q8PseudoWB_fixed : return true;
2024   case ARM::VST2q16PseudoWB_fixed : return true;
2025   case ARM::VST2q32PseudoWB_fixed : return true;
2026   }
2027 }
2028 
// Get the register-stride (_register) update opcode of a VLD/VST instruction
// that is otherwise equivalent to the given fixed-stride (_fixed) updating
// instruction. Opcodes with no _register counterpart in this table are
// returned unchanged.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  assert((isVLDfixed(Opc) || isVSTfixed(Opc))
    && "Incorrect fixed stride updating instruction.");
  switch (Opc) {
  default: break;
  // VLD1 (plain, T/Q pseudos, and DUP variants).
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
  case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
  case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
  case ARM::VLD1d8TPseudoWB_fixed: return ARM::VLD1d8TPseudoWB_register;
  case ARM::VLD1d16TPseudoWB_fixed: return ARM::VLD1d16TPseudoWB_register;
  case ARM::VLD1d32TPseudoWB_fixed: return ARM::VLD1d32TPseudoWB_register;
  case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
  case ARM::VLD1d8QPseudoWB_fixed: return ARM::VLD1d8QPseudoWB_register;
  case ARM::VLD1d16QPseudoWB_fixed: return ARM::VLD1d16QPseudoWB_register;
  case ARM::VLD1d32QPseudoWB_fixed: return ARM::VLD1d32QPseudoWB_register;
  case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
  case ARM::VLD1DUPd8wb_fixed : return ARM::VLD1DUPd8wb_register;
  case ARM::VLD1DUPd16wb_fixed : return ARM::VLD1DUPd16wb_register;
  case ARM::VLD1DUPd32wb_fixed : return ARM::VLD1DUPd32wb_register;
  case ARM::VLD1DUPq8wb_fixed : return ARM::VLD1DUPq8wb_register;
  case ARM::VLD1DUPq16wb_fixed : return ARM::VLD1DUPq16wb_register;
  case ARM::VLD1DUPq32wb_fixed : return ARM::VLD1DUPq32wb_register;
  case ARM::VLD2DUPq8OddPseudoWB_fixed: return ARM::VLD2DUPq8OddPseudoWB_register;
  case ARM::VLD2DUPq16OddPseudoWB_fixed: return ARM::VLD2DUPq16OddPseudoWB_register;
  case ARM::VLD2DUPq32OddPseudoWB_fixed: return ARM::VLD2DUPq32OddPseudoWB_register;

  // VST1 (plain and T/Q pseudos).
  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d8TPseudoWB_fixed: return ARM::VST1d8TPseudoWB_register;
  case ARM::VST1d16TPseudoWB_fixed: return ARM::VST1d16TPseudoWB_register;
  case ARM::VST1d32TPseudoWB_fixed: return ARM::VST1d32TPseudoWB_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d8QPseudoWB_fixed: return ARM::VST1d8QPseudoWB_register;
  case ARM::VST1d16QPseudoWB_fixed: return ARM::VST1d16QPseudoWB_register;
  case ARM::VST1d32QPseudoWB_fixed: return ARM::VST1d32QPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  // VLD2.
  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  // VST2.
  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  // VLD2DUP.
  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}
2101 
2102 /// Returns true if the given increment is a Constant known to be equal to the
2103 /// access size performed by a NEON load/store. This means the "[rN]!" form can
2104 /// be used.
2105 static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) {
2106   auto C = dyn_cast<ConstantSDNode>(Inc);
2107   return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs;
2108 }
2109 
/// Select a NEON VLD1/2/3/4 for node \p N, replacing it with machine nodes.
/// \p isUpdating is true for the writeback (post-increment) form, \p NumVecs
/// is the number of vectors loaded (1-4), and the three opcode tables are
/// indexed by element size: \p DOpcodes for double-register results,
/// \p QOpcodes0/\p QOpcodes1 for the (possibly two-part) quad-register forms.
void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                const uint16_t *DOpcodes,
                                const uint16_t *QOpcodes0,
                                const uint16_t *QOpcodes1) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  bool IsIntrinsic = !isUpdating;  // By coincidence, all supported updating
                                   // nodes are not intrinsics.
  // Intrinsic nodes carry (chain, intrinsic-id, addr, ...); updating nodes
  // carry (chain, addr, ...).
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  // Clamp the alignment operand to what the instruction can encode.
  Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);

  // The opcode tables are indexed by element size: 0 = 8-bit, 1 = 16-bit,
  // 2 = 32-bit, 3 = 64-bit elements.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4f16:
  case MVT::v4bf16:
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8f16:
  case MVT::v8bf16:
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2f64:
  case MVT::v2i64: OpcodeIndex = 3; break;
  }

  // Multi-vector loads produce one wide i64-element vector value that the
  // individual results are later extracted from; 3-vector loads round the
  // result type up to 4 registers.
  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32); // Updated base-register result.
  ResTys.push_back(MVT::Other); // Chain.

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
      if (!IsImmUpdate) {
        // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
        // check for the opcode rather than the number of vector elements.
        if (isVLDfixed(Opc))
          Opc = getVLDSTRegisterUpdateOpcode(Opc);
        Ops.push_back(Inc);
      // VLD1/VLD2 fixed increment does not need Reg0 so only include it in
      // the operands if not such an opcode.
      } else if (!isVLDfixed(Opc))
        Ops.push_back(Reg0);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.  This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs, chaining off the first load's updated address
    // (result 1) and partially-filled vector (result 0).
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
  }

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLd), {MemOp});

  if (NumVecs == 1) {
    ReplaceNode(N, VLd);
    return;
  }

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
                    ARM::qsub_3 == ARM::qsub_0 + 3,
                "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  // Result NumVecs of N is the chain; NumVecs+1 (when updating) the new base.
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  CurDAG->RemoveDeadNode(N);
}
2251 
/// Select a NEON vector store (VST1-VST4), optionally with post-increment
/// address writeback.  \p DOpcodes indexes opcodes for 64-bit (D-register)
/// vectors and \p QOpcodes0 / \p QOpcodes1 for 128-bit (Q-register) vectors;
/// a quad-register VST3/VST4 is emitted as two instructions, with QOpcodes0
/// storing the even D subregs and QOpcodes1 the odd ones.
void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                const uint16_t *DOpcodes,
                                const uint16_t *QOpcodes0,
                                const uint16_t *QOpcodes1) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  bool IsIntrinsic = !isUpdating;  // By coincidence, all supported updating
                                   // nodes are not intrinsics.
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);

  // Map the vector type onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4f16:
  case MVT::v4bf16:
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8f16:
  case MVT::v8bf16:
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2f64:
  case MVT::v2i64: OpcodeIndex = 3; break;
  }

  // Results: an optional i32 for the written-back address, plus the chain.
  std::vector<EVT> ResTys;
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;

  // Double registers and VST1/VST2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    SDValue SrcReg;
    if (NumVecs == 1) {
      SrcReg = N->getOperand(Vec0Idx);
    } else if (is64BitVector) {
      // Form a REG_SEQUENCE to force register allocation.
      SDValue V0 = N->getOperand(Vec0Idx + 0);
      SDValue V1 = N->getOperand(Vec0Idx + 1);
      if (NumVecs == 2)
        SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
      else {
        SDValue V2 = N->getOperand(Vec0Idx + 2);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(Vec0Idx + 3);
        SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
      }
    } else {
      // Form a QQ register.
      SDValue Q0 = N->getOperand(Vec0Idx);
      SDValue Q1 = N->getOperand(Vec0Idx + 1);
      SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
    }

    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
      if (!IsImmUpdate) {
        // We use a VST1 for v1i64 even if the pseudo says VST2/3/4, so
        // check for the opcode rather than the number of vector elements.
        if (isVSTfixed(Opc))
          Opc = getVLDSTRegisterUpdateOpcode(Opc);
        Ops.push_back(Inc);
      }
      // VST1/VST2 fixed increment does not need Reg0 so only include it in
      // the operands if not such an opcode.
      else if (!isVSTfixed(Opc))
        Ops.push_back(Reg0);
    }
    Ops.push_back(SrcReg);
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

    // Transfer memoperands.
    CurDAG->setNodeMemRefs(cast<MachineSDNode>(VSt), {MemOp});

    ReplaceNode(N, VSt);
    return;
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.

  // Form the QQQQ REG_SEQUENCE.
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  SDValue V2 = N->getOperand(Vec0Idx + 2);
  SDValue V3 = (NumVecs == 3)
    ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
    : N->getOperand(Vec0Idx + 3);
  SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);

  // Store the even D registers.  This is always an updating store, so that it
  // provides the address to the second store for the odd subregs.
  const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
  SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                        MemAddr.getValueType(),
                                        MVT::Other, OpsA);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStA), {MemOp});
  Chain = SDValue(VStA, 1);

  // Store the odd D registers, using the first store's written-back address.
  Ops.push_back(SDValue(VStA, 0));
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    assert(isa<ConstantSDNode>(Inc.getNode()) &&
           "only constant post-increment update allowed for VST3/4");
    (void)Inc;
    Ops.push_back(Reg0);
  }
  Ops.push_back(RegSeq);
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);
  SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                        Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStB), {MemOp});
  ReplaceNode(N, VStB);
}
2405 
/// Select a NEON load/store-lane node (NumVecs = 2..4 vectors, a single lane
/// each), optionally with post-increment address writeback.  \p DOpcodes
/// covers 64-bit (D-register) vectors, \p QOpcodes 128-bit (Q-register)
/// vectors.
void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
                                      unsigned NumVecs,
                                      const uint16_t *DOpcodes,
                                      const uint16_t *QOpcodes) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  bool IsIntrinsic = !isUpdating;  // By coincidence, all supported updating
                                   // nodes are not intrinsics.
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Clamp the alignment operand: no larger than the bytes actually accessed,
  // a power of two, and values that are too small to be useful become 0
  // ("no alignment").  The 3-vector forms never take an alignment operand.
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);

  // Map the vector type onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4f16:
  case MVT::v4bf16:
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
    // Quad-register operations:
  case MVT::v8f16:
  case MVT::v8bf16:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  // Results: for a load, one wide i64 vector covering all the loaded
  // registers (padded to 4 for NumVecs == 3); then an optional i32 for the
  // written-back address; then the chain.
  std::vector<EVT> ResTys;
  if (IsLoad) {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
                                      MVT::i64, ResTyElts));
  }
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    bool IsImmUpdate =
        isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
    Ops.push_back(IsImmUpdate ? Reg0 : Inc);
  }

  // Bundle the input vectors into one super-register; the 3-vector forms pad
  // the fourth slot with an undef.
  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(Vec0Idx + 2);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(Vec0Idx + 3);
    if (is64BitVector)
      SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
  }
  Ops.push_back(SuperReg);
  Ops.push_back(getI32Imm(Lane, dl));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                                  QOpcodes[OpcodeIndex]);
  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdLn), {MemOp});
  if (!IsLoad) {
    ReplaceNode(N, VLdLn);
    return;
  }

  // Extract the subregisters.
  SuperReg = SDValue(VLdLn, 0);
  static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
                    ARM::qsub_3 == ARM::qsub_0 + 3,
                "Unexpected subreg numbering");
  unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  CurDAG->RemoveDeadNode(N);
}
2535 
2536 template <typename SDValueVector>
2537 void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2538                                            SDValue PredicateMask) {
2539   Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2540   Ops.push_back(PredicateMask);
2541   Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2542 }
2543 
2544 template <typename SDValueVector>
2545 void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2546                                            SDValue PredicateMask,
2547                                            SDValue Inactive) {
2548   Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2549   Ops.push_back(PredicateMask);
2550   Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2551   Ops.push_back(Inactive);
2552 }
2553 
2554 template <typename SDValueVector>
2555 void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc) {
2556   Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2557   Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2558   Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2559 }
2560 
2561 template <typename SDValueVector>
2562 void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2563                                                 EVT InactiveTy) {
2564   Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2565   Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2566   Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2567   Ops.push_back(SDValue(
2568       CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0));
2569 }
2570 
/// Select an MVE memory-access node that takes a vector of base addresses
/// plus a constant immediate offset and also produces the updated base
/// vector.  Opcodes[0] is the 32-bit-element form and Opcodes[1] the
/// 64-bit-element form, chosen by the element size of result 1.
void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
                                   bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  // Pick the opcode from the element size of the written-back base vector.
  uint16_t Opcode;
  switch (N->getValueType(1).getVectorElementType().getSizeInBits()) {
  case 32:
    Opcode = Opcodes[0];
    break;
  case 64:
    Opcode = Opcodes[1];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_WB");
  }

  Ops.push_back(N->getOperand(2)); // vector of base addresses

  int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
  Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
  else
    AddEmptyMVEPredicateToOps(Ops, Loc);

  Ops.push_back(N->getOperand(0)); // chain

  // The machine instruction's first two results are in the opposite order
  // to the original node's, so build the VT list swapped and remap the uses
  // accordingly below.
  SmallVector<EVT, 8> VTs;
  VTs.push_back(N->getValueType(1));
  VTs.push_back(N->getValueType(0));
  VTs.push_back(N->getValueType(2));

  SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), VTs, Ops);
  ReplaceUses(SDValue(N, 0), SDValue(New, 1));
  ReplaceUses(SDValue(N, 1), SDValue(New, 0));
  ReplaceUses(SDValue(N, 2), SDValue(New, 2));
  transferMemOperands(N, New);
  CurDAG->RemoveDeadNode(N);
}
2612 
2613 void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
2614                                           bool Immediate,
2615                                           bool HasSaturationOperand) {
2616   SDLoc Loc(N);
2617   SmallVector<SDValue, 8> Ops;
2618 
2619   // Two 32-bit halves of the value to be shifted
2620   Ops.push_back(N->getOperand(1));
2621   Ops.push_back(N->getOperand(2));
2622 
2623   // The shift count
2624   if (Immediate) {
2625     int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2626     Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2627   } else {
2628     Ops.push_back(N->getOperand(3));
2629   }
2630 
2631   // The immediate saturation operand, if any
2632   if (HasSaturationOperand) {
2633     int32_t SatOp = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
2634     int SatBit = (SatOp == 64 ? 0 : 1);
2635     Ops.push_back(getI32Imm(SatBit, Loc));
2636   }
2637 
2638   // MVE scalar shifts are IT-predicable, so include the standard
2639   // predicate arguments.
2640   Ops.push_back(getAL(CurDAG, Loc));
2641   Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2642 
2643   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2644 }
2645 
/// Select an MVE add/subtract-with-carry node.  If the incoming carry is a
/// compile-time constant whose carry bit already matches what a plain
/// add/subtract would start from, the carry operand is dropped and the
/// no-carry opcode variant used instead.
void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                                        uint16_t OpcodeWithNoCarry,
                                        bool Add, bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;
  uint16_t Opcode;

  unsigned FirstInputOp = Predicated ? 2 : 1;

  // Two input vectors and the input carry flag
  Ops.push_back(N->getOperand(FirstInputOp));
  Ops.push_back(N->getOperand(FirstInputOp + 1));
  SDValue CarryIn = N->getOperand(FirstInputOp + 2);
  ConstantSDNode *CarryInConstant = dyn_cast<ConstantSDNode>(CarryIn);
  // The carry flag lives in bit 29 of the carry operand.  An add starts from
  // carry 0, a subtract from carry 1; if the constant carry-in already has
  // exactly that value, the carry input is redundant.
  uint32_t CarryMask = 1 << 29;
  uint32_t CarryExpected = Add ? 0 : CarryMask;
  if (CarryInConstant &&
      (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) {
    Opcode = OpcodeWithNoCarry;
  } else {
    Ops.push_back(CarryIn);
    Opcode = OpcodeWithCarry;
  }

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc,
                         N->getOperand(FirstInputOp + 3),  // predicate
                         N->getOperand(FirstInputOp - 1)); // inactive
  else
    AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
}
2679 
2680 void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) {
2681   SDLoc Loc(N);
2682   SmallVector<SDValue, 8> Ops;
2683 
2684   // One vector input, followed by a 32-bit word of bits to shift in
2685   // and then an immediate shift count
2686   Ops.push_back(N->getOperand(1));
2687   Ops.push_back(N->getOperand(2));
2688   int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2689   Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2690 
2691   if (Predicated)
2692     AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2693   else
2694     AddEmptyMVEPredicateToOps(Ops, Loc);
2695 
2696   CurDAG->SelectNodeTo(N, ARM::MVE_VSHLC, N->getVTList(), ArrayRef(Ops));
2697 }
2698 
2699 static bool SDValueToConstBool(SDValue SDVal) {
2700   assert(isa<ConstantSDNode>(SDVal) && "expected a compile-time constant");
2701   ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
2702   uint64_t Value = SDValConstant->getZExtValue();
2703   assert((Value == 0 || Value == 1) && "expected value 0 or 1");
2704   return Value;
2705 }
2706 
/// Common selection for the MVE VMLLDAV / VRMLLDAVH instruction families.
/// The opcode tables hold groups of \p Stride element-size entries, with the
/// accumulating variant offset by +Stride, the exchanging variant by
/// +2*Stride and the subtracting variant by +4*Stride; \p TySize indexes
/// within a group.  Operands 1-3 are constant flags (unsigned / subtract /
/// exchange), 4-5 the two accumulator halves, 6-7 the vector operands, and
/// 8 the predicate (when predicated).
void ARMDAGToDAGISel::SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
                                            const uint16_t *OpcodesS,
                                            const uint16_t *OpcodesU,
                                            size_t Stride, size_t TySize) {
  assert(TySize < Stride && "Invalid TySize");
  bool IsUnsigned = SDValueToConstBool(N->getOperand(1));
  bool IsSub = SDValueToConstBool(N->getOperand(2));
  bool IsExchange = SDValueToConstBool(N->getOperand(3));
  if (IsUnsigned) {
    assert(!IsSub &&
           "Unsigned versions of vmlsldav[a]/vrmlsldavh[a] do not exist");
    assert(!IsExchange &&
           "Unsigned versions of vmlaldav[a]x/vrmlaldavh[a]x do not exist");
  }

  auto OpIsZero = [N](size_t OpNo) {
    return isNullConstant(N->getOperand(OpNo));
  };

  // If the input accumulator value is not zero, select an instruction with
  // accumulator, otherwise select an instruction without accumulator
  bool IsAccum = !(OpIsZero(4) && OpIsZero(5));

  // Walk the opcode table to the variant selected by the flags above.
  const uint16_t *Opcodes = IsUnsigned ? OpcodesU : OpcodesS;
  if (IsSub)
    Opcodes += 4 * Stride;
  if (IsExchange)
    Opcodes += 2 * Stride;
  if (IsAccum)
    Opcodes += Stride;
  uint16_t Opcode = Opcodes[TySize];

  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;
  // Push the accumulator operands, if they are used
  if (IsAccum) {
    Ops.push_back(N->getOperand(4));
    Ops.push_back(N->getOperand(5));
  }
  // Push the two vector operands
  Ops.push_back(N->getOperand(6));
  Ops.push_back(N->getOperand(7));

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(8));
  else
    AddEmptyMVEPredicateToOps(Ops, Loc);

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
}
2757 
2758 void ARMDAGToDAGISel::SelectMVE_VMLLDAV(SDNode *N, bool Predicated,
2759                                         const uint16_t *OpcodesS,
2760                                         const uint16_t *OpcodesU) {
2761   EVT VecTy = N->getOperand(6).getValueType();
2762   size_t SizeIndex;
2763   switch (VecTy.getVectorElementType().getSizeInBits()) {
2764   case 16:
2765     SizeIndex = 0;
2766     break;
2767   case 32:
2768     SizeIndex = 1;
2769     break;
2770   default:
2771     llvm_unreachable("bad vector element size");
2772   }
2773 
2774   SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 2, SizeIndex);
2775 }
2776 
2777 void ARMDAGToDAGISel::SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated,
2778                                           const uint16_t *OpcodesS,
2779                                           const uint16_t *OpcodesU) {
2780   assert(
2781       N->getOperand(6).getValueType().getVectorElementType().getSizeInBits() ==
2782           32 &&
2783       "bad vector element size");
2784   SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 1, 0);
2785 }
2786 
/// Select an MVE multi-vector load producing \p NumVecs vectors.  The opcode
/// table is indexed first by element size (8/16/32) and then by stage: the
/// load is emitted as NumVecs chained machine instructions, each stage
/// taking the previous stage's output tuple as input, and the final wide
/// result is split into Q-register subregs.  With \p HasWriteback the last
/// stage additionally produces the updated pointer.
void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                    const uint16_t *const *Opcodes,
                                    bool HasWriteback) {
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  const uint16_t *OurOpcodes;
  switch (VT.getVectorElementType().getSizeInBits()) {
  case 8:
    OurOpcodes = Opcodes[0];
    break;
  case 16:
    OurOpcodes = Opcodes[1];
    break;
  case 32:
    OurOpcodes = Opcodes[2];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_VLD");
  }

  // The instructions operate on one wide tuple register: 2 x i64 per vector.
  EVT DataTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, NumVecs * 2);
  SmallVector<EVT, 4> ResultTys = {DataTy, MVT::Other};
  unsigned PtrOperand = HasWriteback ? 1 : 2;

  // Seed the tuple with IMPLICIT_DEF; each stage feeds its output into the
  // next.
  auto Data = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, DataTy), 0);
  SDValue Chain = N->getOperand(0);
  // Add a MVE_VLDn instruction for each Vec, except the last
  for (unsigned Stage = 0; Stage < NumVecs - 1; ++Stage) {
    SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
    auto LoadInst =
        CurDAG->getMachineNode(OurOpcodes[Stage], Loc, ResultTys, Ops);
    Data = SDValue(LoadInst, 0);
    Chain = SDValue(LoadInst, 1);
    transferMemOperands(N, LoadInst);
  }
  // The last may need a writeback on it
  if (HasWriteback)
    ResultTys = {DataTy, MVT::i32, MVT::Other};
  SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
  auto LoadInst =
      CurDAG->getMachineNode(OurOpcodes[NumVecs - 1], Loc, ResultTys, Ops);
  transferMemOperands(N, LoadInst);

  // Remap the original results: NumVecs extracted Q registers, then (if
  // present) the writeback pointer, then the chain.
  unsigned i;
  for (i = 0; i < NumVecs; i++)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(ARM::qsub_0 + i, Loc, VT,
                                               SDValue(LoadInst, 0)));
  if (HasWriteback)
    ReplaceUses(SDValue(N, i++), SDValue(LoadInst, 1));
  ReplaceUses(SDValue(N, i), SDValue(LoadInst, HasWriteback ? 2 : 1));
  CurDAG->RemoveDeadNode(N);
}
2842 
/// Select a node from the MVE VxDUP family.  The opcode is chosen by scalar
/// size (8/16/32).  Operands are consumed in order: the inactive-lanes
/// vector (predicated forms only), the base value, a limit (wrapping
/// variants only), the immediate step, and finally the predicate mask
/// (predicated forms only).
void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
                                      bool Wrapping, bool Predicated) {
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  uint16_t Opcode;
  switch (VT.getScalarSizeInBits()) {
  case 8:
    Opcode = Opcodes[0];
    break;
  case 16:
    Opcode = Opcodes[1];
    break;
  case 32:
    Opcode = Opcodes[2];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_VxDUP");
  }

  SmallVector<SDValue, 8> Ops;
  unsigned OpIdx = 1;

  SDValue Inactive;
  if (Predicated)
    Inactive = N->getOperand(OpIdx++);

  Ops.push_back(N->getOperand(OpIdx++));     // base
  if (Wrapping)
    Ops.push_back(N->getOperand(OpIdx++));   // limit

  SDValue ImmOp = N->getOperand(OpIdx++);    // step
  int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
  Ops.push_back(getI32Imm(ImmValue, Loc));

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(OpIdx), Inactive);
  else
    AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
}
2885 
/// Select a dual-register CDE instruction (CXxD).  The intrinsic's two
/// 32-bit accumulator halves (if \p HasAccum) are packed into a GPR pair
/// before the instruction, and the instruction's register-pair result is
/// split back into two 32-bit results afterwards; both steps swap the
/// halves on big-endian targets.
void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
                                     size_t NumExtraOps, bool HasAccum) {
  bool IsBigEndian = CurDAG->getDataLayout().isBigEndian();
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  unsigned OpIdx = 1;

  // Convert and append the immediate operand designating the coprocessor.
  SDValue ImmCorpoc = N->getOperand(OpIdx++);
  uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
  Ops.push_back(getI32Imm(ImmCoprocVal, Loc));

  // For accumulating variants copy the low and high order parts of the
  // accumulator into a register pair and add it to the operand vector.
  if (HasAccum) {
    SDValue AccLo = N->getOperand(OpIdx++);
    SDValue AccHi = N->getOperand(OpIdx++);
    if (IsBigEndian)
      std::swap(AccLo, AccHi);
    Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, AccLo, AccHi), 0));
  }

  // Copy extra operands as-is.
  for (size_t I = 0; I < NumExtraOps; I++)
    Ops.push_back(N->getOperand(OpIdx++));

  // Convert and append the immediate operand
  SDValue Imm = N->getOperand(OpIdx);
  uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
  Ops.push_back(getI32Imm(ImmVal, Loc));

  // Accumulating variants are IT-predicable, add predicate operands.
  if (HasAccum) {
    SDValue Pred = getAL(CurDAG, Loc);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    Ops.push_back(Pred);
    Ops.push_back(PredReg);
  }

  // Create the CDE instruction
  SDNode *InstrNode = CurDAG->getMachineNode(Opcode, Loc, MVT::Untyped, Ops);
  SDValue ResultPair = SDValue(InstrNode, 0);

  // The original intrinsic had two outputs, and the output of the dual-register
  // CDE instruction is a register pair. We need to extract the two subregisters
  // and replace all uses of the original outputs with the extracted
  // subregisters.
  uint16_t SubRegs[2] = {ARM::gsub_0, ARM::gsub_1};
  if (IsBigEndian)
    std::swap(SubRegs[0], SubRegs[1]);

  for (size_t ResIdx = 0; ResIdx < 2; ResIdx++) {
    if (SDValue(N, ResIdx).use_empty())
      continue;
    SDValue SubReg = CurDAG->getTargetExtractSubreg(SubRegs[ResIdx], Loc,
                                                    MVT::i32, ResultPair);
    ReplaceUses(SDValue(N, ResIdx), SubReg);
  }

  CurDAG->RemoveDeadNode(N);
}
2948 
/// Select a NEON load-and-duplicate node (VLDnDUP), optionally with
/// post-increment address writeback.  \p DOpcodes covers 64-bit vectors;
/// for 128-bit vectors \p QOpcodes0 supplies the single-vector opcode and
/// also the first of the two chained instructions used when NumVecs > 1,
/// with \p QOpcodes1 supplying the second.
void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
                                   bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();

  // Clamp the alignment operand: no larger than the bytes actually accessed,
  // a power of two, and values that are too small to be useful become 0
  // ("no alignment").  The 3-vector forms never take an alignment operand.
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);

  // Map the vector type onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld-dup type");
  case MVT::v8i8:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v4i16:
  case MVT::v8i16:
  case MVT::v4f16:
  case MVT::v8f16:
  case MVT::v4bf16:
  case MVT::v8bf16:
                  OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32:
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v1f64:
  case MVT::v1i64: OpcodeIndex = 3; break;
  }

  // Results: one wide i64 vector covering all the loaded registers (padded
  // to 4 for NumVecs == 3), an optional i32 writeback, and the chain.
  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  if (!is64BitVector)
    ResTyElts *= 2;
  EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);

  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  unsigned Opc = is64BitVector    ? DOpcodes[OpcodeIndex]
                 : (NumVecs == 1) ? QOpcodes0[OpcodeIndex]
                                  : QOpcodes1[OpcodeIndex];
  if (isUpdating) {
    SDValue Inc = N->getOperand(2);
    bool IsImmUpdate =
        isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
    if (IsImmUpdate) {
      if (!isVLDfixed(Opc))
        Ops.push_back(Reg0);
    } else {
      if (isVLDfixed(Opc))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      Ops.push_back(Inc);
    }
  }
  if (is64BitVector || NumVecs == 1) {
    // Double registers and VLD1 quad registers are directly supported.
  } else if (NumVecs == 2) {
    // Quad-register dup of two vectors: emit a leading QOpcodes0 instruction
    // whose chain the main (QOpcodes1) instruction depends on.
    const SDValue OpsA[] = {MemAddr, Align, Pred, Reg0, Chain};
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
                                          MVT::Other, OpsA);
    Chain = SDValue(VLdA, 1);
  } else {
    // Quad-register dup of 3 or 4 vectors: the leading QOpcodes0 instruction
    // also produces the tuple the main instruction takes as input.
    SDValue ImplDef = SDValue(
        CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = {MemAddr, Align, ImplDef, Pred, Reg0, Chain};
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
                                          MVT::Other, OpsA);
    Ops.push_back(SDValue(VLdA, 0));
    Chain = SDValue(VLdA, 1);
  }

  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdDup), {MemOp});

  // Extract the subregisters.
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0), SDValue(VLdDup, 0));
  } else {
    SDValue SuperReg = SDValue(VLdDup, 0);
    static_assert(ARM::dsub_7 == ARM::dsub_0 + 7, "Unexpected subreg numbering");
    unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
    for (unsigned Vec = 0; Vec != NumVecs; ++Vec) {
      ReplaceUses(SDValue(N, Vec),
                  CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
    }
  }
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  CurDAG->RemoveDeadNode(N);
}
3079 
/// Try to lower a pair of adjacent INSERT_VECTOR_ELT nodes on a v8f16/v8i16
/// vector into VMOV/VMOVX/VINS based f32 lane operations, which handle two
/// f16-sized lanes at a time. Returns true and replaces N on success.
bool ARMDAGToDAGISel::tryInsertVectorElt(SDNode *N) {
  if (!Subtarget->hasMVEIntegerOps())
    return false;

  SDLoc dl(N);

  // We are trying to use VMOV/VMOVX/VINS to more efficiently lower insert and
  // extracts of v8f16 and v8i16 vectors. Check that we have two adjacent
  // inserts of the correct type:
  SDValue Ins1 = SDValue(N, 0);
  SDValue Ins2 = N->getOperand(0);
  EVT VT = Ins1.getValueType();
  if (Ins2.getOpcode() != ISD::INSERT_VECTOR_ELT || !Ins2.hasOneUse() ||
      !isa<ConstantSDNode>(Ins1.getOperand(2)) ||
      !isa<ConstantSDNode>(Ins2.getOperand(2)) ||
      (VT != MVT::v8f16 && VT != MVT::v8i16) || (Ins2.getValueType() != VT))
    return false;

  // The two inserts must target an even lane and the odd lane directly above
  // it, i.e. together they fill one aligned f32-sized lane.
  unsigned Lane1 = Ins1.getConstantOperandVal(2);
  unsigned Lane2 = Ins2.getConstantOperandVal(2);
  if (Lane2 % 2 != 0 || Lane1 != Lane2 + 1)
    return false;

  // If the inserted values will be able to use T/B already, leave it to the
  // existing tablegen patterns. For example VCVTT/VCVTB.
  SDValue Val1 = Ins1.getOperand(1);
  SDValue Val2 = Ins2.getOperand(1);
  if (Val1.getOpcode() == ISD::FP_ROUND || Val2.getOpcode() == ISD::FP_ROUND)
    return false;

  // Check if the inserted values are both extracts.
  if ((Val1.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
       Val1.getOpcode() == ARMISD::VGETLANEu) &&
      (Val2.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
       Val2.getOpcode() == ARMISD::VGETLANEu) &&
      isa<ConstantSDNode>(Val1.getOperand(1)) &&
      isa<ConstantSDNode>(Val2.getOperand(1)) &&
      (Val1.getOperand(0).getValueType() == MVT::v8f16 ||
       Val1.getOperand(0).getValueType() == MVT::v8i16) &&
      (Val2.getOperand(0).getValueType() == MVT::v8f16 ||
       Val2.getOperand(0).getValueType() == MVT::v8i16)) {
    unsigned ExtractLane1 = Val1.getConstantOperandVal(1);
    unsigned ExtractLane2 = Val2.getConstantOperandVal(1);

    // If the two extracted lanes are from the same place and adjacent, this
    // simplifies into a f32 lane move.
    if (Val1.getOperand(0) == Val2.getOperand(0) && ExtractLane2 % 2 == 0 &&
        ExtractLane1 == ExtractLane2 + 1) {
      SDValue NewExt = CurDAG->getTargetExtractSubreg(
          ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val1.getOperand(0));
      SDValue NewIns = CurDAG->getTargetInsertSubreg(
          ARM::ssub_0 + Lane2 / 2, dl, VT, Ins2.getOperand(0),
          NewExt);
      ReplaceUses(Ins1, NewIns);
      return true;
    }

    // Else v8i16 pattern of an extract and an insert, with a optional vmovx for
    // extracting odd lanes.
    if (VT == MVT::v8i16 && Subtarget->hasFullFP16()) {
      SDValue Inp1 = CurDAG->getTargetExtractSubreg(
          ARM::ssub_0 + ExtractLane1 / 2, dl, MVT::f32, Val1.getOperand(0));
      SDValue Inp2 = CurDAG->getTargetExtractSubreg(
          ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val2.getOperand(0));
      // Odd f16 lanes need the extra move (the "vmovx" mentioned above) to
      // bring them into the low half of the f32 register.
      if (ExtractLane1 % 2 != 0)
        Inp1 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp1), 0);
      if (ExtractLane2 % 2 != 0)
        Inp2 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp2), 0);
      SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Inp2, Inp1);
      SDValue NewIns =
          CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
                                        Ins2.getOperand(0), SDValue(VINS, 0));
      ReplaceUses(Ins1, NewIns);
      return true;
    }
  }

  // The inserted values are not extracted - if they are f16 then insert them
  // directly using a VINS.
  if (VT == MVT::v8f16 && Subtarget->hasFullFP16()) {
    SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Val2, Val1);
    SDValue NewIns =
        CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
                                      Ins2.getOperand(0), SDValue(VINS, 0));
    ReplaceUses(Ins1, NewIns);
    return true;
  }

  return false;
}
3170 
/// Convert a vector multiply by a power-of-two constant, combined with an
/// int-to-float (or, via the caller, a float-to-int) conversion, into a single
/// MVE fixed-point VCVT whose fractional-bit count is the log2 of the scale.
/// \param N            the node to replace; provides the result type.
/// \param FMul         the multiply supplying the scale constant (may be N).
/// \param IsUnsigned   select the unsigned VCVT variants.
/// \param FixedToFloat true for fixed->float: the scale is inverted before
///                     taking the log2.
/// \returns true if N was replaced with the VCVT machine node.
bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
                                                            SDNode *FMul,
                                                            bool IsUnsigned,
                                                            bool FixedToFloat) {
  // Only 16- and 32-bit elements are handled (see the opcode switch below).
  auto Type = N->getValueType(0);
  unsigned ScalarBits = Type.getScalarSizeInBits();
  if (ScalarBits > 32)
    return false;

  SDNodeFlags FMulFlags = FMul->getFlags();
  // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
  // allowed in 16 bit unsigned floats
  if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
    return false;

  // Look through an int-to-float conversion to reach the raw integer vector.
  SDValue ImmNode = FMul->getOperand(1);
  SDValue VecVal = FMul->getOperand(0);
  if (VecVal->getOpcode() == ISD::UINT_TO_FP ||
      VecVal->getOpcode() == ISD::SINT_TO_FP)
    VecVal = VecVal->getOperand(0);

  if (VecVal.getValueType().getScalarSizeInBits() != ScalarBits)
    return false;

  // The constant may be wrapped in a bitcast; look through it only when the
  // element size is unchanged.
  if (ImmNode.getOpcode() == ISD::BITCAST) {
    if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
      return false;
    ImmNode = ImmNode.getOperand(0);
  }

  if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
    return false;

  // Reconstruct the splatted scale factor as an APFloat, whichever constant
  // node form it arrived in.
  APFloat ImmAPF(0.0f);
  switch (ImmNode.getOpcode()) {
  case ARMISD::VMOVIMM:
  case ARMISD::VDUP: {
    if (!isa<ConstantSDNode>(ImmNode.getOperand(0)))
      return false;
    unsigned Imm = ImmNode.getConstantOperandVal(0);
    if (ImmNode.getOpcode() == ARMISD::VMOVIMM)
      Imm = ARM_AM::decodeVMOVModImm(Imm, ScalarBits);
    ImmAPF =
        APFloat(ScalarBits == 32 ? APFloat::IEEEsingle() : APFloat::IEEEhalf(),
                APInt(ScalarBits, Imm));
    break;
  }
  case ARMISD::VMOVFPIMM: {
    ImmAPF = APFloat(ARM_AM::getFPImmFloat(ImmNode.getConstantOperandVal(0)));
    break;
  }
  default:
    return false;
  }

  // Where n is the number of fractional bits, multiplying by 2^n will convert
  // from float to fixed and multiplying by 2^-n will convert from fixed to
  // float. Taking log2 of the factor (after taking the inverse in the case of
  // float to fixed) will give n.
  APFloat ToConvert = ImmAPF;
  if (FixedToFloat) {
    if (!ImmAPF.getExactInverse(&ToConvert))
      return false;
  }
  APSInt Converted(64, false);
  bool IsExact;
  ToConvert.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
                             &IsExact);
  // The scale must be an exact power of two or the VCVT is not equivalent.
  if (!IsExact || !Converted.isPowerOf2())
    return false;

  unsigned FracBits = Converted.logBase2();
  if (FracBits > ScalarBits)
    return false;

  // Operands: input vector, fractional-bit immediate, empty MVE predicate.
  SmallVector<SDValue, 3> Ops{
      VecVal, CurDAG->getConstant(FracBits, SDLoc(N), MVT::i32)};
  AddEmptyMVEPredicateToOps(Ops, SDLoc(N), Type);

  unsigned int Opcode;
  switch (ScalarBits) {
  case 16:
    if (FixedToFloat)
      Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
    else
      Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
    break;
  case 32:
    if (FixedToFloat)
      Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
    else
      Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
    break;
  default:
    llvm_unreachable("unexpected number of scalar bits");
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), Type, Ops));
  return true;
}
3272 
3273 bool ARMDAGToDAGISel::tryFP_TO_INT(SDNode *N, SDLoc dl) {
3274   // Transform a floating-point to fixed-point conversion to a VCVT
3275   if (!Subtarget->hasMVEFloatOps())
3276     return false;
3277   EVT Type = N->getValueType(0);
3278   if (!Type.isVector())
3279     return false;
3280   unsigned int ScalarBits = Type.getScalarSizeInBits();
3281 
3282   bool IsUnsigned = N->getOpcode() == ISD::FP_TO_UINT ||
3283                     N->getOpcode() == ISD::FP_TO_UINT_SAT;
3284   SDNode *Node = N->getOperand(0).getNode();
3285 
3286   // floating-point to fixed-point with one fractional bit gets turned into an
3287   // FP_TO_[U|S]INT(FADD (x, x)) rather than an FP_TO_[U|S]INT(FMUL (x, y))
3288   if (Node->getOpcode() == ISD::FADD) {
3289     if (Node->getOperand(0) != Node->getOperand(1))
3290       return false;
3291     SDNodeFlags Flags = Node->getFlags();
3292     // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
3293     // allowed in 16 bit unsigned floats
3294     if (ScalarBits == 16 && !Flags.hasNoInfs() && IsUnsigned)
3295       return false;
3296 
3297     unsigned Opcode;
3298     switch (ScalarBits) {
3299     case 16:
3300       Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3301       break;
3302     case 32:
3303       Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3304       break;
3305     }
3306     SmallVector<SDValue, 3> Ops{Node->getOperand(0),
3307                                 CurDAG->getConstant(1, dl, MVT::i32)};
3308     AddEmptyMVEPredicateToOps(Ops, dl, Type);
3309 
3310     ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
3311     return true;
3312   }
3313 
3314   if (Node->getOpcode() != ISD::FMUL)
3315     return false;
3316 
3317   return transformFixedFloatingPointConversion(N, Node, IsUnsigned, false);
3318 }
3319 
3320 bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
3321   // Transform a fixed-point to floating-point conversion to a VCVT
3322   if (!Subtarget->hasMVEFloatOps())
3323     return false;
3324   auto Type = N->getValueType(0);
3325   if (!Type.isVector())
3326     return false;
3327 
3328   auto LHS = N->getOperand(0);
3329   if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
3330     return false;
3331 
3332   return transformFixedFloatingPointConversion(
3333       N, N, LHS.getOpcode() == ISD::UINT_TO_FP, true);
3334 }
3335 
/// Try to select N (an AND, SRL/SRA-of-SHL, SRL-of-AND, or
/// SIGN_EXTEND_INREG node) as a UBFX/SBFX bitfield extract, or as a plain
/// right shift when the extracted field reaches the top bit. Requires
/// ARMv6T2, where the bitfield-extract instructions were introduced.
bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return false;

  unsigned Opc = isSigned
    ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
    : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
  SDLoc dl(N);

  // For unsigned extracts, check for a shift right and mask
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return false;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        // Mask off the unnecessary bits of the AND immediate; normally
        // DAGCombine will do this, but that might not happen if
        // targetShrinkDemandedConstant chooses a different immediate.
        And_imm &= -1U >> Srl_imm;

        // Note: The width operand is encoded as width-1.
        unsigned Width = llvm::countr_one(And_imm) - 1;
        unsigned LSB = Srl_imm;

        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

        if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
          // It's cheaper to use a right shift to extract the top bits.
          if (Subtarget->isThumb()) {
            Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
            SDValue Ops[] = { N->getOperand(0).getOperand(0),
                              CurDAG->getTargetConstant(LSB, dl, MVT::i32),
                              getAL(CurDAG, dl), Reg0, Reg0 };
            CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
            return true;
          }

          // ARM models shift instructions as MOVsi with shifter operand.
          ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
          SDValue ShOpc =
            CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
                                      MVT::i32);
          SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
                            getAL(CurDAG, dl), Reg0, Reg0 };
          CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
          return true;
        }

        // General case: [SU]BFX with lsb from the shift and width from the
        // mask.
        assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, dl, MVT::i32),
                          CurDAG->getTargetConstant(Width, dl, MVT::i32),
                          getAL(CurDAG, dl), Reg0 };
        CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
        return true;
      }
    }
    return false;
  }

  // Otherwise, we're looking for a shift of a shift
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      // Note: The width operand is encoded as width-1.
      unsigned Width = 32 - Srl_imm - 1;
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return false;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, dl, MVT::i32),
                        CurDAG->getTargetConstant(Width, dl, MVT::i32),
                        getAL(CurDAG, dl), Reg0 };
      CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
      return true;
    }
  }

  // Or we are looking for a shift of an and, with a mask operand
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_imm) &&
      isShiftedMask_32(And_imm)) {
    unsigned Srl_imm = 0;
    unsigned LSB = llvm::countr_zero(And_imm);
    // Shift must be the same as the ands lsb
    if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      unsigned MSB = llvm::Log2_32(And_imm);
      // Note: The width operand is encoded as width-1.
      unsigned Width = MSB - LSB;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      assert(Srl_imm + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(Srl_imm, dl, MVT::i32),
                        CurDAG->getTargetConstant(Width, dl, MVT::i32),
                        getAL(CurDAG, dl), Reg0 };
      CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
      return true;
    }
  }

  // Finally, a sign_extend_inreg of a right shift: the field width comes
  // from the inreg VT and the lsb from the shift amount.
  if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    unsigned LSB = 0;
    if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
        !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
      return false;

    if (LSB + Width > 32)
      return false;

    SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
    assert(LSB + Width <= 32 && "Shouldn't create an invalid ubfx");
    SDValue Ops[] = { N->getOperand(0).getOperand(0),
                      CurDAG->getTargetConstant(LSB, dl, MVT::i32),
                      CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
                      getAL(CurDAG, dl), Reg0 };
    CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
    return true;
  }

  return false;
}
3471 
3472 /// Target-specific DAG combining for ISD::SUB.
3473 /// Target-independent combining lowers SELECT_CC nodes of the form
3474 /// select_cc setg[ge] X,  0,  X, -X
3475 /// select_cc setgt    X, -1,  X, -X
3476 /// select_cc setl[te] X,  0, -X,  X
3477 /// select_cc setlt    X,  1, -X,  X
3478 /// which represent Integer ABS into:
3479 /// Y = sra (X, size(X)-1); sub (xor (X, Y), Y)
3480 /// ARM instruction selection detects the latter and matches it to
3481 /// ARM::ABS or ARM::t2ABS machine node.
3482 bool ARMDAGToDAGISel::tryABSOp(SDNode *N){
3483   SDValue SUBSrc0 = N->getOperand(0);
3484   SDValue SUBSrc1 = N->getOperand(1);
3485   EVT VT = N->getValueType(0);
3486 
3487   if (Subtarget->isThumb1Only())
3488     return false;
3489 
3490   if (SUBSrc0.getOpcode() != ISD::XOR || SUBSrc1.getOpcode() != ISD::SRA)
3491     return false;
3492 
3493   SDValue XORSrc0 = SUBSrc0.getOperand(0);
3494   SDValue XORSrc1 = SUBSrc0.getOperand(1);
3495   SDValue SRASrc0 = SUBSrc1.getOperand(0);
3496   SDValue SRASrc1 = SUBSrc1.getOperand(1);
3497   ConstantSDNode *SRAConstant =  dyn_cast<ConstantSDNode>(SRASrc1);
3498   EVT XType = SRASrc0.getValueType();
3499   unsigned Size = XType.getSizeInBits() - 1;
3500 
3501   if (XORSrc1 == SUBSrc1 && XORSrc0 == SRASrc0 && XType.isInteger() &&
3502       SRAConstant != nullptr && Size == SRAConstant->getZExtValue()) {
3503     unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
3504     CurDAG->SelectNodeTo(N, Opcode, VT, XORSrc0);
3505     return true;
3506   }
3507 
3508   return false;
3509 }
3510 
3511 /// We've got special pseudo-instructions for these
3512 void ARMDAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3513   unsigned Opcode;
3514   EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3515   if (MemTy == MVT::i8)
3516     Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_8 : ARM::CMP_SWAP_8;
3517   else if (MemTy == MVT::i16)
3518     Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_16 : ARM::CMP_SWAP_16;
3519   else if (MemTy == MVT::i32)
3520     Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_32 : ARM::CMP_SWAP_32;
3521   else
3522     llvm_unreachable("Unknown AtomicCmpSwap type");
3523 
3524   SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3525                    N->getOperand(0)};
3526   SDNode *CmpSwap = CurDAG->getMachineNode(
3527       Opcode, SDLoc(N),
3528       CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other), Ops);
3529 
3530   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3531   CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3532 
3533   ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3534   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3535   CurDAG->RemoveDeadNode(N);
3536 }
3537 
3538 static std::optional<std::pair<unsigned, unsigned>>
3539 getContiguousRangeOfSetBits(const APInt &A) {
3540   unsigned FirstOne = A.getBitWidth() - A.countl_zero() - 1;
3541   unsigned LastOne = A.countr_zero();
3542   if (A.popcount() != (FirstOne - LastOne + 1))
3543     return std::nullopt;
3544   return std::make_pair(FirstOne, LastOne);
3545 }
3546 
/// Lower (CMPZ (and X, C), #0) to one or two flag-setting Thumb shifts when C
/// is a single contiguous run of set bits, so the AND can be removed.
/// SwitchEQNEToPLMI is set when the tested bit ends up in the sign bit, in
/// which case the caller must rewrite EQ/NE users to PL/MI.
void ARMDAGToDAGISel::SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI) {
  assert(N->getOpcode() == ARMISD::CMPZ);
  SwitchEQNEToPLMI = false;

  if (!Subtarget->isThumb())
    // FIXME: Work out whether it is profitable to do this in A32 mode - LSL and
    // LSR don't exist as standalone instructions - they need the barrel shifter.
    return;

  // select (cmpz (and X, C), #0) -> (LSLS X) or (LSRS X) or (LSRS (LSLS X))
  SDValue And = N->getOperand(0);
  if (!And->hasOneUse())
    return;

  SDValue Zero = N->getOperand(1);
  if (!isNullConstant(Zero) || And->getOpcode() != ISD::AND)
    return;
  SDValue X = And.getOperand(0);
  auto C = dyn_cast<ConstantSDNode>(And.getOperand(1));

  if (!C)
    return;
  // The mask must be one contiguous run of set bits [first..second].
  auto Range = getContiguousRangeOfSetBits(C->getAPIntValue());
  if (!Range)
    return;

  // There are several ways to lower this:
  SDNode *NewN;
  SDLoc dl(N);

  // Helper that emits a flag-setting shift-by-immediate of Src, selecting
  // the Thumb1 or Thumb2 opcode variant as appropriate.
  auto EmitShift = [&](unsigned Opc, SDValue Src, unsigned Imm) -> SDNode* {
    if (Subtarget->isThumb2()) {
      Opc = (Opc == ARM::tLSLri) ? ARM::t2LSLri : ARM::t2LSRri;
      SDValue Ops[] = { Src, CurDAG->getTargetConstant(Imm, dl, MVT::i32),
                        getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
    } else {
      SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), Src,
                       CurDAG->getTargetConstant(Imm, dl, MVT::i32),
                       getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
      return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
    }
  };

  if (Range->second == 0) {
    //  1. Mask includes the LSB -> Simply shift the top N bits off
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    ReplaceNode(And.getNode(), NewN);
  } else if (Range->first == 31) {
    //  2. Mask includes the MSB -> Simply shift the bottom N bits off
    NewN = EmitShift(ARM::tLSRri, X, Range->second);
    ReplaceNode(And.getNode(), NewN);
  } else if (Range->first == Range->second) {
    //  3. Only one bit is set. We can shift this into the sign bit and use a
    //     PL/MI comparison.
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    ReplaceNode(And.getNode(), NewN);

    SwitchEQNEToPLMI = true;
  } else if (!Subtarget->hasV6T2Ops()) {
    //  4. Do a double shift to clear bottom and top bits, but only in
    //     thumb-1 mode as in thumb-2 we can use UBFX.
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    NewN = EmitShift(ARM::tLSRri, SDValue(NewN, 0),
                     Range->second + (31 - Range->first));
    ReplaceNode(And.getNode(), NewN);
  }
}
3616 
3617 static unsigned getVectorShuffleOpcode(EVT VT, unsigned Opc64[3],
3618                                        unsigned Opc128[3]) {
3619   assert((VT.is64BitVector() || VT.is128BitVector()) &&
3620          "Unexpected vector shuffle length");
3621   switch (VT.getScalarSizeInBits()) {
3622   default:
3623     llvm_unreachable("Unexpected vector shuffle element size");
3624   case 8:
3625     return VT.is64BitVector() ? Opc64[0] : Opc128[0];
3626   case 16:
3627     return VT.is64BitVector() ? Opc64[1] : Opc128[1];
3628   case 32:
3629     return VT.is64BitVector() ? Opc64[2] : Opc128[2];
3630   }
3631 }
3632 
3633 void ARMDAGToDAGISel::Select(SDNode *N) {
3634   SDLoc dl(N);
3635 
3636   if (N->isMachineOpcode()) {
3637     N->setNodeId(-1);
3638     return;   // Already selected.
3639   }
3640 
3641   switch (N->getOpcode()) {
3642   default: break;
3643   case ISD::STORE: {
3644     // For Thumb1, match an sp-relative store in C++. This is a little
3645     // unfortunate, but I don't think I can make the chain check work
3646     // otherwise.  (The chain of the store has to be the same as the chain
3647     // of the CopyFromReg, or else we can't replace the CopyFromReg with
3648     // a direct reference to "SP".)
3649     //
3650     // This is only necessary on Thumb1 because Thumb1 sp-relative stores use
3651     // a different addressing mode from other four-byte stores.
3652     //
3653     // This pattern usually comes up with call arguments.
3654     StoreSDNode *ST = cast<StoreSDNode>(N);
3655     SDValue Ptr = ST->getBasePtr();
3656     if (Subtarget->isThumb1Only() && ST->isUnindexed()) {
3657       int RHSC = 0;
3658       if (Ptr.getOpcode() == ISD::ADD &&
3659           isScaledConstantInRange(Ptr.getOperand(1), /*Scale=*/4, 0, 256, RHSC))
3660         Ptr = Ptr.getOperand(0);
3661 
3662       if (Ptr.getOpcode() == ISD::CopyFromReg &&
3663           cast<RegisterSDNode>(Ptr.getOperand(1))->getReg() == ARM::SP &&
3664           Ptr.getOperand(0) == ST->getChain()) {
3665         SDValue Ops[] = {ST->getValue(),
3666                          CurDAG->getRegister(ARM::SP, MVT::i32),
3667                          CurDAG->getTargetConstant(RHSC, dl, MVT::i32),
3668                          getAL(CurDAG, dl),
3669                          CurDAG->getRegister(0, MVT::i32),
3670                          ST->getChain()};
3671         MachineSDNode *ResNode =
3672             CurDAG->getMachineNode(ARM::tSTRspi, dl, MVT::Other, Ops);
3673         MachineMemOperand *MemOp = ST->getMemOperand();
3674         CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3675         ReplaceNode(N, ResNode);
3676         return;
3677       }
3678     }
3679     break;
3680   }
3681   case ISD::WRITE_REGISTER:
3682     if (tryWriteRegister(N))
3683       return;
3684     break;
3685   case ISD::READ_REGISTER:
3686     if (tryReadRegister(N))
3687       return;
3688     break;
3689   case ISD::INLINEASM:
3690   case ISD::INLINEASM_BR:
3691     if (tryInlineAsm(N))
3692       return;
3693     break;
3694   case ISD::SUB:
3695     // Select special operations if SUB node forms integer ABS pattern
3696     if (tryABSOp(N))
3697       return;
3698     // Other cases are autogenerated.
3699     break;
3700   case ISD::Constant: {
3701     unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
3702     // If we can't materialize the constant we need to use a literal pool
3703     if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
3704         !Subtarget->genExecuteOnly()) {
3705       SDValue CPIdx = CurDAG->getTargetConstantPool(
3706           ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
3707           TLI->getPointerTy(CurDAG->getDataLayout()));
3708 
3709       SDNode *ResNode;
3710       if (Subtarget->isThumb()) {
3711         SDValue Ops[] = {
3712           CPIdx,
3713           getAL(CurDAG, dl),
3714           CurDAG->getRegister(0, MVT::i32),
3715           CurDAG->getEntryNode()
3716         };
3717         ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
3718                                          Ops);
3719       } else {
3720         SDValue Ops[] = {
3721           CPIdx,
3722           CurDAG->getTargetConstant(0, dl, MVT::i32),
3723           getAL(CurDAG, dl),
3724           CurDAG->getRegister(0, MVT::i32),
3725           CurDAG->getEntryNode()
3726         };
3727         ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
3728                                          Ops);
3729       }
3730       // Annotate the Node with memory operand information so that MachineInstr
3731       // queries work properly. This e.g. gives the register allocation the
3732       // required information for rematerialization.
3733       MachineFunction& MF = CurDAG->getMachineFunction();
3734       MachineMemOperand *MemOp =
3735           MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
3736                                   MachineMemOperand::MOLoad, 4, Align(4));
3737 
3738       CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3739 
3740       ReplaceNode(N, ResNode);
3741       return;
3742     }
3743 
3744     // Other cases are autogenerated.
3745     break;
3746   }
3747   case ISD::FrameIndex: {
3748     // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
3749     int FI = cast<FrameIndexSDNode>(N)->getIndex();
3750     SDValue TFI = CurDAG->getTargetFrameIndex(
3751         FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3752     if (Subtarget->isThumb1Only()) {
3753       // Set the alignment of the frame object to 4, to avoid having to generate
3754       // more than one ADD
3755       MachineFrameInfo &MFI = MF->getFrameInfo();
3756       if (MFI.getObjectAlign(FI) < Align(4))
3757         MFI.setObjectAlignment(FI, Align(4));
3758       CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
3759                            CurDAG->getTargetConstant(0, dl, MVT::i32));
3760       return;
3761     } else {
3762       unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
3763                       ARM::t2ADDri : ARM::ADDri);
3764       SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
3765                         getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3766                         CurDAG->getRegister(0, MVT::i32) };
3767       CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3768       return;
3769     }
3770   }
3771   case ISD::INSERT_VECTOR_ELT: {
3772     if (tryInsertVectorElt(N))
3773       return;
3774     break;
3775   }
3776   case ISD::SRL:
3777     if (tryV6T2BitfieldExtractOp(N, false))
3778       return;
3779     break;
3780   case ISD::SIGN_EXTEND_INREG:
3781   case ISD::SRA:
3782     if (tryV6T2BitfieldExtractOp(N, true))
3783       return;
3784     break;
3785   case ISD::FP_TO_UINT:
3786   case ISD::FP_TO_SINT:
3787   case ISD::FP_TO_UINT_SAT:
3788   case ISD::FP_TO_SINT_SAT:
3789     if (tryFP_TO_INT(N, dl))
3790       return;
3791     break;
3792   case ISD::FMUL:
3793     if (tryFMULFixed(N, dl))
3794       return;
3795     break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    // Strength-reduce multiplication by (2^n + 1) to (add x, x lsl n) and by
    // (2^n - 1) to (rsb x, x lsl n), using a shifter operand instead of a
    // MUL plus a materialized constant.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      unsigned RHSV = C->getZExtValue();
      if (!RHSV) break;
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        // Pack the shift kind (LSL) and amount into a shifter-operand imm.
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
          CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
          return;
        } else {
          // ARM-mode ADDrsi carries an extra register operand slot.
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
                            Reg0 };
          CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
          return;
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
          CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
          return;
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
                            Reg0 };
          CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
          return;
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract
    if (tryV6T2BitfieldExtractOp(N, false))
      return;

    // If an immediate is used in an AND node, it is possible that the immediate
    // can be more optimally materialized when negated. If this is the case we
    // can negate the immediate and use a BIC instead.
    auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (N1C && N1C->hasOneUse() && Subtarget->isThumb()) {
      uint32_t Imm = (uint32_t) N1C->getZExtValue();

      // In Thumb2 mode, an AND can take a 12-bit immediate. If this
      // immediate can be negated and fit in the immediate operand of
      // a t2BIC, don't do any manual transform here as this can be
      // handled by the generic ISel machinery.
      bool PreferImmediateEncoding =
        Subtarget->hasThumb2() && (is_t2_so_imm(Imm) || is_t2_so_imm_not(Imm));
      if (!PreferImmediateEncoding &&
          ConstantMaterializationCost(Imm, Subtarget) >
              ConstantMaterializationCost(~Imm, Subtarget)) {
        // The current immediate costs more to materialize than a negated
        // immediate, so negate the immediate and use a BIC.
        SDValue NewImm =
          CurDAG->getConstant(~N1C->getZExtValue(), dl, MVT::i32);
        // If the new constant didn't exist before, reposition it in the topological
        // ordering so it is just before N. Otherwise, don't touch its location.
        if (NewImm->getNodeId() == -1)
          CurDAG->RepositionNode(N->getIterator(), NewImm.getNode());

        if (!Subtarget->hasThumb2()) {
          // Thumb1 tBIC: CPSR def comes first, then operands, then the
          // predicate pair.
          SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32),
                           N->getOperand(0), NewImm, getAL(CurDAG, dl),
                           CurDAG->getRegister(0, MVT::i32)};
          ReplaceNode(N, CurDAG->getMachineNode(ARM::tBIC, dl, MVT::i32, Ops));
          return;
        } else {
          SDValue Ops[] = {N->getOperand(0), NewImm, getAL(CurDAG, dl),
                           CurDAG->getRegister(0, MVT::i32),
                           CurDAG->getRegister(0, MVT::i32)};
          ReplaceNode(N,
                      CurDAG->getMachineNode(ARM::t2BICrr, dl, MVT::i32, Ops));
          return;
        }
      }
    }

    // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
    // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
    // are entirely contributed by c2 and lower 16-bits are entirely contributed
    // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
    // Select it to: "movt x, ((c1 & 0xffff) >> 16)
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    // MOVT requires v6t2 (ARM) or Thumb2; bail out if unavailable.
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
    if (!Opc)
      break;
    SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
    N1C = dyn_cast<ConstantSDNode>(N1);
    if (!N1C)
      break;
    if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
      SDValue N2 = N0.getOperand(1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
      if (!N2C)
        break;
      unsigned N1CVal = N1C->getZExtValue();
      unsigned N2CVal = N2C->getZExtValue();
      if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
          (N1CVal & 0xffffU) == 0xffffU &&
          (N2CVal & 0xffffU) == 0x0U) {
        SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
                                                  dl, MVT::i32);
        SDValue Ops[] = { N0.getOperand(0), Imm16,
                          getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
        ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
        return;
      }
    }

    break;
  }
  case ARMISD::UMAAL: {
    // Unsigned multiply-accumulate-accumulate long. Forward the four
    // operands and append the predicate operands (AL condition, no pred
    // register); the node produces the two 32-bit result halves.
    unsigned Opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                      N->getOperand(2), N->getOperand(3),
                      getAL(CurDAG, dl),
                      CurDAG->getRegister(0, MVT::i32) };
    ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::i32, Ops));
    return;
  }
  case ARMISD::UMLAL:{
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG, dl),
                        CurDAG->getRegister(0, MVT::i32)};
      ReplaceNode(
          N, CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops));
      return;
    }else{
      // ARM mode carries an extra 's'-bit operand slot; pre-v6 cores use the
      // UMLALv5 pseudo.
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG, dl),
                        CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      ReplaceNode(N, CurDAG->getMachineNode(
                         Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, dl,
                         MVT::i32, MVT::i32, Ops));
      return;
    }
  }
  case ARMISD::SMLAL:{
    // Signed multiply-accumulate long; mirrors the UMLAL handling above.
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG, dl),
                        CurDAG->getRegister(0, MVT::i32)};
      ReplaceNode(
          N, CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops));
      return;
    }else{
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                        N->getOperand(3), getAL(CurDAG, dl),
                        CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      ReplaceNode(N, CurDAG->getMachineNode(
                         Subtarget->hasV6Ops() ? ARM::SMLAL : ARM::SMLALv5, dl,
                         MVT::i32, MVT::i32, Ops));
      return;
    }
  }
  case ARMISD::SUBE: {
    if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
      break;
    // Look for a pattern to match SMMLS
    // (sube a, (smul_loHi a, b), (subc 0, (smul_LOhi(a, b))))
    if (N->getOperand(1).getOpcode() != ISD::SMUL_LOHI ||
        N->getOperand(2).getOpcode() != ARMISD::SUBC ||
        !SDValue(N, 1).use_empty())
      break;

    if (Subtarget->isThumb())
      assert(Subtarget->hasThumb2() &&
             "This pattern should not be generated for Thumb");

    SDValue SmulLoHi = N->getOperand(1);
    SDValue Subc = N->getOperand(2);
    SDValue Zero = Subc.getOperand(0);

    // Verify the exact operand wiring: the SUBC must compute
    // 0 - lo(smul_lohi) and the SUBE must consume hi(smul_lohi) and the
    // SUBC's borrow output.
    if (!isNullConstant(Zero) || Subc.getOperand(1) != SmulLoHi.getValue(0) ||
        N->getOperand(1) != SmulLoHi.getValue(1) ||
        N->getOperand(2) != Subc.getValue(1))
      break;

    unsigned Opc = Subtarget->isThumb2() ? ARM::t2SMMLS : ARM::SMMLS;
    SDValue Ops[] = { SmulLoHi.getOperand(0), SmulLoHi.getOperand(1),
                      N->getOperand(0), getAL(CurDAG, dl),
                      CurDAG->getRegister(0, MVT::i32) };
    ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops));
    return;
  }
  case ISD::LOAD: {
    // Try indexed (pre/post-increment) load selection per subtarget flavor;
    // plain loads fall through to autogenerated patterns.
    if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
      return;
    if (Subtarget->isThumb() && Subtarget->hasThumb2()) {
      if (tryT2IndexedLoad(N))
        return;
    } else if (Subtarget->isThumb()) {
      if (tryT1IndexedLoad(N))
        return;
    } else if (tryARMIndexedLoad(N))
      return;
    // Other cases are autogenerated.
    break;
  }
  case ISD::MLOAD:
    // Masked loads: only the MVE indexed form is handled manually here.
    if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
      return;
    // Other cases are autogenerated.
    break;
  case ARMISD::WLSSETUP: {
    // Low-overhead loop setup: materialize the trip count for a while-loop.
    SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopSetup, dl, MVT::i32,
                                         N->getOperand(0));
    ReplaceUses(N, New);
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::WLS: {
    // While-loop start: branches around the loop when the count is zero.
    SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopStart, dl, MVT::Other,
                                         N->getOperand(1), N->getOperand(2),
                                         N->getOperand(0));
    ReplaceUses(N, New);
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::LE: {
    // Loop-end: operands are reordered so the chain comes last for the
    // machine node.
    SDValue Ops[] = { N->getOperand(1),
                      N->getOperand(2),
                      N->getOperand(0) };
    unsigned Opc = ARM::t2LoopEnd;
    SDNode *New = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
    ReplaceUses(N, New);
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::LDRD: {
    if (Subtarget->isThumb2())
      break; // TableGen handles isel in this case.
    SDValue Base, RegOffset, ImmOffset;
    const SDValue &Chain = N->getOperand(0);
    const SDValue &Addr = N->getOperand(1);
    SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
    if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
      // The register-offset variant of LDRD mandates that the register
      // allocated to RegOffset is not reused in any of the remaining operands.
      // This restriction is currently not enforced. Therefore emitting this
      // variant is explicitly avoided.
      Base = Addr;
      RegOffset = CurDAG->getRegister(0, MVT::i32);
    }
    SDValue Ops[] = {Base, RegOffset, ImmOffset, Chain};
    // LOADDUAL yields an Untyped register pair; split it into the two i32
    // halves via the gsub_0/gsub_1 subregisters.
    SDNode *New = CurDAG->getMachineNode(ARM::LOADDUAL, dl,
                                         {MVT::Untyped, MVT::Other}, Ops);
    SDValue Lo = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                SDValue(New, 0));
    SDValue Hi = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                SDValue(New, 0));
    transferMemOperands(N, New);
    ReplaceUses(SDValue(N, 0), Lo);
    ReplaceUses(SDValue(N, 1), Hi);
    ReplaceUses(SDValue(N, 2), SDValue(New, 1));
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::STRD: {
    if (Subtarget->isThumb2())
      break; // TableGen handles isel in this case.
    SDValue Base, RegOffset, ImmOffset;
    const SDValue &Chain = N->getOperand(0);
    const SDValue &Addr = N->getOperand(3);
    SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
    if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
      // The register-offset variant of STRD mandates that the register
      // allocated to RegOffset is not reused in any of the remaining operands.
      // This restriction is currently not enforced. Therefore emitting this
      // variant is explicitly avoided.
      Base = Addr;
      RegOffset = CurDAG->getRegister(0, MVT::i32);
    }
    // Bundle the two value operands into an Untyped register pair for
    // STOREDUAL.
    SDNode *RegPair =
        createGPRPairNode(MVT::Untyped, N->getOperand(1), N->getOperand(2));
    SDValue Ops[] = {SDValue(RegPair, 0), Base, RegOffset, ImmOffset, Chain};
    SDNode *New = CurDAG->getMachineNode(ARM::STOREDUAL, dl, MVT::Other, Ops);
    transferMemOperands(N, New);
    ReplaceUses(SDValue(N, 0), SDValue(New, 0));
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::LOOP_DEC: {
    // Low-overhead loop decrement; chain operand moves to the end for the
    // machine node.
    SDValue Ops[] = { N->getOperand(1),
                      N->getOperand(2),
                      N->getOperand(0) };
    SDNode *Dec =
      CurDAG->getMachineNode(ARM::t2LoopDec, dl,
                             CurDAG->getVTList(MVT::i32, MVT::Other), Ops);
    ReplaceUses(N, Dec);
    CurDAG->RemoveDeadNode(N);
    return;
  }
  case ARMISD::BRCOND: {
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    unsigned Opc = Subtarget->isThumb() ?
      ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
    SDValue Chain = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    SDValue InGlue = N->getOperand(4);
    assert(N1.getOpcode() == ISD::BasicBlock);
    assert(N2.getOpcode() == ISD::Constant);
    assert(N3.getOpcode() == ISD::Register);

    unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();

    if (InGlue.getOpcode() == ARMISD::CMPZ) {
      if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
        SDValue Int = InGlue.getOperand(0);
        uint64_t ID = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();

        // Handle low-overhead loops.
        if (ID == Intrinsic::loop_decrement_reg) {
          // A branch on the result of loop.decrement.reg becomes a
          // t2LoopDec + t2LoopEnd pair; the intrinsic, the CMPZ and the
          // original branch node are all replaced.
          SDValue Elements = Int.getOperand(2);
          SDValue Size = CurDAG->getTargetConstant(
            cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
                                 MVT::i32);

          SDValue Args[] = { Elements, Size, Int.getOperand(0) };
          SDNode *LoopDec =
            CurDAG->getMachineNode(ARM::t2LoopDec, dl,
                                   CurDAG->getVTList(MVT::i32, MVT::Other),
                                   Args);
          ReplaceUses(Int.getNode(), LoopDec);

          SDValue EndArgs[] = { SDValue(LoopDec, 0), N1, Chain };
          SDNode *LoopEnd =
            CurDAG->getMachineNode(ARM::t2LoopEnd, dl, MVT::Other, EndArgs);

          ReplaceUses(N, LoopEnd);
          CurDAG->RemoveDeadNode(N);
          CurDAG->RemoveDeadNode(InGlue.getNode());
          CurDAG->RemoveDeadNode(Int.getNode());
          return;
        }
      }

      // SelectCMPZ may rewrite the compare; if it did, EQ/NE must be
      // retargeted to PL/MI. Re-read the glue operand in case the node
      // was updated.
      bool SwitchEQNEToPLMI;
      SelectCMPZ(InGlue.getNode(), SwitchEQNEToPLMI);
      InGlue = N->getOperand(4);

      if (SwitchEQNEToPLMI) {
        switch ((ARMCC::CondCodes)CC) {
        default: llvm_unreachable("CMPZ must be either NE or EQ!");
        case ARMCC::NE:
          CC = (unsigned)ARMCC::MI;
          break;
        case ARMCC::EQ:
          CC = (unsigned)ARMCC::PL;
          break;
        }
      }
    }

    // Emit the conditional branch with target block, condition code,
    // CPSR register, chain and incoming glue.
    SDValue Tmp2 = CurDAG->getTargetConstant(CC, dl, MVT::i32);
    SDValue Ops[] = { N1, Tmp2, N3, Chain, InGlue };
    SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                             MVT::Glue, Ops);
    Chain = SDValue(ResNode, 0);
    if (N->getNumValues() == 2) {
      InGlue = SDValue(ResNode, 1);
      ReplaceUses(SDValue(N, 1), InGlue);
    }
    ReplaceUses(SDValue(N, 0),
                SDValue(Chain.getNode(), Chain.getResNo()));
    CurDAG->RemoveDeadNode(N);
    return;
  }
4200 
  case ARMISD::CMPZ: {
    // select (CMPZ X, #-C) -> (CMPZ (ADDS X, #C), #0)
    //   This allows us to avoid materializing the expensive negative constant.
    //   The CMPZ #0 is useless and will be peepholed away but we need to keep it
    //   for its glue output.
    SDValue X = N->getOperand(0);
    auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1).getNode());
    if (C && C->getSExtValue() < 0 && Subtarget->isThumb()) {
      int64_t Addend = -C->getSExtValue();

      SDNode *Add = nullptr;
      // ADDS can be better than CMN if the immediate fits in a
      // 16-bit ADDS, which means either [0,256) for tADDi8 or [0,8) for tADDi3.
      // Outside that range we can just use a CMN which is 32-bit but has a
      // 12-bit immediate range.
      if (Addend < 1<<8) {
        if (Subtarget->isThumb2()) {
          SDValue Ops[] = { X, CurDAG->getTargetConstant(Addend, dl, MVT::i32),
                            getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
                            CurDAG->getRegister(0, MVT::i32) };
          Add = CurDAG->getMachineNode(ARM::t2ADDri, dl, MVT::i32, Ops);
        } else {
          // Thumb1 ADDs define CPSR explicitly as the first operand.
          unsigned Opc = (Addend < 1<<3) ? ARM::tADDi3 : ARM::tADDi8;
          SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), X,
                           CurDAG->getTargetConstant(Addend, dl, MVT::i32),
                           getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
          Add = CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
        }
      }
      if (Add) {
        // Morph this CMPZ in place to compare the ADD result against zero.
        SDValue Ops2[] = {SDValue(Add, 0), CurDAG->getConstant(0, dl, MVT::i32)};
        CurDAG->MorphNodeTo(N, ARMISD::CMPZ, CurDAG->getVTList(MVT::Glue), Ops2);
      }
    }
    // Other cases are autogenerated.
    break;
  }
4238 
  case ARMISD::CMOV: {
    SDValue InGlue = N->getOperand(4);

    if (InGlue.getOpcode() == ARMISD::CMPZ) {
      // SelectCMPZ may rewrite the feeding compare; if so the EQ/NE
      // condition on this CMOV has to be retargeted to PL/MI.
      bool SwitchEQNEToPLMI;
      SelectCMPZ(InGlue.getNode(), SwitchEQNEToPLMI);

      if (SwitchEQNEToPLMI) {
        SDValue ARMcc = N->getOperand(2);
        ARMCC::CondCodes CC =
          (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

        switch (CC) {
        default: llvm_unreachable("CMPZ must be either NE or EQ!");
        case ARMCC::NE:
          CC = ARMCC::MI;
          break;
        case ARMCC::EQ:
          CC = ARMCC::PL;
          break;
        }
        // Rebuild the CMOV in place with the adjusted condition code.
        SDValue NewARMcc = CurDAG->getConstant((unsigned)CC, dl, MVT::i32);
        SDValue Ops[] = {N->getOperand(0), N->getOperand(1), NewARMcc,
                         N->getOperand(3), N->getOperand(4)};
        CurDAG->MorphNodeTo(N, ARMISD::CMOV, N->getVTList(), Ops);
      }

    }
    // Other cases are autogenerated.
    break;
  }
  case ARMISD::VZIP: {
    // NEON interleave; each opcode table is indexed by element size
    // (8/16/32) via getVectorShuffleOpcode, and the instruction produces
    // both permuted registers (two results of type VT).
    EVT VT = N->getValueType(0);
    // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
    unsigned Opc64[] = {ARM::VZIPd8, ARM::VZIPd16, ARM::VTRNd32};
    unsigned Opc128[] = {ARM::VZIPq8, ARM::VZIPq16, ARM::VZIPq32};
    unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
    SDValue Pred = getAL(CurDAG, dl);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
    ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
    return;
  }
  case ARMISD::VUZP: {
    EVT VT = N->getValueType(0);
    // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
    unsigned Opc64[] = {ARM::VUZPd8, ARM::VUZPd16, ARM::VTRNd32};
    unsigned Opc128[] = {ARM::VUZPq8, ARM::VUZPq16, ARM::VUZPq32};
    unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
    SDValue Pred = getAL(CurDAG, dl);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
    ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
    return;
  }
  case ARMISD::VTRN: {
    EVT VT = N->getValueType(0);
    unsigned Opc64[] = {ARM::VTRNd8, ARM::VTRNd16, ARM::VTRNd32};
    unsigned Opc128[] = {ARM::VTRNq8, ARM::VTRNq16, ARM::VTRNq32};
    unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
    SDValue Pred = getAL(CurDAG, dl);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
    ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
    return;
  }
  case ARMISD::BUILD_VECTOR: {
    // Build a vector register directly from scalar FP registers:
    // 2 x f64 -> D-register pair, 2 x f32 -> S-register pair,
    // 4 x f32 -> quad of S registers.
    EVT VecVT = N->getValueType(0);
    EVT EltVT = VecVT.getVectorElementType();
    unsigned NumElts = VecVT.getVectorNumElements();
    if (EltVT == MVT::f64) {
      assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
      ReplaceNode(
          N, createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
      return;
    }
    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
    if (NumElts == 2) {
      ReplaceNode(
          N, createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
      return;
    }
    assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
    ReplaceNode(N,
                createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
                                    N->getOperand(2), N->getOperand(3)));
    return;
  }
4327 
  case ARMISD::VLD1DUP: {
    // Load-and-duplicate, no writeback. Opcode tables are indexed by element
    // size (8/16/32); VLD1DUP has both D- and Q-register forms.
    static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8, ARM::VLD1DUPd16,
                                         ARM::VLD1DUPd32 };
    static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8, ARM::VLD1DUPq16,
                                         ARM::VLD1DUPq32 };
    SelectVLDDup(N, /* IsIntrinsic= */ false, false, 1, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VLD2DUP: {
    static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
                                        ARM::VLD2DUPd32 };
    SelectVLDDup(N, /* IsIntrinsic= */ false, false, 2, Opcodes);
    return;
  }

  case ARMISD::VLD3DUP: {
    static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
                                        ARM::VLD3DUPd16Pseudo,
                                        ARM::VLD3DUPd32Pseudo };
    SelectVLDDup(N, /* IsIntrinsic= */ false, false, 3, Opcodes);
    return;
  }

  case ARMISD::VLD4DUP: {
    static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
                                        ARM::VLD4DUPd16Pseudo,
                                        ARM::VLD4DUPd32Pseudo };
    SelectVLDDup(N, /* IsIntrinsic= */ false, false, 4, Opcodes);
    return;
  }
4359 
  case ARMISD::VLD1DUP_UPD: {
    // Load-and-duplicate with address writeback (third SelectVLDDup argument
    // 'true'); '_fixed' opcodes use the fixed post-increment form.
    static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8wb_fixed,
                                         ARM::VLD1DUPd16wb_fixed,
                                         ARM::VLD1DUPd32wb_fixed };
    static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8wb_fixed,
                                         ARM::VLD1DUPq16wb_fixed,
                                         ARM::VLD1DUPq32wb_fixed };
    SelectVLDDup(N, /* IsIntrinsic= */ false, true, 1, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VLD2DUP_UPD: {
    // Q-register forms are split into even/odd pseudo pairs.
    static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8wb_fixed,
                                         ARM::VLD2DUPd16wb_fixed,
                                         ARM::VLD2DUPd32wb_fixed,
                                         ARM::VLD1q64wb_fixed };
    static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
                                          ARM::VLD2DUPq16EvenPseudo,
                                          ARM::VLD2DUPq32EvenPseudo };
    static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudoWB_fixed,
                                          ARM::VLD2DUPq16OddPseudoWB_fixed,
                                          ARM::VLD2DUPq32OddPseudoWB_fixed };
    SelectVLDDup(N, /* IsIntrinsic= */ false, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
    return;
  }

  case ARMISD::VLD3DUP_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
                                         ARM::VLD3DUPd16Pseudo_UPD,
                                         ARM::VLD3DUPd32Pseudo_UPD,
                                         ARM::VLD1d64TPseudoWB_fixed };
    static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
                                          ARM::VLD3DUPq16EvenPseudo,
                                          ARM::VLD3DUPq32EvenPseudo };
    static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo_UPD,
                                          ARM::VLD3DUPq16OddPseudo_UPD,
                                          ARM::VLD3DUPq32OddPseudo_UPD };
    SelectVLDDup(N, /* IsIntrinsic= */ false, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
    return;
  }

  case ARMISD::VLD4DUP_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
                                         ARM::VLD4DUPd16Pseudo_UPD,
                                         ARM::VLD4DUPd32Pseudo_UPD,
                                         ARM::VLD1d64QPseudoWB_fixed };
    static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
                                          ARM::VLD4DUPq16EvenPseudo,
                                          ARM::VLD4DUPq32EvenPseudo };
    static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo_UPD,
                                          ARM::VLD4DUPq16OddPseudo_UPD,
                                          ARM::VLD4DUPq32OddPseudo_UPD };
    SelectVLDDup(N, /* IsIntrinsic= */ false, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
    return;
  }
4415 
  case ARMISD::VLD1_UPD: {
    // Structured load with writeback; tables indexed by element size
    // (8/16/32/64).
    static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
                                         ARM::VLD1d16wb_fixed,
                                         ARM::VLD1d32wb_fixed,
                                         ARM::VLD1d64wb_fixed };
    static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
                                         ARM::VLD1q16wb_fixed,
                                         ARM::VLD1q32wb_fixed,
                                         ARM::VLD1q64wb_fixed };
    SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
    return;
  }

  case ARMISD::VLD2_UPD: {
    // NEON gets VLD2 writeback opcodes; otherwise use the MVE VLD2x stage
    // opcodes (last stage carries the writeback, '_wb').
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VLD2d8wb_fixed, ARM::VLD2d16wb_fixed, ARM::VLD2d32wb_fixed,
          ARM::VLD1q64wb_fixed};
      static const uint16_t QOpcodes[] = {ARM::VLD2q8PseudoWB_fixed,
                                          ARM::VLD2q16PseudoWB_fixed,
                                          ARM::VLD2q32PseudoWB_fixed};
      SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
    } else {
      static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8,
                                          ARM::MVE_VLD21_8_wb};
      static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
                                           ARM::MVE_VLD21_16_wb};
      static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
                                           ARM::MVE_VLD21_32_wb};
      static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
      SelectMVE_VLD(N, 2, Opcodes, true);
    }
    return;
  }
4450 
  case ARMISD::VLD3_UPD: {
    // 3-element structured load with writeback; Q-register forms are split
    // into paired even/odd pseudos.
    static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
                                         ARM::VLD3d16Pseudo_UPD,
                                         ARM::VLD3d32Pseudo_UPD,
                                         ARM::VLD1d64TPseudoWB_fixed};
    static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
                                          ARM::VLD3q16Pseudo_UPD,
                                          ARM::VLD3q32Pseudo_UPD };
    static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
                                          ARM::VLD3q16oddPseudo_UPD,
                                          ARM::VLD3q32oddPseudo_UPD };
    SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
    return;
  }

  case ARMISD::VLD4_UPD: {
    // 4-element structured load with writeback: NEON VLD4 pseudos, or the
    // four-stage MVE VLD4x sequence (final stage '_wb' does the writeback).
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD, ARM::VLD4d32Pseudo_UPD,
          ARM::VLD1d64QPseudoWB_fixed};
      static const uint16_t QOpcodes0[] = {ARM::VLD4q8Pseudo_UPD,
                                           ARM::VLD4q16Pseudo_UPD,
                                           ARM::VLD4q32Pseudo_UPD};
      static const uint16_t QOpcodes1[] = {ARM::VLD4q8oddPseudo_UPD,
                                           ARM::VLD4q16oddPseudo_UPD,
                                           ARM::VLD4q32oddPseudo_UPD};
      SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
    } else {
      static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
                                          ARM::MVE_VLD42_8,
                                          ARM::MVE_VLD43_8_wb};
      static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
                                           ARM::MVE_VLD42_16,
                                           ARM::MVE_VLD43_16_wb};
      static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
                                           ARM::MVE_VLD42_32,
                                           ARM::MVE_VLD43_32_wb};
      static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
      SelectMVE_VLD(N, 4, Opcodes, true);
    }
    return;
  }
4493 
  case ARMISD::VLD1x2_UPD: {
    // Post-incrementing VLD1-style (non-interleaving) multi-register loads.
    // These are only matched here for NEON; otherwise break so the node
    // falls through to default selection.
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VLD1q8wb_fixed, ARM::VLD1q16wb_fixed, ARM::VLD1q32wb_fixed,
          ARM::VLD1q64wb_fixed};
      static const uint16_t QOpcodes[] = {
          ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
          ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
      SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }
    break;
  }

  case ARMISD::VLD1x3_UPD: {
    // As above, for three consecutive registers. The Q forms are split into
    // Low/High halves (two opcode tables), per the opcode names.
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VLD1d8TPseudoWB_fixed, ARM::VLD1d16TPseudoWB_fixed,
          ARM::VLD1d32TPseudoWB_fixed, ARM::VLD1d64TPseudoWB_fixed};
      static const uint16_t QOpcodes0[] = {
          ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1q16LowTPseudo_UPD,
          ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1q64LowTPseudo_UPD};
      static const uint16_t QOpcodes1[] = {
          ARM::VLD1q8HighTPseudo_UPD, ARM::VLD1q16HighTPseudo_UPD,
          ARM::VLD1q32HighTPseudo_UPD, ARM::VLD1q64HighTPseudo_UPD};
      SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
    break;
  }

  case ARMISD::VLD1x4_UPD: {
    // As above, for four consecutive registers.
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
          ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
      static const uint16_t QOpcodes0[] = {
          ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1q16LowQPseudo_UPD,
          ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1q64LowQPseudo_UPD};
      static const uint16_t QOpcodes1[] = {
          ARM::VLD1q8HighQPseudo_UPD, ARM::VLD1q16HighQPseudo_UPD,
          ARM::VLD1q32HighQPseudo_UPD, ARM::VLD1q64HighQPseudo_UPD};
      SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
    break;
  }
4541 
  case ARMISD::VLD2LN_UPD: {
    // Post-incrementing single-lane interleaved loads (VLDn to one lane).
    // Note the Q tables start at 16-bit: there is no q8 lane form, since an
    // 8-bit lane always fits in a D register.
    static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
                                         ARM::VLD2LNd16Pseudo_UPD,
                                         ARM::VLD2LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
                                         ARM::VLD2LNq32Pseudo_UPD };
    SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VLD3LN_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
                                         ARM::VLD3LNd16Pseudo_UPD,
                                         ARM::VLD3LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
                                         ARM::VLD3LNq32Pseudo_UPD };
    SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VLD4LN_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
                                         ARM::VLD4LNd16Pseudo_UPD,
                                         ARM::VLD4LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
                                         ARM::VLD4LNq32Pseudo_UPD };
    SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
    return;
  }
4571 
  case ARMISD::VST1_UPD: {
    // Post-incrementing stores, mirroring the VLD*_UPD cases above: opcode
    // tables indexed by element size (8/16/32/64-bit), D- and Q-register
    // variants selected by SelectVST.
    static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
                                         ARM::VST1d16wb_fixed,
                                         ARM::VST1d32wb_fixed,
                                         ARM::VST1d64wb_fixed };
    static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
                                         ARM::VST1q16wb_fixed,
                                         ARM::VST1q32wb_fixed,
                                         ARM::VST1q64wb_fixed };
    SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
    return;
  }

  case ARMISD::VST2_UPD: {
    // Only handled here for NEON; otherwise break to default selection.
    // 64-bit elements use a VST1 form (no interleaving needed).
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VST2d8wb_fixed, ARM::VST2d16wb_fixed, ARM::VST2d32wb_fixed,
          ARM::VST1q64wb_fixed};
      static const uint16_t QOpcodes[] = {ARM::VST2q8PseudoWB_fixed,
                                          ARM::VST2q16PseudoWB_fixed,
                                          ARM::VST2q32PseudoWB_fixed};
      SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }
    break;
  }

  case ARMISD::VST3_UPD: {
    // Q-register forms split into base/odd tables, as for VLD3_UPD.
    static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
                                         ARM::VST3d16Pseudo_UPD,
                                         ARM::VST3d32Pseudo_UPD,
                                         ARM::VST1d64TPseudoWB_fixed};
    static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
                                          ARM::VST3q16Pseudo_UPD,
                                          ARM::VST3q32Pseudo_UPD };
    static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
                                          ARM::VST3q16oddPseudo_UPD,
                                          ARM::VST3q32oddPseudo_UPD };
    SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
    return;
  }
4613 
  case ARMISD::VST4_UPD: {
    // Post-incrementing 4-element interleaved store (NEON only; otherwise
    // break to default selection).
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = {
          ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD, ARM::VST4d32Pseudo_UPD,
          ARM::VST1d64QPseudoWB_fixed};
      static const uint16_t QOpcodes0[] = {ARM::VST4q8Pseudo_UPD,
                                           ARM::VST4q16Pseudo_UPD,
                                           ARM::VST4q32Pseudo_UPD};
      static const uint16_t QOpcodes1[] = {ARM::VST4q8oddPseudo_UPD,
                                           ARM::VST4q16oddPseudo_UPD,
                                           ARM::VST4q32oddPseudo_UPD};
      SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
    break;
  }

  case ARMISD::VST1x2_UPD: {
    // Post-incrementing VST1-style (non-interleaving) multi-register stores,
    // mirroring the VLD1xN_UPD cases above (NEON only).
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = { ARM::VST1q8wb_fixed,
                                           ARM::VST1q16wb_fixed,
                                           ARM::VST1q32wb_fixed,
                                           ARM::VST1q64wb_fixed};
      static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
                                           ARM::VST1d16QPseudoWB_fixed,
                                           ARM::VST1d32QPseudoWB_fixed,
                                           ARM::VST1d64QPseudoWB_fixed };
      SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }
    break;
  }

  case ARMISD::VST1x3_UPD: {
    // Three registers; Q forms split into Low/High halves.
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudoWB_fixed,
                                           ARM::VST1d16TPseudoWB_fixed,
                                           ARM::VST1d32TPseudoWB_fixed,
                                           ARM::VST1d64TPseudoWB_fixed };
      static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
                                            ARM::VST1q16LowTPseudo_UPD,
                                            ARM::VST1q32LowTPseudo_UPD,
                                            ARM::VST1q64LowTPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo_UPD,
                                            ARM::VST1q16HighTPseudo_UPD,
                                            ARM::VST1q32HighTPseudo_UPD,
                                            ARM::VST1q64HighTPseudo_UPD };
      SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
    break;
  }

  case ARMISD::VST1x4_UPD: {
    // Four registers; Q forms split into Low/High halves.
    if (Subtarget->hasNEON()) {
      static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
                                           ARM::VST1d16QPseudoWB_fixed,
                                           ARM::VST1d32QPseudoWB_fixed,
                                           ARM::VST1d64QPseudoWB_fixed };
      static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
                                            ARM::VST1q16LowQPseudo_UPD,
                                            ARM::VST1q32LowQPseudo_UPD,
                                            ARM::VST1q64LowQPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo_UPD,
                                            ARM::VST1q16HighQPseudo_UPD,
                                            ARM::VST1q32HighQPseudo_UPD,
                                            ARM::VST1q64HighQPseudo_UPD };
      SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
    break;
  }
  case ARMISD::VST2LN_UPD: {
    // Post-incrementing single-lane interleaved stores (VSTn of one lane).
    // Q tables start at 16-bit: 8-bit lanes always fit in a D register.
    static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
                                         ARM::VST2LNd16Pseudo_UPD,
                                         ARM::VST2LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
                                         ARM::VST2LNq32Pseudo_UPD };
    SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VST3LN_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
                                         ARM::VST3LNd16Pseudo_UPD,
                                         ARM::VST3LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
                                         ARM::VST3LNq32Pseudo_UPD };
    SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
    return;
  }

  case ARMISD::VST4LN_UPD: {
    static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
                                         ARM::VST4LNd16Pseudo_UPD,
                                         ARM::VST4LNd32Pseudo_UPD };
    static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
                                         ARM::VST4LNq32Pseudo_UPD };
    SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
    return;
  }
4715 
4716   case ISD::INTRINSIC_VOID:
4717   case ISD::INTRINSIC_W_CHAIN: {
4718     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4719     switch (IntNo) {
4720     default:
4721       break;
4722 
4723     case Intrinsic::arm_mrrc:
4724     case Intrinsic::arm_mrrc2: {
4725       SDLoc dl(N);
4726       SDValue Chain = N->getOperand(0);
4727       unsigned Opc;
4728 
4729       if (Subtarget->isThumb())
4730         Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::t2MRRC : ARM::t2MRRC2);
4731       else
4732         Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
4733 
4734       SmallVector<SDValue, 5> Ops;
4735       Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
4736       Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
4737       Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
4738 
4739       // The mrrc2 instruction in ARM doesn't allow predicates, the top 4 bits of the encoded
4740       // instruction will always be '1111' but it is possible in assembly language to specify
4741       // AL as a predicate to mrrc2 but it doesn't make any difference to the encoded instruction.
4742       if (Opc != ARM::MRRC2) {
4743         Ops.push_back(getAL(CurDAG, dl));
4744         Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4745       }
4746 
4747       Ops.push_back(Chain);
4748 
4749       // Writes to two registers.
4750       const EVT RetType[] = {MVT::i32, MVT::i32, MVT::Other};
4751 
4752       ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, RetType, Ops));
4753       return;
4754     }
    case Intrinsic::arm_ldaexd:
    case Intrinsic::arm_ldrexd: {
      // Select a 64-bit exclusive load (LDREXD) or load-acquire exclusive
      // (LDAEXD). The Thumb2 encodings require v8-M baseline ops.
      SDLoc dl(N);
      SDValue Chain = N->getOperand(0);
      SDValue MemAddr = N->getOperand(2);
      bool isThumb = Subtarget->isThumb() && Subtarget->hasV8MBaselineOps();

      bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
      unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
                                : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);

      // arm_ldrexd returns a i64 value in {i32, i32}
      // Thumb yields the two halves as separate i32 results; ARM yields one
      // Untyped GPRPair result that is split below with EXTRACT_SUBREG.
      std::vector<EVT> ResTys;
      if (isThumb) {
        ResTys.push_back(MVT::i32);
        ResTys.push_back(MVT::i32);
      } else
        ResTys.push_back(MVT::Untyped);
      ResTys.push_back(MVT::Other);

      // Place arguments in the right order.
      SDValue Ops[] = {MemAddr, getAL(CurDAG, dl),
                       CurDAG->getRegister(0, MVT::i32), Chain};
      SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
      // Transfer memoperands.
      MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
      CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});

      // Remap uses.
      // The chain is result 2 on Thumb (after the two i32s) but result 1 on
      // ARM (after the single GPRPair result).
      SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
      if (!SDValue(N, 0).use_empty()) {
        SDValue Result;
        if (isThumb)
          Result = SDValue(Ld, 0);
        else {
          // Extract the low half of the GPRPair (gsub_0).
          SDValue SubRegIdx =
            CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
          SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
              dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
          Result = SDValue(ResNode,0);
        }
        ReplaceUses(SDValue(N, 0), Result);
      }
      if (!SDValue(N, 1).use_empty()) {
        SDValue Result;
        if (isThumb)
          Result = SDValue(Ld, 1);
        else {
          // Extract the high half of the GPRPair (gsub_1).
          SDValue SubRegIdx =
            CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
          SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
              dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
          Result = SDValue(ResNode,0);
        }
        ReplaceUses(SDValue(N, 1), Result);
      }
      ReplaceUses(SDValue(N, 2), OutChain);
      CurDAG->RemoveDeadNode(N);
      return;
    }
    case Intrinsic::arm_stlexd:
    case Intrinsic::arm_strexd: {
      // Select a 64-bit exclusive store (STREXD) or store-release exclusive
      // (STLEXD). Operands 2/3 are the value halves, operand 4 the address.
      SDLoc dl(N);
      SDValue Chain = N->getOperand(0);
      SDValue Val0 = N->getOperand(2);
      SDValue Val1 = N->getOperand(3);
      SDValue MemAddr = N->getOperand(4);

      // Store exclusive double return a i32 value which is the return status
      // of the issued store.
      const EVT ResTys[] = {MVT::i32, MVT::Other};

      bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
      // Place arguments in the right order.
      // Thumb takes the two halves separately; ARM needs them bundled into
      // a single GPRPair operand.
      SmallVector<SDValue, 7> Ops;
      if (isThumb) {
        Ops.push_back(Val0);
        Ops.push_back(Val1);
      } else
        // arm_strexd uses GPRPair.
        Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
      Ops.push_back(MemAddr);
      Ops.push_back(getAL(CurDAG, dl));
      Ops.push_back(CurDAG->getRegister(0, MVT::i32));
      Ops.push_back(Chain);

      bool IsRelease = IntNo == Intrinsic::arm_stlexd;
      unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
                                : (IsRelease ? ARM::STLEXD : ARM::STREXD);

      SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
      // Transfer memoperands.
      MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
      CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});

      ReplaceNode(N, St);
      return;
    }
4853 
    case Intrinsic::arm_neon_vld1: {
      // Non-updating forms of the NEON vldN intrinsics. These mirror the
      // ARMISD::VLDn_UPD cases above but pass isUpdating=false to SelectVLD.
      static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
                                           ARM::VLD1d32, ARM::VLD1d64 };
      static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
                                           ARM::VLD1q32, ARM::VLD1q64};
      SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vld1x2: {
      static const uint16_t DOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
                                           ARM::VLD1q32, ARM::VLD1q64 };
      static const uint16_t QOpcodes[] = { ARM::VLD1d8QPseudo,
                                           ARM::VLD1d16QPseudo,
                                           ARM::VLD1d32QPseudo,
                                           ARM::VLD1d64QPseudo };
      SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vld1x3: {
      static const uint16_t DOpcodes[] = { ARM::VLD1d8TPseudo,
                                           ARM::VLD1d16TPseudo,
                                           ARM::VLD1d32TPseudo,
                                           ARM::VLD1d64TPseudo };
      // Note the Low table uses _UPD pseudos while the High table does not.
      static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowTPseudo_UPD,
                                            ARM::VLD1q16LowTPseudo_UPD,
                                            ARM::VLD1q32LowTPseudo_UPD,
                                            ARM::VLD1q64LowTPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighTPseudo,
                                            ARM::VLD1q16HighTPseudo,
                                            ARM::VLD1q32HighTPseudo,
                                            ARM::VLD1q64HighTPseudo };
      SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld1x4: {
      static const uint16_t DOpcodes[] = { ARM::VLD1d8QPseudo,
                                           ARM::VLD1d16QPseudo,
                                           ARM::VLD1d32QPseudo,
                                           ARM::VLD1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowQPseudo_UPD,
                                            ARM::VLD1q16LowQPseudo_UPD,
                                            ARM::VLD1q32LowQPseudo_UPD,
                                            ARM::VLD1q64LowQPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighQPseudo,
                                            ARM::VLD1q16HighQPseudo,
                                            ARM::VLD1q32HighQPseudo,
                                            ARM::VLD1q64HighQPseudo };
      SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld2: {
      // 64-bit elements fall back to a VLD1 form (no de-interleaving).
      static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
                                           ARM::VLD2d32, ARM::VLD1q64 };
      static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
                                           ARM::VLD2q32Pseudo };
      SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vld3: {
      static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
                                           ARM::VLD3d16Pseudo,
                                           ARM::VLD3d32Pseudo,
                                           ARM::VLD1d64TPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
                                            ARM::VLD3q16Pseudo_UPD,
                                            ARM::VLD3q32Pseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
                                            ARM::VLD3q16oddPseudo,
                                            ARM::VLD3q32oddPseudo };
      SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld4: {
      static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
                                           ARM::VLD4d16Pseudo,
                                           ARM::VLD4d32Pseudo,
                                           ARM::VLD1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
                                            ARM::VLD4q16Pseudo_UPD,
                                            ARM::VLD4q32Pseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
                                            ARM::VLD4q16oddPseudo,
                                            ARM::VLD4q32oddPseudo };
      SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
4946 
    case Intrinsic::arm_neon_vld2dup: {
      // Load-and-duplicate intrinsics: load N elements and replicate each
      // across all lanes. Q forms split into Even/Odd pseudo tables.
      static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
                                           ARM::VLD2DUPd32, ARM::VLD1q64 };
      static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
                                            ARM::VLD2DUPq16EvenPseudo,
                                            ARM::VLD2DUPq32EvenPseudo };
      static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudo,
                                            ARM::VLD2DUPq16OddPseudo,
                                            ARM::VLD2DUPq32OddPseudo };
      SelectVLDDup(N, /* IsIntrinsic= */ true, false, 2,
                   DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld3dup: {
      static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo,
                                           ARM::VLD3DUPd16Pseudo,
                                           ARM::VLD3DUPd32Pseudo,
                                           ARM::VLD1d64TPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
                                            ARM::VLD3DUPq16EvenPseudo,
                                            ARM::VLD3DUPq32EvenPseudo };
      static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo,
                                            ARM::VLD3DUPq16OddPseudo,
                                            ARM::VLD3DUPq32OddPseudo };
      SelectVLDDup(N, /* IsIntrinsic= */ true, false, 3,
                   DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld4dup: {
      static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo,
                                           ARM::VLD4DUPd16Pseudo,
                                           ARM::VLD4DUPd32Pseudo,
                                           ARM::VLD1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
                                            ARM::VLD4DUPq16EvenPseudo,
                                            ARM::VLD4DUPq32EvenPseudo };
      static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo,
                                            ARM::VLD4DUPq16OddPseudo,
                                            ARM::VLD4DUPq32OddPseudo };
      SelectVLDDup(N, /* IsIntrinsic= */ true, false, 4,
                   DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vld2lane: {
      // Single-lane loads (non-updating). Q tables start at 16-bit: 8-bit
      // lanes always fit in a D register.
      static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
                                           ARM::VLD2LNd16Pseudo,
                                           ARM::VLD2LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
                                           ARM::VLD2LNq32Pseudo };
      SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
      return;
    }

    case Intrinsic::arm_neon_vld3lane: {
      static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
                                           ARM::VLD3LNd16Pseudo,
                                           ARM::VLD3LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
                                           ARM::VLD3LNq32Pseudo };
      SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
      return;
    }

    case Intrinsic::arm_neon_vld4lane: {
      static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
                                           ARM::VLD4LNd16Pseudo,
                                           ARM::VLD4LNd32Pseudo };
      static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
                                           ARM::VLD4LNq32Pseudo };
      SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
      return;
    }
5022 
    case Intrinsic::arm_neon_vst1: {
      // Non-updating forms of the NEON vstN intrinsics, mirroring the vldN
      // intrinsic cases above (isUpdating=false).
      static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
                                           ARM::VST1d32, ARM::VST1d64 };
      static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
                                           ARM::VST1q32, ARM::VST1q64 };
      SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vst1x2: {
      static const uint16_t DOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
                                           ARM::VST1q32, ARM::VST1q64 };
      static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudo,
                                           ARM::VST1d16QPseudo,
                                           ARM::VST1d32QPseudo,
                                           ARM::VST1d64QPseudo };
      SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vst1x3: {
      static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudo,
                                           ARM::VST1d16TPseudo,
                                           ARM::VST1d32TPseudo,
                                           ARM::VST1d64TPseudo };
      // Note the Low table uses _UPD pseudos while the High table does not.
      static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
                                            ARM::VST1q16LowTPseudo_UPD,
                                            ARM::VST1q32LowTPseudo_UPD,
                                            ARM::VST1q64LowTPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo,
                                            ARM::VST1q16HighTPseudo,
                                            ARM::VST1q32HighTPseudo,
                                            ARM::VST1q64HighTPseudo };
      SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vst1x4: {
      static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudo,
                                           ARM::VST1d16QPseudo,
                                           ARM::VST1d32QPseudo,
                                           ARM::VST1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
                                            ARM::VST1q16LowQPseudo_UPD,
                                            ARM::VST1q32LowQPseudo_UPD,
                                            ARM::VST1q64LowQPseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo,
                                            ARM::VST1q16HighQPseudo,
                                            ARM::VST1q32HighQPseudo,
                                            ARM::VST1q64HighQPseudo };
      SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vst2: {
      // 64-bit elements fall back to a VST1 form (no interleaving needed).
      static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
                                           ARM::VST2d32, ARM::VST1q64 };
      static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
                                           ARM::VST2q32Pseudo };
      SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
      return;
    }

    case Intrinsic::arm_neon_vst3: {
      static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
                                           ARM::VST3d16Pseudo,
                                           ARM::VST3d32Pseudo,
                                           ARM::VST1d64TPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
                                            ARM::VST3q16Pseudo_UPD,
                                            ARM::VST3q32Pseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
                                            ARM::VST3q16oddPseudo,
                                            ARM::VST3q32oddPseudo };
      SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }

    case Intrinsic::arm_neon_vst4: {
      static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
                                           ARM::VST4d16Pseudo,
                                           ARM::VST4d32Pseudo,
                                           ARM::VST1d64QPseudo };
      static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
                                            ARM::VST4q16Pseudo_UPD,
                                            ARM::VST4q32Pseudo_UPD };
      static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
                                            ARM::VST4q16oddPseudo,
                                            ARM::VST4q32oddPseudo };
      SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
      return;
    }
5115 
5116     case Intrinsic::arm_neon_vst2lane: {
5117       static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
5118                                            ARM::VST2LNd16Pseudo,
5119                                            ARM::VST2LNd32Pseudo };
5120       static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
5121                                            ARM::VST2LNq32Pseudo };
5122       SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
5123       return;
5124     }
5125 
5126     case Intrinsic::arm_neon_vst3lane: {
5127       static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
5128                                            ARM::VST3LNd16Pseudo,
5129                                            ARM::VST3LNd32Pseudo };
5130       static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
5131                                            ARM::VST3LNq32Pseudo };
5132       SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
5133       return;
5134     }
5135 
5136     case Intrinsic::arm_neon_vst4lane: {
5137       static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
5138                                            ARM::VST4LNd16Pseudo,
5139                                            ARM::VST4LNd32Pseudo };
5140       static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
5141                                            ARM::VST4LNq32Pseudo };
5142       SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
5143       return;
5144     }
5145 
5146     case Intrinsic::arm_mve_vldr_gather_base_wb:
5147     case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
5148       static const uint16_t Opcodes[] = {ARM::MVE_VLDRWU32_qi_pre,
5149                                          ARM::MVE_VLDRDU64_qi_pre};
5150       SelectMVE_WB(N, Opcodes,
5151                    IntNo == Intrinsic::arm_mve_vldr_gather_base_wb_predicated);
5152       return;
5153     }
5154 
5155     case Intrinsic::arm_mve_vld2q: {
5156       static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8, ARM::MVE_VLD21_8};
5157       static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
5158                                            ARM::MVE_VLD21_16};
5159       static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
5160                                            ARM::MVE_VLD21_32};
5161       static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5162       SelectMVE_VLD(N, 2, Opcodes, false);
5163       return;
5164     }
5165 
5166     case Intrinsic::arm_mve_vld4q: {
5167       static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
5168                                           ARM::MVE_VLD42_8, ARM::MVE_VLD43_8};
5169       static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
5170                                            ARM::MVE_VLD42_16,
5171                                            ARM::MVE_VLD43_16};
5172       static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
5173                                            ARM::MVE_VLD42_32,
5174                                            ARM::MVE_VLD43_32};
5175       static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5176       SelectMVE_VLD(N, 4, Opcodes, false);
5177       return;
5178     }
5179     }
5180     break;
5181   }
5182 
5183   case ISD::INTRINSIC_WO_CHAIN: {
5184     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5185     switch (IntNo) {
5186     default:
5187       break;
5188 
5189     // Scalar f32 -> bf16
5190     case Intrinsic::arm_neon_vcvtbfp2bf: {
5191       SDLoc dl(N);
5192       const SDValue &Src = N->getOperand(1);
5193       llvm::EVT DestTy = N->getValueType(0);
5194       SDValue Pred = getAL(CurDAG, dl);
5195       SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5196       SDValue Ops[] = { Src, Src, Pred, Reg0 };
5197       CurDAG->SelectNodeTo(N, ARM::BF16_VCVTB, DestTy, Ops);
5198       return;
5199     }
5200 
5201     // Vector v4f32 -> v4bf16
5202     case Intrinsic::arm_neon_vcvtfp2bf: {
5203       SDLoc dl(N);
5204       const SDValue &Src = N->getOperand(1);
5205       SDValue Pred = getAL(CurDAG, dl);
5206       SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5207       SDValue Ops[] = { Src, Pred, Reg0 };
5208       CurDAG->SelectNodeTo(N, ARM::BF16_VCVT, MVT::v4bf16, Ops);
5209       return;
5210     }
5211 
5212     case Intrinsic::arm_mve_urshrl:
5213       SelectMVE_LongShift(N, ARM::MVE_URSHRL, true, false);
5214       return;
5215     case Intrinsic::arm_mve_uqshll:
5216       SelectMVE_LongShift(N, ARM::MVE_UQSHLL, true, false);
5217       return;
5218     case Intrinsic::arm_mve_srshrl:
5219       SelectMVE_LongShift(N, ARM::MVE_SRSHRL, true, false);
5220       return;
5221     case Intrinsic::arm_mve_sqshll:
5222       SelectMVE_LongShift(N, ARM::MVE_SQSHLL, true, false);
5223       return;
5224     case Intrinsic::arm_mve_uqrshll:
5225       SelectMVE_LongShift(N, ARM::MVE_UQRSHLL, false, true);
5226       return;
5227     case Intrinsic::arm_mve_sqrshrl:
5228       SelectMVE_LongShift(N, ARM::MVE_SQRSHRL, false, true);
5229       return;
5230 
5231     case Intrinsic::arm_mve_vadc:
5232     case Intrinsic::arm_mve_vadc_predicated:
5233       SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
5234                         IntNo == Intrinsic::arm_mve_vadc_predicated);
5235       return;
5236     case Intrinsic::arm_mve_vsbc:
5237     case Intrinsic::arm_mve_vsbc_predicated:
5238       SelectMVE_VADCSBC(N, ARM::MVE_VSBC, ARM::MVE_VSBCI, true,
5239                         IntNo == Intrinsic::arm_mve_vsbc_predicated);
5240       return;
5241     case Intrinsic::arm_mve_vshlc:
5242     case Intrinsic::arm_mve_vshlc_predicated:
5243       SelectMVE_VSHLC(N, IntNo == Intrinsic::arm_mve_vshlc_predicated);
5244       return;
5245 
5246     case Intrinsic::arm_mve_vmlldava:
5247     case Intrinsic::arm_mve_vmlldava_predicated: {
5248       static const uint16_t OpcodesU[] = {
5249           ARM::MVE_VMLALDAVu16,   ARM::MVE_VMLALDAVu32,
5250           ARM::MVE_VMLALDAVau16,  ARM::MVE_VMLALDAVau32,
5251       };
5252       static const uint16_t OpcodesS[] = {
5253           ARM::MVE_VMLALDAVs16,   ARM::MVE_VMLALDAVs32,
5254           ARM::MVE_VMLALDAVas16,  ARM::MVE_VMLALDAVas32,
5255           ARM::MVE_VMLALDAVxs16,  ARM::MVE_VMLALDAVxs32,
5256           ARM::MVE_VMLALDAVaxs16, ARM::MVE_VMLALDAVaxs32,
5257           ARM::MVE_VMLSLDAVs16,   ARM::MVE_VMLSLDAVs32,
5258           ARM::MVE_VMLSLDAVas16,  ARM::MVE_VMLSLDAVas32,
5259           ARM::MVE_VMLSLDAVxs16,  ARM::MVE_VMLSLDAVxs32,
5260           ARM::MVE_VMLSLDAVaxs16, ARM::MVE_VMLSLDAVaxs32,
5261       };
5262       SelectMVE_VMLLDAV(N, IntNo == Intrinsic::arm_mve_vmlldava_predicated,
5263                         OpcodesS, OpcodesU);
5264       return;
5265     }
5266 
5267     case Intrinsic::arm_mve_vrmlldavha:
5268     case Intrinsic::arm_mve_vrmlldavha_predicated: {
5269       static const uint16_t OpcodesU[] = {
5270           ARM::MVE_VRMLALDAVHu32,  ARM::MVE_VRMLALDAVHau32,
5271       };
5272       static const uint16_t OpcodesS[] = {
5273           ARM::MVE_VRMLALDAVHs32,  ARM::MVE_VRMLALDAVHas32,
5274           ARM::MVE_VRMLALDAVHxs32, ARM::MVE_VRMLALDAVHaxs32,
5275           ARM::MVE_VRMLSLDAVHs32,  ARM::MVE_VRMLSLDAVHas32,
5276           ARM::MVE_VRMLSLDAVHxs32, ARM::MVE_VRMLSLDAVHaxs32,
5277       };
5278       SelectMVE_VRMLLDAVH(N, IntNo == Intrinsic::arm_mve_vrmlldavha_predicated,
5279                           OpcodesS, OpcodesU);
5280       return;
5281     }
5282 
5283     case Intrinsic::arm_mve_vidup:
5284     case Intrinsic::arm_mve_vidup_predicated: {
5285       static const uint16_t Opcodes[] = {
5286           ARM::MVE_VIDUPu8, ARM::MVE_VIDUPu16, ARM::MVE_VIDUPu32,
5287       };
5288       SelectMVE_VxDUP(N, Opcodes, false,
5289                       IntNo == Intrinsic::arm_mve_vidup_predicated);
5290       return;
5291     }
5292 
5293     case Intrinsic::arm_mve_vddup:
5294     case Intrinsic::arm_mve_vddup_predicated: {
5295       static const uint16_t Opcodes[] = {
5296           ARM::MVE_VDDUPu8, ARM::MVE_VDDUPu16, ARM::MVE_VDDUPu32,
5297       };
5298       SelectMVE_VxDUP(N, Opcodes, false,
5299                       IntNo == Intrinsic::arm_mve_vddup_predicated);
5300       return;
5301     }
5302 
5303     case Intrinsic::arm_mve_viwdup:
5304     case Intrinsic::arm_mve_viwdup_predicated: {
5305       static const uint16_t Opcodes[] = {
5306           ARM::MVE_VIWDUPu8, ARM::MVE_VIWDUPu16, ARM::MVE_VIWDUPu32,
5307       };
5308       SelectMVE_VxDUP(N, Opcodes, true,
5309                       IntNo == Intrinsic::arm_mve_viwdup_predicated);
5310       return;
5311     }
5312 
5313     case Intrinsic::arm_mve_vdwdup:
5314     case Intrinsic::arm_mve_vdwdup_predicated: {
5315       static const uint16_t Opcodes[] = {
5316           ARM::MVE_VDWDUPu8, ARM::MVE_VDWDUPu16, ARM::MVE_VDWDUPu32,
5317       };
5318       SelectMVE_VxDUP(N, Opcodes, true,
5319                       IntNo == Intrinsic::arm_mve_vdwdup_predicated);
5320       return;
5321     }
5322 
5323     case Intrinsic::arm_cde_cx1d:
5324     case Intrinsic::arm_cde_cx1da:
5325     case Intrinsic::arm_cde_cx2d:
5326     case Intrinsic::arm_cde_cx2da:
5327     case Intrinsic::arm_cde_cx3d:
5328     case Intrinsic::arm_cde_cx3da: {
5329       bool HasAccum = IntNo == Intrinsic::arm_cde_cx1da ||
5330                       IntNo == Intrinsic::arm_cde_cx2da ||
5331                       IntNo == Intrinsic::arm_cde_cx3da;
5332       size_t NumExtraOps;
5333       uint16_t Opcode;
5334       switch (IntNo) {
5335       case Intrinsic::arm_cde_cx1d:
5336       case Intrinsic::arm_cde_cx1da:
5337         NumExtraOps = 0;
5338         Opcode = HasAccum ? ARM::CDE_CX1DA : ARM::CDE_CX1D;
5339         break;
5340       case Intrinsic::arm_cde_cx2d:
5341       case Intrinsic::arm_cde_cx2da:
5342         NumExtraOps = 1;
5343         Opcode = HasAccum ? ARM::CDE_CX2DA : ARM::CDE_CX2D;
5344         break;
5345       case Intrinsic::arm_cde_cx3d:
5346       case Intrinsic::arm_cde_cx3da:
5347         NumExtraOps = 2;
5348         Opcode = HasAccum ? ARM::CDE_CX3DA : ARM::CDE_CX3D;
5349         break;
5350       default:
5351         llvm_unreachable("Unexpected opcode");
5352       }
5353       SelectCDE_CXxD(N, Opcode, NumExtraOps, HasAccum);
5354       return;
5355     }
5356     }
5357     break;
5358   }
5359 
5360   case ISD::ATOMIC_CMP_SWAP:
5361     SelectCMP_SWAP(N);
5362     return;
5363   }
5364 
5365   SelectCode(N);
5366 }
5367 
5368 // Inspect a register string of the form
5369 // cp<coprocessor>:<opc1>:c<CRn>:c<CRm>:<opc2> (32bit) or
5370 // cp<coprocessor>:<opc1>:c<CRm> (64bit) inspect the fields of the string
5371 // and obtain the integer operands from them, adding these operands to the
5372 // provided vector.
5373 static void getIntOperandsFromRegisterString(StringRef RegString,
5374                                              SelectionDAG *CurDAG,
5375                                              const SDLoc &DL,
5376                                              std::vector<SDValue> &Ops) {
5377   SmallVector<StringRef, 5> Fields;
5378   RegString.split(Fields, ':');
5379 
5380   if (Fields.size() > 1) {
5381     bool AllIntFields = true;
5382 
5383     for (StringRef Field : Fields) {
5384       // Need to trim out leading 'cp' characters and get the integer field.
5385       unsigned IntField;
5386       AllIntFields &= !Field.trim("CPcp").getAsInteger(10, IntField);
5387       Ops.push_back(CurDAG->getTargetConstant(IntField, DL, MVT::i32));
5388     }
5389 
5390     assert(AllIntFields &&
5391             "Unexpected non-integer value in special register string.");
5392     (void)AllIntFields;
5393   }
5394 }
5395 
5396 // Maps a Banked Register string to its mask value. The mask value returned is
5397 // for use in the MRSbanked / MSRbanked instruction nodes as the Banked Register
5398 // mask operand, which expresses which register is to be used, e.g. r8, and in
5399 // which mode it is to be used, e.g. usr. Returns -1 to signify that the string
5400 // was invalid.
5401 static inline int getBankedRegisterMask(StringRef RegString) {
5402   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegString.lower());
5403   if (!TheReg)
5404      return -1;
5405   return TheReg->Encoding;
5406 }
5407 
5408 // The flags here are common to those allowed for apsr in the A class cores and
5409 // those allowed for the special registers in the M class cores. Returns a
5410 // value representing which flags were present, -1 if invalid.
5411 static inline int getMClassFlagsMask(StringRef Flags) {
5412   return StringSwitch<int>(Flags)
5413           .Case("", 0x2) // no flags means nzcvq for psr registers, and 0x2 is
5414                          // correct when flags are not permitted
5415           .Case("g", 0x1)
5416           .Case("nzcvq", 0x2)
5417           .Case("nzcvqg", 0x3)
5418           .Default(-1);
5419 }
5420 
5421 // Maps MClass special registers string to its value for use in the
5422 // t2MRS_M/t2MSR_M instruction nodes as the SYSm value operand.
5423 // Returns -1 to signify that the string was invalid.
5424 static int getMClassRegisterMask(StringRef Reg, const ARMSubtarget *Subtarget) {
5425   auto TheReg = ARMSysReg::lookupMClassSysRegByName(Reg);
5426   const FeatureBitset &FeatureBits = Subtarget->getFeatureBits();
5427   if (!TheReg || !TheReg->hasRequiredFeatures(FeatureBits))
5428     return -1;
5429   return (int)(TheReg->Encoding & 0xFFF); // SYSm value
5430 }
5431 
5432 static int getARClassRegisterMask(StringRef Reg, StringRef Flags) {
5433   // The mask operand contains the special register (R Bit) in bit 4, whether
5434   // the register is spsr (R bit is 1) or one of cpsr/apsr (R bit is 0), and
5435   // bits 3-0 contains the fields to be accessed in the special register, set by
5436   // the flags provided with the register.
5437   int Mask = 0;
5438   if (Reg == "apsr") {
5439     // The flags permitted for apsr are the same flags that are allowed in
5440     // M class registers. We get the flag value and then shift the flags into
5441     // the correct place to combine with the mask.
5442     Mask = getMClassFlagsMask(Flags);
5443     if (Mask == -1)
5444       return -1;
5445     return Mask << 2;
5446   }
5447 
5448   if (Reg != "cpsr" && Reg != "spsr") {
5449     return -1;
5450   }
5451 
5452   // This is the same as if the flags were "fc"
5453   if (Flags.empty() || Flags == "all")
5454     return Mask | 0x9;
5455 
5456   // Inspect the supplied flags string and set the bits in the mask for
5457   // the relevant and valid flags allowed for cpsr and spsr.
5458   for (char Flag : Flags) {
5459     int FlagVal;
5460     switch (Flag) {
5461       case 'c':
5462         FlagVal = 0x1;
5463         break;
5464       case 'x':
5465         FlagVal = 0x2;
5466         break;
5467       case 's':
5468         FlagVal = 0x4;
5469         break;
5470       case 'f':
5471         FlagVal = 0x8;
5472         break;
5473       default:
5474         FlagVal = 0;
5475     }
5476 
5477     // This avoids allowing strings where the same flag bit appears twice.
5478     if (!FlagVal || (Mask & FlagVal))
5479       return -1;
5480     Mask |= FlagVal;
5481   }
5482 
5483   // If the register is spsr then we need to set the R bit.
5484   if (Reg == "spsr")
5485     Mask |= 0x10;
5486 
5487   return Mask;
5488 }
5489 
// Lower the read_register intrinsic to ARM specific DAG nodes
// using the supplied metadata string to select the instruction node to use
// and the registers/masks to construct as operands for the node.
// Returns true if the node was replaced, false if the register string was
// not recognised (the caller falls back to default selection / an error).
bool ARMDAGToDAGISel::tryReadRegister(SDNode *N){
  // Operand 1 is the metadata node holding the register name string.
  const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
  const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
  bool IsThumb2 = Subtarget->isThumb2();
  SDLoc DL(N);

  std::vector<SDValue> Ops;
  getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);

  if (!Ops.empty()) {
    // If the special register string was constructed of fields (as defined
    // in the ACLE) then need to lower to MRC node (32 bit) or
    // MRRC node(64 bit), we can make the distinction based on the number of
    // operands we have.
    unsigned Opcode;
    SmallVector<EVT, 3> ResTypes;
    if (Ops.size() == 5){
      // cp<n>:<opc1>:c<CRn>:c<CRm>:<opc2> -> 32-bit coprocessor read.
      Opcode = IsThumb2 ? ARM::t2MRC : ARM::MRC;
      ResTypes.append({ MVT::i32, MVT::Other });
    } else {
      // cp<n>:<opc1>:c<CRm> -> 64-bit coprocessor read (two i32 results).
      assert(Ops.size() == 3 &&
              "Invalid number of fields in special register string.");
      Opcode = IsThumb2 ? ARM::t2MRRC : ARM::MRRC;
      ResTypes.append({ MVT::i32, MVT::i32, MVT::Other });
    }

    // Append the always-execute predicate, the predicate register, and the
    // incoming chain (operand 0 of the intrinsic node).
    Ops.push_back(getAL(CurDAG, DL));
    Ops.push_back(CurDAG->getRegister(0, MVT::i32));
    Ops.push_back(N->getOperand(0));
    ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops));
    return true;
  }

  // Register names are matched case-insensitively below.
  std::string SpecialReg = RegString->getString().lower();

  // Banked registers (e.g. "r8_usr") are read with an MRSbanked node.
  int BankedReg = getBankedRegisterMask(SpecialReg);
  if (BankedReg != -1) {
    Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
            getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(
        N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
                                  DL, MVT::i32, MVT::Other, Ops));
    return true;
  }

  // The VFP registers are read by creating SelectionDAG nodes with opcodes
  // corresponding to the register that is being read from. So we switch on the
  // string to find which opcode we need to use.
  unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
                    .Case("fpscr", ARM::VMRS)
                    .Case("fpexc", ARM::VMRS_FPEXC)
                    .Case("fpsid", ARM::VMRS_FPSID)
                    .Case("mvfr0", ARM::VMRS_MVFR0)
                    .Case("mvfr1", ARM::VMRS_MVFR1)
                    .Case("mvfr2", ARM::VMRS_MVFR2)
                    .Case("fpinst", ARM::VMRS_FPINST)
                    .Case("fpinst2", ARM::VMRS_FPINST2)
                    .Default(0);

  // If an opcode was found then we can lower the read to a VFP instruction.
  if (Opcode) {
    // VFP system registers require at least VFPv2; mvfr2 additionally
    // requires an FP-ARMv8 base.
    if (!Subtarget->hasVFP2Base())
      return false;
    if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8Base())
      return false;

    Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(N,
                CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops));
    return true;
  }

  // If the target is M Class then need to validate that the register string
  // is an acceptable value, so check that a mask can be constructed from the
  // string.
  if (Subtarget->isMClass()) {
    int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
    if (SYSmValue == -1)
      return false;

    SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
                      getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
                      N->getOperand(0) };
    ReplaceNode(
        N, CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops));
    return true;
  }

  // Here we know the target is not M Class so we need to check if it is one
  // of the remaining possible values which are apsr, cpsr or spsr.
  if (SpecialReg == "apsr" || SpecialReg == "cpsr") {
    Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS,
                                          DL, MVT::i32, MVT::Other, Ops));
    return true;
  }

  if (SpecialReg == "spsr") {
    Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(
        N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys, DL,
                                  MVT::i32, MVT::Other, Ops));
    return true;
  }

  // Unrecognised register string.
  return false;
}
5604 
// Lower the write_register intrinsic to ARM specific DAG nodes
// using the supplied metadata string to select the instruction node to use
// and the registers/masks to use in the nodes
// Returns true if the node was replaced, false if the register string was
// not recognised.
bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){
  // Operand 1 is the metadata node holding the register name string;
  // operand 2 (and 3 for 64-bit writes) carry the value(s) to write.
  const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
  const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
  bool IsThumb2 = Subtarget->isThumb2();
  SDLoc DL(N);

  std::vector<SDValue> Ops;
  getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);

  if (!Ops.empty()) {
    // If the special register string was constructed of fields (as defined
    // in the ACLE) then need to lower to MCR node (32 bit) or
    // MCRR node(64 bit), we can make the distinction based on the number of
    // operands we have.
    unsigned Opcode;
    if (Ops.size() == 5) {
      // cp<n>:<opc1>:c<CRn>:c<CRm>:<opc2> -> 32-bit coprocessor write;
      // insert the single write value after cp<n>/<opc1>.
      Opcode = IsThumb2 ? ARM::t2MCR : ARM::MCR;
      Ops.insert(Ops.begin()+2, N->getOperand(2));
    } else {
      // cp<n>:<opc1>:c<CRm> -> 64-bit coprocessor write; insert the two
      // 32-bit halves of the value after cp<n>/<opc1>.
      assert(Ops.size() == 3 &&
              "Invalid number of fields in special register string.");
      Opcode = IsThumb2 ? ARM::t2MCRR : ARM::MCRR;
      SDValue WriteValue[] = { N->getOperand(2), N->getOperand(3) };
      Ops.insert(Ops.begin()+2, WriteValue, WriteValue+2);
    }

    // Append the always-execute predicate, the predicate register, and the
    // incoming chain.
    Ops.push_back(getAL(CurDAG, DL));
    Ops.push_back(CurDAG->getRegister(0, MVT::i32));
    Ops.push_back(N->getOperand(0));

    ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
    return true;
  }

  // Register names are matched case-insensitively below.
  std::string SpecialReg = RegString->getString().lower();
  // Banked registers (e.g. "r8_usr") are written with an MSRbanked node.
  int BankedReg = getBankedRegisterMask(SpecialReg);
  if (BankedReg != -1) {
    Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32), N->getOperand(2),
            getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(
        N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
                                  DL, MVT::Other, Ops));
    return true;
  }

  // The VFP registers are written to by creating SelectionDAG nodes with
  // opcodes corresponding to the register that is being written. So we switch
  // on the string to find which opcode we need to use.
  unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
                    .Case("fpscr", ARM::VMSR)
                    .Case("fpexc", ARM::VMSR_FPEXC)
                    .Case("fpsid", ARM::VMSR_FPSID)
                    .Case("fpinst", ARM::VMSR_FPINST)
                    .Case("fpinst2", ARM::VMSR_FPINST2)
                    .Default(0);

  if (Opcode) {
    // VFP system registers require at least VFPv2.
    if (!Subtarget->hasVFP2Base())
      return false;
    Ops = { N->getOperand(2), getAL(CurDAG, DL),
            CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
    ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
    return true;
  }

  // Split e.g. "spsr_fc" into the register name ("spsr") and the trailing
  // flags string ("fc") for the A/R class mask computation below.
  std::pair<StringRef, StringRef> Fields;
  Fields = StringRef(SpecialReg).rsplit('_');
  std::string Reg = Fields.first.str();
  StringRef Flags = Fields.second;

  // If the target was M Class then need to validate the special register value
  // and retrieve the mask for use in the instruction node.
  if (Subtarget->isMClass()) {
    int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
    if (SYSmValue == -1)
      return false;

    SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
                      N->getOperand(2), getAL(CurDAG, DL),
                      CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
    ReplaceNode(N, CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops));
    return true;
  }

  // We then check to see if a valid mask can be constructed for one of the
  // register string values permitted for the A and R class cores. These values
  // are apsr, spsr and cpsr; these are also valid on older cores.
  int Mask = getARClassRegisterMask(Reg, Flags);
  if (Mask != -1) {
    Ops = { CurDAG->getTargetConstant(Mask, DL, MVT::i32), N->getOperand(2),
            getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
                                          DL, MVT::Other, Ops));
    return true;
  }

  // Unrecognised register string.
  return false;
}
5708 
// Rewrite an inline-asm node so that 64-bit values bound to two GPRs via the
// "r" constraint are packed into a single GPRPair operand, inserting the
// copies needed to move values into/out of the pair. Returns true if the
// node was rewritten and replaced, false if no operand needed rewriting.
bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
  std::vector<SDValue> AsmNodeOperands;
  InlineAsm::Flag Flag;
  bool Changed = false;
  unsigned NumOps = N->getNumOperands();

  // Normally, i64 data is bounded to two arbitrary GRPs for "%r" constraint.
  // However, some instrstions (e.g. ldrexd/strexd in ARM mode) require
  // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
  // respectively. Since there is no constraint to explicitly specify a
  // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb,
  // the 64-bit data may be referred by H, Q, R modifiers, so we still pack
  // them into a GPRPair.

  SDLoc dl(N);
  SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue();

  // OpChanged[i] records whether the i-th register operand group was
  // converted to a GPRPair (indexed in step with flag operands that have
  // register operands).
  SmallVector<bool, 8> OpChanged;
  // Glue node will be appended late.
  for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
    SDValue op = N->getOperand(i);
    AsmNodeOperands.push_back(op);

    if (i < InlineAsm::Op_FirstOperand)
      continue;

    if (const auto *C = dyn_cast<ConstantSDNode>(N->getOperand(i)))
      Flag = InlineAsm::Flag(C->getZExtValue());
    else
      continue;

    // Immediate operands to inline asm in the SelectionDAG are modeled with
    // two operands. The first is a constant of value InlineAsm::Kind::Imm, and
    // the second is a constant with the value of the immediate. If we get here
    // and we have a Kind::Imm, skip the next operand, and continue.
    if (Flag.isImmKind()) {
      SDValue op = N->getOperand(++i);
      AsmNodeOperands.push_back(op);
      continue;
    }

    const unsigned NumRegs = Flag.getNumOperandRegisters();
    if (NumRegs)
      OpChanged.push_back(false);

    unsigned DefIdx = 0;
    bool IsTiedToChangedOp = false;
    // If it's a use that is tied with a previous def, it has no
    // reg class constraint.
    if (Changed && Flag.isUseOperandTiedToDef(DefIdx))
      IsTiedToChangedOp = OpChanged[DefIdx];

    // Memory operands to inline asm in the SelectionDAG are modeled with two
    // operands: a constant of value InlineAsm::Kind::Mem followed by the input
    // operand. If we get here and we have a Kind::Mem, skip the next operand
    // (so it doesn't get misinterpreted), and continue. We do this here because
    // it's important to update the OpChanged array correctly before moving on.
    if (Flag.isMemKind()) {
      SDValue op = N->getOperand(++i);
      AsmNodeOperands.push_back(op);
      continue;
    }

    if (!Flag.isRegUseKind() && !Flag.isRegDefKind() &&
        !Flag.isRegDefEarlyClobberKind())
      continue;

    // Only rewrite operands that are exactly two GPRs (an i64 bound to "r"),
    // or a use tied to a def that was already converted to a pair.
    unsigned RC;
    const bool HasRC = Flag.hasRegClassConstraint(RC);
    if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
        || NumRegs != 2)
      continue;

    assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
    SDValue V0 = N->getOperand(i+1);
    SDValue V1 = N->getOperand(i+2);
    Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
    Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
    SDValue PairedReg;
    MachineRegisterInfo &MRI = MF->getRegInfo();

    if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
      // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
      // the original GPRs.

      Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      SDValue Chain = SDValue(N,0);

      SDNode *GU = N->getGluedUser();
      SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
                                               Chain.getValue(1));

      // Extract values from a GPRPair reg and copy to the original GPR reg.
      SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                    RegCopy);
      SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                    RegCopy);
      SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
                                        RegCopy.getValue(1));
      SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));

      // Update the original glue user.
      std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
      Ops.push_back(T1.getValue(1));
      CurDAG->UpdateNodeOperands(GU, Ops);
    } else {
      // For Kind  == InlineAsm::Kind::RegUse, we first copy two GPRs into a
      // GPRPair and then pass the GPRPair to the inline asm.
      SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];

      // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
      SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
                                          Chain.getValue(1));
      SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
                                          T0.getValue(1));
      SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);

      // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
      // i32 VRs of inline asm with it.
      Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));

      AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
      Glue = Chain.getValue(1);
    }

    Changed = true;

    if(PairedReg.getNode()) {
      OpChanged[OpChanged.size() -1 ] = true;
      // Rewrite the flag: same kind, but now a single register of the
      // GPRPair class (or tied to the converted def).
      Flag = InlineAsm::Flag(Flag.getKind(), 1 /* RegNum*/);
      if (IsTiedToChangedOp)
        Flag.setMatchingOp(DefIdx);
      else
        Flag.setRegClass(ARM::GPRPairRegClassID);
      // Replace the current flag.
      AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
          Flag, dl, MVT::i32);
      // Add the new register node and skip the original two GPRs.
      AsmNodeOperands.push_back(PairedReg);
      // Skip the next two GPRs.
      i += 2;
    }
  }

  if (Glue.getNode())
    AsmNodeOperands.push_back(Glue);
  if (!Changed)
    return false;

  // Rebuild the inline-asm node with the rewritten operand list.
  SDValue New = CurDAG->getNode(N->getOpcode(), SDLoc(N),
      CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  New->setNodeId(-1);
  ReplaceNode(N, New.getNode());
  return true;
}
5867 
5868 bool ARMDAGToDAGISel::SelectInlineAsmMemoryOperand(
5869     const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
5870     std::vector<SDValue> &OutOps) {
5871   switch(ConstraintID) {
5872   default:
5873     llvm_unreachable("Unexpected asm memory constraint");
5874   case InlineAsm::ConstraintCode::m:
5875   case InlineAsm::ConstraintCode::o:
5876   case InlineAsm::ConstraintCode::Q:
5877   case InlineAsm::ConstraintCode::Um:
5878   case InlineAsm::ConstraintCode::Un:
5879   case InlineAsm::ConstraintCode::Uq:
5880   case InlineAsm::ConstraintCode::Us:
5881   case InlineAsm::ConstraintCode::Ut:
5882   case InlineAsm::ConstraintCode::Uv:
5883   case InlineAsm::ConstraintCode::Uy:
5884     // Require the address to be in a register.  That is safe for all ARM
5885     // variants and it is hard to do anything much smarter without knowing
5886     // how the operand is used.
5887     OutOps.push_back(Op);
5888     return false;
5889   }
5890   return true;
5891 }
5892 
5893 /// createARMISelDag - This pass converts a legalized DAG into a
5894 /// ARM-specific DAG, ready for instruction scheduling.
5895 ///
5896 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
5897                                      CodeGenOptLevel OptLevel) {
5898   return new ARMDAGToDAGISel(TM, OptLevel);
5899 }
5900