//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include <optional>
#include <utility>

namespace llvm {

class ARMBaseTargetMachine;
class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class IRBuilderBase;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

  namespace ARMISD {

  // ARM Specific DAG Nodes
  enum NodeType : unsigned {
    // Start the numbering where the builtin ops and target ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    Wrapper,    // Wrapper - A wrapper node for TargetConstantPool,
                // TargetExternalSymbol, and TargetGlobalAddress.
    WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
                // PIC mode.
    WrapperJT,  // WrapperJT - A wrapper node for TargetJumpTable

    // Add pseudo op to model memcpy for struct byval.
    COPY_STRUCT_BYVAL,

    CALL,        // Function call.
    CALL_PRED,   // Function call that's predicable.
    CALL_NOLINK, // Function call with branch not branch-and-link.
    tSECALL,     // CMSE non-secure function call.
    t2CALL_BTI,  // Thumb function call followed by BTI instruction.
    BRCOND,      // Conditional branch.
    BR_JT,       // Jumptable branch.
    BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
    RET_GLUE,    // Return with a flag operand.
    SERET_GLUE,  // CMSE Entry function return with a flag operand.
    INTRET_GLUE, // Interrupt return with an LR-offset and a flag operand.

    PIC_ADD, // Add with a PC operand and a PIC label.

    ASRL, // MVE long arithmetic shift right.
    LSRL, // MVE long shift right.
    LSLL, // MVE long shift left.

    CMP,      // ARM compare instructions.
    CMN,      // ARM CMN instructions.
    CMPZ,     // ARM compare that sets only Z flag.
    CMPFP,    // ARM VFP compare instruction, sets FPSCR.
    CMPFPE,   // ARM VFP signalling compare instruction, sets FPSCR.
    CMPFPw0,  // ARM VFP compare against zero instruction, sets FPSCR.
    CMPFPEw0, // ARM VFP signalling compare against zero instruction, sets
              // FPSCR.
    FMSTAT,   // ARM fmstat instruction.

    CMOV, // ARM conditional move instructions.

    SSAT, // Signed saturation
    USAT, // Unsigned saturation

    BCC_i64,

    LSLS,  // Flag-setting shift left.
    LSRS1, // Flag-setting logical shift right by one bit.
    ASRS1, // Flag-setting arithmetic shift right by one bit.
    RRX,   // Shift right one bit with carry in.

    ADDC, // Add with carry
    ADDE, // Add using carry
    SUBC, // Sub with carry
    SUBE, // Sub using carry

    VMOVRRD, // double to two gprs.
    VMOVDRR, // Two gprs to double.
    VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

    EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
    EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
    EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

    TC_RETURN, // Tail call return pseudo.

    THREAD_POINTER,

    DYN_ALLOC, // Dynamic allocation on the stack.

    MEMBARRIER_MCR, // Memory barrier (MCR)

    PRELOAD, // Preload

    WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
    WIN__DBZCHK, // Windows' divide by zero check

    WLS, // Low-overhead loops, While Loop Start branch. See t2WhileLoopStart
    WLSSETUP, // Setup for the iteration count of a WLS. See t2WhileLoopSetup.
    LOOP_DEC, // Really a part of LE, performs the sub
    LE,       // Low-overhead loops, Loop End

    PREDICATE_CAST,  // Predicate cast for MVE i1 types
    VECTOR_REG_CAST, // Reinterpret the current contents of a vector register

    MVESEXT,  // Legalization aids for extending a vector into two/four vectors.
    MVEZEXT,  //  or truncating two/four vectors into one. Eventually becomes
    MVETRUNC, //  stack store/load sequence, if not optimized to anything else.

    VCMP,  // Vector compare.
    VCMPZ, // Vector compare to zero.
    VTST,  // Vector test bits.

    // Vector shift by vector
    VSHLs, // ...left/right by signed
    VSHLu, // ...left/right by unsigned

    // Vector shift by immediate:
    VSHLIMM,  // ...left
    VSHRsIMM, // ...right (signed)
    VSHRuIMM, // ...right (unsigned)

    // Vector rounding shift by immediate:
    VRSHRsIMM, // ...right (signed)
    VRSHRuIMM, // ...right (unsigned)
    VRSHRNIMM, // ...right narrow

    // Vector saturating shift by immediate:
    VQSHLsIMM,   // ...left (signed)
    VQSHLuIMM,   // ...left (unsigned)
    VQSHLsuIMM,  // ...left (signed to unsigned)
    VQSHRNsIMM,  // ...right narrow (signed)
    VQSHRNuIMM,  // ...right narrow (unsigned)
    VQSHRNsuIMM, // ...right narrow (signed to unsigned)

    // Vector saturating rounding shift by immediate:
    VQRSHRNsIMM,  // ...right narrow (signed)
    VQRSHRNuIMM,  // ...right narrow (unsigned)
    VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

    // Vector shift and insert:
    VSLIIMM, // ...left
    VSRIIMM, // ...right

    // Vector get lane (VMOV scalar to ARM core register)
    // (These are used for 8- and 16-bit element types only.)
    VGETLANEu, // zero-extend vector extract element
    VGETLANEs, // sign-extend vector extract element

    // Vector move immediate and move negated immediate:
    VMOVIMM,
    VMVNIMM,

    // Vector move f32 immediate:
    VMOVFPIMM,

    // Move H <-> R, clearing top 16 bits
    VMOVrh,
    VMOVhr,

    // Vector duplicate:
    VDUP,
    VDUPLANE,

    // Vector shuffles:
    VEXT,   // extract
    VREV64, // reverse elements within 64-bit doublewords
    VREV32, // reverse elements within 32-bit words
    VREV16, // reverse elements within 16-bit halfwords
    VZIP,   // zip (interleave)
    VUZP,   // unzip (deinterleave)
    VTRN,   // transpose
    VTBL1,  // 1-register shuffle with mask
    VTBL2,  // 2-register shuffle with mask
    VMOVN,  // MVE vmovn

    // MVE Saturating truncates
    VQMOVNs, // Vector (V) Saturating (Q) Move and Narrow (N), signed (s)
    VQMOVNu, // Vector (V) Saturating (Q) Move and Narrow (N), unsigned (u)

    // MVE float <> half converts
    VCVTN, // MVE vcvt f32 -> f16, truncating into either the bottom or top
           // lanes
    VCVTL, // MVE vcvt f16 -> f32, extending from either the bottom or top lanes

    // MVE VIDUP instruction, taking a start value and increment.
    VIDUP,

    // Vector multiply long:
    VMULLs, // ...signed
    VMULLu, // ...unsigned

    VQDMULH, // MVE vqdmulh instruction

    // MVE reductions
    VADDVs,  // sign- or zero-extend the elements of a vector to i32,
    VADDVu,  //   add them all together, and return an i32 of their sum
    VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
    VADDVpu,
    VADDLVs,  // sign- or zero-extend elements to i64 and sum, returning
    VADDLVu,  //   the low and high 32-bit halves of the sum
    VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
    VADDLVAu, //   provided as low and high halves
    VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
    VADDLVpu,
    VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
    VADDLVApu,
    VMLAVs, // sign- or zero-extend the elements of two vectors to i32, multiply
    VMLAVu, //   them and add the results together, returning an i32 of the sum
    VMLAVps, // Same as VMLAV[su] with a v4i1 predicate mask
    VMLAVpu,
    VMLALVs,  // Same as VMLAV but with i64, returning the low and
    VMLALVu,  //   high 32-bit halves of the sum
    VMLALVps, // Same as VMLALV[su] with a v4i1 predicate mask
    VMLALVpu,
    VMLALVAs,  // Same as VMLALV but also add an input accumulator
    VMLALVAu,  //   provided as low and high halves
    VMLALVAps, // Same as VMLALVA[su] with a v4i1 predicate mask
    VMLALVApu,
    VMINVu, // Find minimum unsigned value of a vector and register
    VMINVs, // Find minimum signed value of a vector and register
    VMAXVu, // Find maximum unsigned value of a vector and register
    VMAXVs, // Find maximum signed value of a vector and register

    SMULWB,  // Signed multiply word by half word, bottom
    SMULWT,  // Signed multiply word by half word, top
    UMLAL,   // 64-bit Unsigned Accumulate Multiply
    SMLAL,   // 64-bit Signed Accumulate Multiply
    UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
    SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
    SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
    SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
    SMLALTT, // 64-bit signed accumulate multiply top, top 16
    SMLALD,  // Signed multiply accumulate long dual
    SMLALDX, // Signed multiply accumulate long dual exchange
    SMLSLD,  // Signed multiply subtract long dual
    SMLSLDX, // Signed multiply subtract long dual exchange
    SMMLAR,  // Signed multiply long, round and add
    SMMLSR,  // Signed multiply long, subtract and round

    // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
    // stands for.
    QADD8b,
    QSUB8b,
    QADD16b,
    QSUB16b,
    UQADD8b,
    UQSUB8b,
    UQADD16b,
    UQSUB16b,

    // Operands of the standard BUILD_VECTOR node are not legalized, which
    // is fine if BUILD_VECTORs are always lowered to shuffles or other
    // operations, but for ARM some BUILD_VECTORs are legal as-is and their
    // operands need to be legalized.  Define an ARM-specific version of
    // BUILD_VECTOR for this purpose.
    BUILD_VECTOR,

    // Bit-field insert
    BFI,

    // Vector OR with immediate
    VORRIMM,
    // Vector AND with NOT of immediate
    VBICIMM,

    // Pseudo vector bitwise select
    VBSP,

    // Pseudo-instruction representing a memory copy using ldm/stm
    // instructions.
    MEMCPY,

    // Pseudo-instruction representing a memory copy using a tail predicated
    // loop
    MEMCPYLOOP,
    // Pseudo-instruction representing a memset using a tail predicated
    // loop
    MEMSETLOOP,

    // V8.1-M Mainline condition select
    CSINV, // Conditional select invert.
    CSNEG, // Conditional select negate.
    CSINC, // Conditional select increment.

    // Vector load N-element structure to all lanes:
    FIRST_MEMORY_OPCODE,
    VLD1DUP = FIRST_MEMORY_OPCODE,
    VLD2DUP,
    VLD3DUP,
    VLD4DUP,

    // NEON loads with post-increment base updates:
    VLD1_UPD,
    VLD2_UPD,
    VLD3_UPD,
    VLD4_UPD,
    VLD2LN_UPD,
    VLD3LN_UPD,
    VLD4LN_UPD,
    VLD1DUP_UPD,
    VLD2DUP_UPD,
    VLD3DUP_UPD,
    VLD4DUP_UPD,
    VLD1x2_UPD,
    VLD1x3_UPD,
    VLD1x4_UPD,

    // NEON stores with post-increment base updates:
    VST1_UPD,
    VST2_UPD,
    VST3_UPD,
    VST4_UPD,
    VST2LN_UPD,
    VST3LN_UPD,
    VST4LN_UPD,
    VST1x2_UPD,
    VST1x3_UPD,
    VST1x4_UPD,

    // Load/Store of dual registers
    LDRD,
    STRD,
    LAST_MEMORY_OPCODE = STRD,
  };

  } // end namespace ARMISD

  namespace ARM {
  /// Possible values of current rounding mode, which is specified in bits
  /// 23:22 of FPSCR.
  enum Rounding {
    RN = 0,    // Round to Nearest
    RP = 1,    // Round towards Plus infinity
    RM = 2,    // Round towards Minus infinity
    RZ = 3,    // Round towards Zero
    rmMask = 3 // Bit mask selecting rounding mode
  };

  // Bit position of rounding mode bits in FPSCR.
  const unsigned RoundingBitsPos = 22;

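  // Illustrative note (not part of the upstream header): given a raw FPSCR
  // value, for example one read with the VMRS instruction into a hypothetical
  // variable Fpscr, the current rounding mode could be decoded as
  //   ARM::Rounding Mode =
  //       ARM::Rounding((Fpscr >> ARM::RoundingBitsPos) & ARM::rmMask);
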
  // Bits of floating-point status. These are NZCV flags, QC bit and cumulative
  // FP exception bits.
  const unsigned FPStatusBits = 0xf800009f;

  // Some bits in the FPSCR are not yet defined.  They must be preserved when
  // modifying the contents.
  const unsigned FPReservedBits = 0x00006060;
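
  // Illustrative note (not part of the upstream header): these masks partition
  // an FPSCR value. For hypothetical values OldFpscr and NewFpscr, clearing the
  // sticky status flags while keeping the reserved bits unchanged on a write
  // could look like
  //   NewFpscr &= ~ARM::FPStatusBits;
  //   NewFpscr = (NewFpscr & ~ARM::FPReservedBits) |
  //              (OldFpscr & ARM::FPReservedBits);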
  } // namespace ARM

  /// Define some predicates that are used for node matching.
  namespace ARM {

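    // Illustrative note (not part of the upstream header): this predicate is
    // used when matching bit-field clear/insert (BFC/BFI) patterns. A value
    // such as 0xffff00ff, i.e. all ones except one contiguous run of zeros, is
    // the kind of mask it is intended to recognise.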
    bool isBitFieldInvertedMask(unsigned v);

  } // end namespace ARM

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
    // Copying needed for an outgoing byval argument.
    enum ByValCopyKind {
      // Argument is already in the correct location, no copy needed.
      NoCopy,
      // Argument value is currently in the local stack frame, needs copying to
      // outgoing argument area.
      CopyOnce,
      // Argument value is currently in the outgoing argument area, but not at
      // the correct offset, so needs copying via a temporary in local stack
      // space.
      CopyViaTemp,
    };

  public:
    explicit ARMTargetLowering(const TargetMachine &TM,
                               const ARMSubtarget &STI);

    const ARMBaseTargetMachine &getTM() const;

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    bool isReadOnly(const GlobalValue *GV) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                       SDNode *Node) const override;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                           const APInt &OriginalDemandedBits,
                                           const APInt &OriginalDemandedElts,
                                           KnownBits &Known,
                                           TargetLoweringOpt &TLO,
                                           unsigned Depth) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsMisalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                        Align Alignment,
                                        MachineMemOperand::Flags Flags,
                                        unsigned *Fast) const override;

    EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                            const AttributeList &FuncAttributes) const override;

    bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
    bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
    bool isZExtFree(SDValue Val, EVT VT2) const override;
    Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const override;

    bool isFNegFree(EVT VT) const override;

    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;


    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;

    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// Returns true if the addressing mode represented by AM is legal
    /// for the Thumb1 target, for a load/store of the specified type.
    bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a legal
    /// icmp immediate, that is, the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a legal
    /// add immediate, that is, the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                      const APInt &DemandedElts,
                                      TargetLoweringOpt &TLO) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    const char *LowerXConstraint(EVT ConstraintVT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    InlineAsm::ConstraintCode
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "Q")
        return InlineAsm::ConstraintCode::Q;
      if (ConstraintCode.size() == 2) {
        if (ConstraintCode[0] == 'U') {
          switch(ConstraintCode[1]) {
          default:
            break;
          case 'm':
            return InlineAsm::ConstraintCode::Um;
          case 'n':
            return InlineAsm::ConstraintCode::Un;
          case 'q':
            return InlineAsm::ConstraintCode::Uq;
          case 's':
            return InlineAsm::ConstraintCode::Us;
          case 't':
            return InlineAsm::ConstraintCode::Ut;
          case 'v':
            return InlineAsm::ConstraintCode::Uv;
          case 'y':
            return InlineAsm::ConstraintCode::Uy;
          }
        }
      }
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
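
    // Illustrative note (not part of the upstream header): "Q" and the
    // two-letter "U*" codes above are ARM memory constraints as written in
    // GCC-style inline assembly. For example, an exclusive load such as
    //   asm volatile("ldrex %0, %1" : "=r"(Val) : "Q"(*Ptr));
    // (with hypothetical Val and Ptr) reaches this hook with ConstraintCode
    // equal to "Q".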

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *
    getRegClassFor(MVT VT, bool isDivergent = false) const override;

    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                Align &PrefAlign) const override;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool preferZeroCompareBranch() const override { return true; }

    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    bool hasAndNotCompare(SDValue V) const override {
      // We can use bics for any scalar.
      return V.getValueType().isScalarInteger();
    }

    bool
    isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize = false) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    /// Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
    /// with this index.
    bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                 unsigned Index) const override;

    bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                              bool MathUsed) const override {
      // Using overflow ops for overflow checks only should be beneficial on ARM.
      return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
    }

    bool shouldReassociateReduction(unsigned Opc, EVT VT) const override {
      return Opc != ISD::VECREDUCE_ADD;
    }

    /// Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg,
        const DataLayout &DL) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    Register
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    Register
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    Instruction *makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const;
    Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                                AtomicOrdering Ord) const override;

    void
    emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

    Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
    Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                   AtomicOrdering Ord) const override;

    unsigned getMaxSupportedInterleaveFactor() const override;

    bool lowerInterleavedLoad(LoadInst *LI,
                              ArrayRef<ShuffleVectorInst *> Shuffles,
                              ArrayRef<unsigned> Indices,
                              unsigned Factor) const override;
    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                               unsigned Factor) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    bool useLoadStackGuardNode(const Module &M) const override;

    void insertSSPDeclarations(Module &M) const override;
    Value *getSDagStackGuard(const Module &M) const override;
    Function *getSSPStackGuardCheck(const Module &M) const override;

    bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                   unsigned &Cost) const override;

    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                          const MachineFunction &MF) const override {
      // Do not merge to larger than i32.
      return (MemVT.getSizeInBits() <= 32);
    }

    bool isCheapToSpeculateCttz(Type *Ty) const override;
    bool isCheapToSpeculateCtlz(Type *Ty) const override;

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    bool supportSwiftError() const override {
      return true;
    }

    bool supportSplitCSR(MachineFunction *MF) const override {
      return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
             MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
    }

    bool hasStandaloneRem(EVT VT) const override {
      return HasStandaloneRem;
    }

    ShiftLegalizationStrategy
    preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                       unsigned ExpansionFactor) const override;

    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
    CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

    /// Returns true if \p VecTy is a legal interleaved access type. This
    /// function checks the vector element type and the overall width of the
    /// vector.
    bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy,
                                      Align Alignment,
                                      const DataLayout &DL) const;

    bool isMulAddWithConstProfitable(SDValue AddNode,
                                     SDValue ConstNode) const override;

    bool alignLoopsWithOptSize() const override;

    /// Returns the number of interleaved accesses that will be generated when
    /// lowering accesses of the given type.
    unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                       const DataLayout &DL) const;

    void finalizeLowering(MachineFunction &MF) const override;

    /// Return the correct alignment for the current calling convention.
    Align getABIAlignmentForCallingConv(Type *ArgTy,
                                        const DataLayout &DL) const override;

    bool isDesirableToCommuteWithShift(const SDNode *N,
                                       CombineLevel Level) const override;

    bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                           CombineLevel Level) const override;

    bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                              unsigned SelectOpcode, SDValue X,
                                              SDValue Y) const override;

    bool preferIncOfAddToSubOfNot(EVT VT) const override;

    bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

    bool isComplexDeinterleavingSupported() const override;
    bool isComplexDeinterleavingOperationSupported(
        ComplexDeinterleavingOperation Operation, Type *Ty) const override;

    Value *createComplexDeinterleavingIR(
        IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
        ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
        Value *Accumulator = nullptr) const override;

    bool softPromoteHalfType() const override { return true; }

    bool useFPRegsForHalfType() const override { return true; }

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
    // check.
    bool InsertFencesForAtomic;

    bool HasStandaloneRem = true;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;

    using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

    void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                          SDValue &Arg, RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          bool IsTailCall,
                          int SPDiff) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    std::pair<SDValue, MachinePointerInfo>
    computeAddrForCallArg(const SDLoc &dl, SelectionDAG &DAG,
                          const CCValAssign &VA, SDValue StackPtr,
                          bool IsTailCall, int SPDiff) const;
    ByValCopyKind ByValNeedsCopyForTailCall(SelectionDAG &DAG, SDValue Src,
                                            SDValue Dst,
                                            ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                            SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
    void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                           SmallVectorImpl<SDValue> &Results) const;
    SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *Subtarget) const;
    SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                   SDValue &Chain) const;
    SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
    void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                   SelectionDAG &DAG) const;
    SDValue LowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;

    Register getRegisterByName(const char* RegName, LLT VT,
                               const MachineFunction &MF) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          SmallVectorImpl<SDNode *> &Created) const override;

    bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                    EVT VT) const override;

    SDValue MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT, MVT ValVT,
                      SDValue Val) const;
    SDValue MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, MVT LocVT,
                        MVT ValVT, SDValue Val) const;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                            SDValue ThisVal, bool isCmseNSCall) const;

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;
    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    bool splitValueIntoRegisterParts(
        SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
        unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
        const override;

    SDValue joinRegisterPartsIntoValue(
        SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
        unsigned NumParts, MVT PartVT, EVT ValueVT,
        std::optional<CallingConv::ID> CC) const override;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                       SDValue &Chain, const Value *OrigArg,
                       unsigned InRegsParamRecordIdx, int ArgOffset,
                       unsigned ArgSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              const SDLoc &dl, SDValue &Chain,
                              unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, Align) const override;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(
        TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
        SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context, const Type *RetTy) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

    bool shouldConsiderGEPOffsetSplit() const override { return true; }

    bool isUnsupportedFloatingType(EVT VT) const;

    SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                    SDValue ARMcc, SDValue Flags, SelectionDAG &DAG) const;
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                      const SDLoc &dl, bool Signaling = false) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
    MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
    void addMVEVectorTypes(bool HasMVEFP);
    void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
    void setAllExpand(MVT VT);
  };

  enum VMOVModImmType {
    VMOVModImm,
    VMVNModImm,
    MVEVMVNModImm,
    OtherModImm
  };

  namespace ARM {

    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);

  } // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H