//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <utility>

namespace llvm {

namespace HexagonISD {

    enum NodeType : unsigned {
      OP_BEGIN = ISD::BUILTIN_OP_END,

      CONST32 = OP_BEGIN,
      CONST32_GP,  // For marking data present in GP.
      ADDC,        // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
      SUBC,        // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
      ALLOCA,

      AT_GOT,      // Index in GOT.
      AT_PCREL,    // Offset relative to PC.

      CALL,        // Function call.
      CALLnr,      // Function call that does not return.
      CALLR,

      RET_FLAG,    // Return with a flag operand.
      BARRIER,     // Memory barrier.
      JT,          // Jump table.
      CP,          // Constant pool.

      COMBINE,
      VSPLAT,      // Generic splat, selection depends on argument/return
                   // types.
      VASL,
      VASR,
      VLSR,

      TSTBIT,
      INSERT,
      EXTRACTU,
      VEXTRACTW,
      VINSERTW0,
      VROR,
      TC_RETURN,
      EH_RETURN,
      DCFETCH,
      READCYCLE,
      PTRUE,
      PFALSE,
      D2P,         // Convert 8-byte value to 8-bit predicate register. [*]
      P2D,         // Convert 8-bit predicate register to 8-byte value. [*]
      V2Q,         // Convert HVX vector to a vector predicate reg. [*]
      Q2V,         // Convert vector predicate to an HVX vector. [*]
                   // [*] The equivalence is defined as "Q <=> (V != 0)",
                   //     where the != operation compares bytes.
                   // Note: V != 0 is implemented as V >u 0.
                   //       (A worked example follows this enum.)
      QCAT,
      QTRUE,
      QFALSE,
      VZERO,
      VSPLATW,     // HVX splat of a 32-bit word with an arbitrary result type.
      TYPECAST,    // No-op that's used to convert between different legal
                   // types in a register.
      VALIGN,      // Align two vectors (in Op0, Op1) to one that would have
                   // been loaded from address in Op2.
      VALIGNADDR,  // Align vector address: Op0 & -Op1, except when it is
                   // an address in a vector load, then it's a no-op.
      OP_END
    };
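    // A worked example of the predicate/vector equivalence marked [*] above
    // (an illustrative sketch derived only from the "Q <=> (V != 0)" rule,
    // not taken from the lowering code): for a byte vector
    //   V = { 0, 7, 0, 255, 1, 0, ... }
    // V2Q(V) yields a predicate with one lane per byte,
    //   Q = { 0, 1, 0, 1, 1, 0, ... }
    // and Q2V(Q) produces a vector whose bytes are nonzero exactly in the
    // set lanes, so V2Q(Q2V(Q)) == Q. D2P/P2D relate an 8-byte value and an
    // 8-bit predicate in the same byte-wise fashion.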

} // end namespace HexagonISD

  class HexagonSubtarget;

  class HexagonTargetLowering : public TargetLowering {
    int VarArgsFrameOffset;   // Frame offset to start of varargs area.
    const HexagonTargetMachine &HTM;
    const HexagonSubtarget &Subtarget;

    bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
        const;

  public:
    explicit HexagonTargetLowering(const TargetMachine &TM,
                                   const HexagonSubtarget &ST);

    bool isHVXVectorType(MVT Ty) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
        CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
        bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<SDValue> &OutVals,
        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isCheapToSpeculateCttz() const override { return true; }
    bool isCheapToSpeculateCtlz() const override { return true; }
    bool isCtlzFast() const override { return true; }

    bool hasBitTest(SDValue X, SDValue Y) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// Return true if an FMA operation is faster than a pair of mul and add
    /// instructions. fmuladd intrinsics will be expanded to FMAs when this
    /// method returns true (and FMAs are legal), otherwise fmuladd is
    /// expanded to mul + add.
    bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
                                    EVT) const override;
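    // For example (illustrative only, not specific to Hexagon): with this
    // hook returning true and FMA legal for the type, the IR
    //   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
    // is selected as a single fused multiply-add; otherwise it is lowered as
    //   %m = fmul float %a, %b
    //   %r = fadd float %m, %c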

    // Should we expand the build vector with shuffles?
    bool shouldExpandBuildVectorWithShuffles(EVT VT,
        unsigned DefinedValues) const override;

    bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
        const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
    void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) const override;
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
        GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
        unsigned ReturnReg, unsigned char OperandFlags) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
        SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            const SmallVectorImpl<SDValue> &OutVals,
                            SDValue Callee) const;

    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

    Register getRegisterByName(const char* RegName, LLT VT,
                               const MachineFunction &MF) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R0;
    }

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R1;
    }

    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

    EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                           EVT VT) const override {
      if (!VT.isVector())
        return MVT::i1;
      else
        return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
    }
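    // For instance, a setcc on scalar i32 operands yields an i1 result type,
    // while a setcc on v64i8 operands yields v64i1 (one i1 per element), as
    // computed above.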

    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                    SDValue &Base, SDValue &Offset,
                                    ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    // Intrinsics
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    /// The type may be VoidTy, in which case only return true if the addressing
    /// mode is legal for a load/store of any legal type.
    /// TODO: Handle pre/postinc as well.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;
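    // A sketch of a typical isLegalAddressingMode query (illustrative only):
    // asking whether a base-register-plus-immediate access such as [Rb + #8]
    // is legal would pass an AddrMode with HasBaseReg = true, BaseOffs = 8,
    // Scale = 0 and BaseGV = nullptr; the hook answers for the given type
    // and address space.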
    /// Return true if folding a constant offset with the given GlobalAddress
    /// is legal.  It is frequently not legal in PIC relocation models.
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is legal
    /// icmp immediate, that is the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
        unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
        const AttributeList &FuncAttributes) const override;

    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
        unsigned Align, MachineMemOperand::Flags Flags, bool *Fast)
        const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
                                     const override;

    bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                               EVT NewVT) const override;

    // Handling of atomic RMW instructions.
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
        AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
        Value *Addr, AtomicOrdering Ord) const override;
    AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    AtomicExpansionKind
    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
      return AtomicExpansionKind::LLSC;
    }
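    // Returning LLSC asks AtomicExpandPass to rewrite an atomicrmw into a
    // load-locked/store-conditional retry loop built from emitLoadLinked and
    // emitStoreConditional above. Conceptually (an illustrative sketch, not
    // the actual expansion):
    //   retry:
    //     Old = load-locked Addr             // emitLoadLinked
    //     New = Old <op> Val
    //     Ok  = store-conditional Addr, New  // emitStoreConditional
    //     if (!Ok) goto retry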

  private:
    void initializeHVXLowering();
    void validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
                                   unsigned NeedAlign) const;

    std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;

    bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                                 SelectionDAG &DAG,
                                 MutableArrayRef<ConstantInt*> Consts) const;
    SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                          SelectionDAG &DAG) const;
    SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                          SelectionDAG &DAG) const;
    SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                          MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                         const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
    SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                            SelectionDAG &DAG) const;
    SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                              SelectionDAG &DAG) const;
    SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;

    bool isUndef(SDValue Op) const {
      if (Op.isMachineOpcode())
        return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
      return Op.getOpcode() == ISD::UNDEF;
    }
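    // Example use of getInstr below (hypothetical, not taken from the .cpp):
    // wrap a machine instruction that materializes an immediate, e.g.
    //   SDValue Zero = getInstr(Hexagon::A2_tfrsi, dl, MVT::i32,
    //                           {DAG.getTargetConstant(0, dl, MVT::i32)}, DAG);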
    SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                     ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
      SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
      return SDValue(N, 0);
    }
    SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

    using VectorPair = std::pair<SDValue, SDValue>;
    using TypePair = std::pair<MVT, MVT>;

    SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                   const SDLoc &dl, SelectionDAG &DAG) const;

    MVT ty(SDValue Op) const {
      return Op.getValueType().getSimpleVT();
    }
    TypePair ty(const VectorPair &Ops) const {
      return { Ops.first.getValueType().getSimpleVT(),
               Ops.second.getValueType().getSimpleVT() };
    }
    MVT tyScalar(MVT Ty) const {
      if (!Ty.isVector())
        return Ty;
      return MVT::getIntegerVT(Ty.getSizeInBits());
    }
    MVT tyVector(MVT Ty, MVT ElemTy) const {
      if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
        return Ty;
      unsigned TyWidth = Ty.getSizeInBits();
      unsigned ElemWidth = ElemTy.getSizeInBits();
      assert((TyWidth % ElemWidth) == 0);
      return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
    }
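    // For example, tyScalar(MVT::v4i8) yields MVT::i32 (an integer of the
    // same total width), and tyVector(MVT::v4i8, MVT::i16) yields MVT::v2i16
    // (32 bits reinterpreted as 16-bit elements), per the arithmetic above.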

    MVT typeJoin(const TypePair &Tys) const;
    TypePair typeSplit(MVT Ty) const;
    MVT typeExtElem(MVT VecTy, unsigned Factor) const;
    MVT typeTruncElem(MVT VecTy, unsigned Factor) const;

    SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                   SelectionDAG &DAG) const;
    VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

    bool isHvxSingleTy(MVT Ty) const;
    bool isHvxPairTy(MVT Ty) const;
    SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                               SelectionDAG &DAG) const;
    SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
    SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                           ArrayRef<int> Mask, SelectionDAG &DAG) const;

    SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                              MVT VecTy, SelectionDAG &DAG) const;
    SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                               MVT VecTy, SelectionDAG &DAG) const;
    SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                                unsigned BitBytes, bool ZeroFill,
                                SelectionDAG &DAG) const;
    SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                 MVT ResTy, SelectionDAG &DAG) const;
    SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                   MVT ResTy, SelectionDAG &DAG) const;
    SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                    MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                  const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                   const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                                bool ZeroExt, SelectionDAG &DAG) const;

    SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;

    SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
    SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;

    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
        const override;

    bool isHvxOperation(SDValue Op) const;
    SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;

    SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  };

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H