//===-- RISCVBaseInfo.h - Top level definitions for RISC-V MC ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains small standalone enum definitions for the RISC-V target
// useful for the compiler back-end and the MC libraries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H

#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/TargetParser/SubtargetFeature.h"

namespace llvm {

// RISCVII - This namespace holds all of the target specific flags that
// instruction info tracks. All definitions must match RISCVInstrFormats.td.
namespace RISCVII {
enum {
  InstFormatPseudo = 0,
  InstFormatR = 1,
  InstFormatR4 = 2,
  InstFormatI = 3,
  InstFormatS = 4,
  InstFormatB = 5,
  InstFormatU = 6,
  InstFormatJ = 7,
  InstFormatCR = 8,
  InstFormatCI = 9,
  InstFormatCSS = 10,
  InstFormatCIW = 11,
  InstFormatCL = 12,
  InstFormatCS = 13,
  InstFormatCA = 14,
  InstFormatCB = 15,
  InstFormatCJ = 16,
  InstFormatCU = 17,
  InstFormatCLB = 18,
  InstFormatCLH = 19,
  InstFormatCSB = 20,
  InstFormatCSH = 21,
  InstFormatOther = 22,

  InstFormatMask = 31,
  InstFormatShift = 0,

  ConstraintShift = InstFormatShift + 5,
  VS2Constraint = 0b001 << ConstraintShift,
  VS1Constraint = 0b010 << ConstraintShift,
  VMConstraint = 0b100 << ConstraintShift,
  ConstraintMask = 0b111 << ConstraintShift,

  VLMulShift = ConstraintShift + 3,
  VLMulMask = 0b111 << VLMulShift,

  // Force a tail agnostic policy even if this instruction has a tied
  // destination.
  ForceTailAgnosticShift = VLMulShift + 3,
  ForceTailAgnosticMask = 1 << ForceTailAgnosticShift,

  // Is this a _TIED vector pseudo instruction. For these instructions we
  // shouldn't skip the tied operand when converting to MC instructions.
  IsTiedPseudoShift = ForceTailAgnosticShift + 1,
  IsTiedPseudoMask = 1 << IsTiedPseudoShift,

  // Does this instruction have a SEW operand. It will be the last explicit
  // operand unless there is a vector policy operand. Used by RVV Pseudos.
  HasSEWOpShift = IsTiedPseudoShift + 1,
  HasSEWOpMask = 1 << HasSEWOpShift,

  // Does this instruction have a VL operand. It will be the second to last
  // explicit operand unless there is a vector policy operand. Used by RVV
  // Pseudos.
  HasVLOpShift = HasSEWOpShift + 1,
  HasVLOpMask = 1 << HasVLOpShift,

  // Does this instruction have a vector policy operand. It will be the last
  // explicit operand. Used by RVV Pseudos.
  HasVecPolicyOpShift = HasVLOpShift + 1,
  HasVecPolicyOpMask = 1 << HasVecPolicyOpShift,

  // Is this instruction a vector widening reduction instruction. Used by RVV
  // Pseudos.
  IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
  IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,

  // Does this instruction care about mask policy. If it does not, the mask
  // policy can be either agnostic or undisturbed. For example, the results of
  // unmasked, store, and reduction operations are not affected by mask policy,
  // so the compiler is free to select either one.
  UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
  UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,

  // Indicates that the result can be considered sign extended from bit 31.
  // Some instructions with this flag aren't W instructions, but either sign
  // extend from a smaller size, always output a small integer, or put zeros in
  // bits 63:31. Used by the SExtWRemoval pass.
  IsSignExtendingOpWShift = UsesMaskPolicyShift + 1,
  IsSignExtendingOpWMask = 1ULL << IsSignExtendingOpWShift,

  HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
  HasRoundModeOpMask = 1 << HasRoundModeOpShift,

  UsesVXRMShift = HasRoundModeOpShift + 1,
  UsesVXRMMask = 1 << UsesVXRMShift,

  // Indicates whether this instruction's source and destination registers are
  // allowed to partially overlap, according to the vector spec.
  // 0 -> not a vector pseudo
  // 1 -> default value for vector pseudos. not widening or narrowing.
  // 2 -> narrowing case
  // 3 -> widening case
  TargetOverlapConstraintTypeShift = UsesVXRMShift + 1,
  TargetOverlapConstraintTypeMask = 3ULL << TargetOverlapConstraintTypeShift,
};
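
// Illustrative sketch (not part of the upstream header): the fields above are
// packed into MCInstrDesc::TSFlags, so a raw value can be decoded with the
// shift/mask pairs directly. For a hypothetical TSFlags value whose format is
// InstFormatR and whose VLMul field is 1:
//   uint64_t TSFlags = InstFormatR | (1ULL << VLMulShift);
//   unsigned Fmt  = (TSFlags & InstFormatMask) >> InstFormatShift; // InstFormatR
//   unsigned LMul = (TSFlags & VLMulMask) >> VLMulShift;           // 1 (LMUL_2)
// In practice the getFormat()/getLMul() helpers below should be used instead.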

enum VLMUL : uint8_t {
  LMUL_1 = 0,
  LMUL_2,
  LMUL_4,
  LMUL_8,
  LMUL_RESERVED,
  LMUL_F8,
  LMUL_F4,
  LMUL_F2
};
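
// Note (illustrative, based on the vtype accessors in RISCVVType below): these
// enumerators match the 3-bit vlmul field of vtype, so integral LMULs occupy
// encodings 0-3 and fractional LMULs 5-7, e.g.
//   LMUL_2  == 1   // LMUL = 2
//   LMUL_F2 == 7   // LMUL = 1/2
// RISCVVType::getVLMUL() simply masks these bits out of a vtype value.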

enum {
  TAIL_UNDISTURBED_MASK_UNDISTURBED = 0,
  TAIL_AGNOSTIC = 1,
  MASK_AGNOSTIC = 2,
};
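
// Illustrative sketch: these are bit flags, so a vector policy operand that is
// both tail agnostic and mask agnostic carries the value
//   unsigned Policy = TAIL_AGNOSTIC | MASK_AGNOSTIC; // == 3
// while 0 means both tail and mask are undisturbed.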

// Helper functions to read TSFlags.
/// \returns the format of the instruction.
static inline unsigned getFormat(uint64_t TSFlags) {
  return (TSFlags & InstFormatMask) >> InstFormatShift;
}
/// \returns the LMUL for the instruction.
static inline VLMUL getLMul(uint64_t TSFlags) {
  return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
}
/// \returns true if tail agnostic is enforced for the instruction.
static inline bool doesForceTailAgnostic(uint64_t TSFlags) {
  return TSFlags & ForceTailAgnosticMask;
}
/// \returns true if this is a _TIED pseudo.
static inline bool isTiedPseudo(uint64_t TSFlags) {
  return TSFlags & IsTiedPseudoMask;
}
/// \returns true if there is a SEW operand for the instruction.
static inline bool hasSEWOp(uint64_t TSFlags) {
  return TSFlags & HasSEWOpMask;
}
/// \returns true if there is a VL operand for the instruction.
static inline bool hasVLOp(uint64_t TSFlags) {
  return TSFlags & HasVLOpMask;
}
/// \returns true if there is a vector policy operand for this instruction.
static inline bool hasVecPolicyOp(uint64_t TSFlags) {
  return TSFlags & HasVecPolicyOpMask;
}
/// \returns true if it is a vector widening reduction instruction.
static inline bool isRVVWideningReduction(uint64_t TSFlags) {
  return TSFlags & IsRVVWideningReductionMask;
}
/// \returns true if mask policy is valid for the instruction.
static inline bool usesMaskPolicy(uint64_t TSFlags) {
  return TSFlags & UsesMaskPolicyMask;
}

/// \returns true if there is a rounding mode operand for this instruction.
static inline bool hasRoundModeOp(uint64_t TSFlags) {
  return TSFlags & HasRoundModeOpMask;
}

/// \returns true if this instruction uses vxrm.
static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
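
// Usage sketch (illustrative, not part of the upstream header): the readers
// above are typically chained off an instruction's TSFlags, e.g.
//   const uint64_t TSFlags = Desc.TSFlags;   // Desc: an llvm::MCInstrDesc
//   if (RISCVII::hasVLOp(TSFlags) && RISCVII::hasSEWOp(TSFlags)) {
//     // ... query RISCVII::getLMul(TSFlags), RISCVII::hasVecPolicyOp(TSFlags),
//     // and the operand-index helpers below.
//   }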

static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
  const uint64_t TSFlags = Desc.TSFlags;
  // This method is only called if we expect to have a VL operand, and all
  // instructions with VL also have SEW.
  assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags));
  unsigned Offset = 2;
  if (hasVecPolicyOp(TSFlags))
    Offset = 3;
  return Desc.getNumOperands() - Offset;
}

static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) {
  const uint64_t TSFlags = Desc.TSFlags;
  assert(hasSEWOp(TSFlags));
  unsigned Offset = 1;
  if (hasVecPolicyOp(TSFlags))
    Offset = 2;
  return Desc.getNumOperands() - Offset;
}

static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) {
  assert(hasVecPolicyOp(Desc.TSFlags));
  return Desc.getNumOperands() - 1;
}
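
// Worked example (illustrative): for an RVV pseudo whose trailing explicit
// operands are "..., vl, sew, policy" and whose getNumOperands() is N,
//   getVecPolicyOpNum(Desc) == N - 1
//   getSEWOpNum(Desc)       == N - 2   (policy operand present)
//   getVLOpNum(Desc)        == N - 3
// matching the offsets computed above.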

/// \returns the index of the rounding mode immediate value, if any; otherwise
/// returns -1.
static inline int getFRMOpNum(const MCInstrDesc &Desc) {
  const uint64_t TSFlags = Desc.TSFlags;
  if (!hasRoundModeOp(TSFlags) || usesVXRM(TSFlags))
    return -1;

  // The operand order
  // --------------------------------------
  // | n-1 (if any)   | n-2  | n-3 | n-4 |
  // | policy         | sew  | vl  | frm |
  // --------------------------------------
  return getVLOpNum(Desc) - 1;
}

/// \returns the index of the rounding mode immediate value, if any; otherwise
/// returns -1.
static inline int getVXRMOpNum(const MCInstrDesc &Desc) {
  const uint64_t TSFlags = Desc.TSFlags;
  if (!hasRoundModeOp(TSFlags) || !usesVXRM(TSFlags))
    return -1;
  // The operand order
  // --------------------------------------
  // | n-1 (if any)   | n-2  | n-3 | n-4  |
  // | policy         | sew  | vl  | vxrm |
  // --------------------------------------
  return getVLOpNum(Desc) - 1;
}

// Is the first def operand tied to the first use operand. This is true for
// vector pseudo instructions that have a merge operand for tail/mask
// undisturbed. It's also true for vector FMA instructions where one of the
// operands is also the destination register.
static inline bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc) {
  return Desc.getNumDefs() < Desc.getNumOperands() &&
         Desc.getOperandConstraint(Desc.getNumDefs(), MCOI::TIED_TO) == 0;
}

// RISC-V Specific Machine Operand Flags
enum {
  MO_None = 0,
  MO_CALL = 1,
  MO_LO = 3,
  MO_HI = 4,
  MO_PCREL_LO = 5,
  MO_PCREL_HI = 6,
  MO_GOT_HI = 7,
  MO_TPREL_LO = 8,
  MO_TPREL_HI = 9,
  MO_TPREL_ADD = 10,
  MO_TLS_GOT_HI = 11,
  MO_TLS_GD_HI = 12,
  MO_TLSDESC_HI = 13,
  MO_TLSDESC_LOAD_LO = 14,
  MO_TLSDESC_ADD_LO = 15,
  MO_TLSDESC_CALL = 16,

  // Used to differentiate between target-specific "direct" flags and "bitmask"
  // flags. A machine operand can only have one "direct" flag, but can have
  // multiple "bitmask" flags.
  MO_DIRECT_FLAG_MASK = 31
};
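
// Illustrative sketch: MO_DIRECT_FLAG_MASK separates the single "direct" flag
// from any additional bitmask flags an operand may carry, e.g.
//   unsigned Direct = TargetFlags & RISCVII::MO_DIRECT_FLAG_MASK;
//   if (Direct == RISCVII::MO_PCREL_HI) {
//     // operand is the %pcrel_hi part of a PC-relative address
//   }
// where TargetFlags stands for the operand's target flags
// (MachineOperand::getTargetFlags()).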
} // namespace RISCVII

namespace RISCVOp {
enum OperandType : unsigned {
  OPERAND_FIRST_RISCV_IMM = MCOI::OPERAND_FIRST_TARGET,
  OPERAND_UIMM1 = OPERAND_FIRST_RISCV_IMM,
  OPERAND_UIMM2,
  OPERAND_UIMM2_LSB0,
  OPERAND_UIMM3,
  OPERAND_UIMM4,
  OPERAND_UIMM5,
  OPERAND_UIMM6,
  OPERAND_UIMM7,
  OPERAND_UIMM7_LSB00,
  OPERAND_UIMM8_LSB00,
  OPERAND_UIMM8,
  OPERAND_UIMM8_LSB000,
  OPERAND_UIMM8_GE32,
  OPERAND_UIMM9_LSB000,
  OPERAND_UIMM10_LSB00_NONZERO,
  OPERAND_UIMM12,
  OPERAND_ZERO,
  OPERAND_SIMM5,
  OPERAND_SIMM5_PLUS1,
  OPERAND_SIMM6,
  OPERAND_SIMM6_NONZERO,
  OPERAND_SIMM10_LSB0000_NONZERO,
  OPERAND_SIMM12,
  OPERAND_SIMM12_LSB00000,
  OPERAND_UIMM20,
  OPERAND_UIMMLOG2XLEN,
  OPERAND_UIMMLOG2XLEN_NONZERO,
  OPERAND_CLUI_IMM,
  OPERAND_VTYPEI10,
  OPERAND_VTYPEI11,
  OPERAND_RVKRNUM,
  OPERAND_RVKRNUM_0_7,
  OPERAND_RVKRNUM_1_10,
  OPERAND_RVKRNUM_2_14,
  OPERAND_LAST_RISCV_IMM = OPERAND_RVKRNUM_2_14,
  // Operand is either a register or uimm5. This is used by V extension pseudo
  // instructions to represent a value that can be passed as AVL to either
  // vsetvli or vsetivli.
  OPERAND_AVL,
};
} // namespace RISCVOp

// Describes the predecessor/successor bits used in the FENCE instruction.
namespace RISCVFenceField {
enum FenceField {
  I = 8,
  O = 4,
  R = 2,
  W = 1
};
}
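
// Illustrative sketch: the FENCE predecessor/successor sets are 4-bit masks
// built by OR-ing the fields above, e.g. "fence iorw, iorw" uses
//   unsigned IORW = RISCVFenceField::I | RISCVFenceField::O |
//                   RISCVFenceField::R | RISCVFenceField::W; // == 0xF
// for both the pred and succ operands.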

// Describes the supported floating point rounding mode encodings.
namespace RISCVFPRndMode {
enum RoundingMode {
  RNE = 0,
  RTZ = 1,
  RDN = 2,
  RUP = 3,
  RMM = 4,
  DYN = 7,
  Invalid
};

inline static StringRef roundingModeToString(RoundingMode RndMode) {
  switch (RndMode) {
  default:
    llvm_unreachable("Unknown floating point rounding mode");
  case RISCVFPRndMode::RNE:
    return "rne";
  case RISCVFPRndMode::RTZ:
    return "rtz";
  case RISCVFPRndMode::RDN:
    return "rdn";
  case RISCVFPRndMode::RUP:
    return "rup";
  case RISCVFPRndMode::RMM:
    return "rmm";
  case RISCVFPRndMode::DYN:
    return "dyn";
  }
}

inline static RoundingMode stringToRoundingMode(StringRef Str) {
  return StringSwitch<RoundingMode>(Str)
      .Case("rne", RISCVFPRndMode::RNE)
      .Case("rtz", RISCVFPRndMode::RTZ)
      .Case("rdn", RISCVFPRndMode::RDN)
      .Case("rup", RISCVFPRndMode::RUP)
      .Case("rmm", RISCVFPRndMode::RMM)
      .Case("dyn", RISCVFPRndMode::DYN)
      .Default(RISCVFPRndMode::Invalid);
}
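
// Round-trip sketch (illustrative): the two helpers above are inverses for the
// valid encodings, e.g.
//   stringToRoundingMode("rtz")       == RISCVFPRndMode::RTZ
//   roundingModeToString(RISCVFPRndMode::RTZ) == "rtz"
// while an unrecognized string yields RISCVFPRndMode::Invalid.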

inline static bool isValidRoundingMode(unsigned Mode) {
  switch (Mode) {
  default:
    return false;
  case RISCVFPRndMode::RNE:
  case RISCVFPRndMode::RTZ:
  case RISCVFPRndMode::RDN:
  case RISCVFPRndMode::RUP:
  case RISCVFPRndMode::RMM:
  case RISCVFPRndMode::DYN:
    return true;
  }
}
} // namespace RISCVFPRndMode

//===----------------------------------------------------------------------===//
// Floating-point Immediates
//

namespace RISCVLoadFPImm {
float getFPImm(unsigned Imm);

/// getLoadFPImm - Return a 5-bit binary encoding of the floating-point
/// immediate value. If the value cannot be represented as a 5-bit binary
/// encoding, then return -1.
int getLoadFPImm(APFloat FPImm);
} // namespace RISCVLoadFPImm

namespace RISCVSysReg {
struct SysReg {
  const char *Name;
  const char *AltName;
  const char *DeprecatedName;
  unsigned Encoding;
  // FIXME: add these additional fields when needed.
  // Privilege Access: Read, Write, Read-Only.
  // unsigned ReadWrite;
  // Privilege Mode: User, System or Machine.
  // unsigned Mode;
  // Check field name.
  // unsigned Extra;
  // Register number without the privilege bits.
  // unsigned Number;
  FeatureBitset FeaturesRequired;
  bool isRV32Only;

  bool haveRequiredFeatures(const FeatureBitset &ActiveFeatures) const {
    // RV32-only register, but we are not in 32-bit mode.
    if (isRV32Only && ActiveFeatures[RISCV::Feature64Bit])
      return false;
    // No required feature associated with the system register.
    if (FeaturesRequired.none())
      return true;
    return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
  }
};

#define GET_SysRegsList_DECL
#include "RISCVGenSearchableTables.inc"
} // end namespace RISCVSysReg

namespace RISCVInsnOpcode {
struct RISCVOpcode {
  const char *Name;
  unsigned Value;
};

#define GET_RISCVOpcodesList_DECL
#include "RISCVGenSearchableTables.inc"
} // end namespace RISCVInsnOpcode

namespace RISCVABI {

enum ABI {
  ABI_ILP32,
  ABI_ILP32F,
  ABI_ILP32D,
  ABI_ILP32E,
  ABI_LP64,
  ABI_LP64F,
  ABI_LP64D,
  ABI_LP64E,
  ABI_Unknown
};

// Returns the target ABI, or else a StringError if the requested ABIName is
// not supported for the given TT and FeatureBits combination.
ABI computeTargetABI(const Triple &TT, const FeatureBitset &FeatureBits,
                     StringRef ABIName);

ABI getTargetABI(StringRef ABIName);

// Returns the register used to hold the stack pointer after realignment.
MCRegister getBPReg();

// Returns the register holding shadow call stack pointer.
MCRegister getSCSPReg();

} // namespace RISCVABI

namespace RISCVFeatures {

// Validates whether the given combination of features is valid for the target
// triple. Exits with report_fatal_error if not.
void validate(const Triple &TT, const FeatureBitset &FeatureBits);

llvm::Expected<std::unique_ptr<RISCVISAInfo>>
parseFeatureBits(bool IsRV64, const FeatureBitset &FeatureBits);

} // namespace RISCVFeatures

namespace RISCVVType {
// Is this a SEW value that can be encoded into the VTYPE format.
inline static bool isValidSEW(unsigned SEW) {
  return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 1024;
}

// Is this a LMUL value that can be encoded into the VTYPE format.
inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
  return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1);
}
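
// Examples (illustrative): per the predicates above,
//   isValidSEW(32)        -> true    isValidSEW(12)       -> false
//   isValidLMUL(4, false) -> true    isValidLMUL(1, true) -> false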

unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
                     bool MaskAgnostic);

inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
  unsigned VLMUL = VType & 0x7;
  return static_cast<RISCVII::VLMUL>(VLMUL);
}

// Decode VLMUL into 1,2,4,8 and fractional indicator.
std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL);

inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
  assert(isValidLMUL(LMUL, Fractional) && "Unsupported LMUL");
  unsigned LmulLog2 = Log2_32(LMUL);
  return static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
}

inline static unsigned decodeVSEW(unsigned VSEW) {
  assert(VSEW < 8 && "Unexpected VSEW value");
  return 1 << (VSEW + 3);
}

inline static unsigned encodeSEW(unsigned SEW) {
  assert(isValidSEW(SEW) && "Unexpected SEW value");
  return Log2_32(SEW) - 3;
}

inline static unsigned getSEW(unsigned VType) {
  unsigned VSEW = (VType >> 3) & 0x7;
  return decodeVSEW(VSEW);
}

inline static bool isTailAgnostic(unsigned VType) { return VType & 0x40; }

inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; }
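
// Decoding sketch (illustrative): for the vtype value 0xD1
// (vlmul=1, vsew=2, vta=1, vma=1) the accessors above give
//   getVLMUL(0xD1)       == RISCVII::LMUL_2
//   getSEW(0xD1)         == 32
//   isTailAgnostic(0xD1) == true
//   isMaskAgnostic(0xD1) == true
// i.e. "e32, m2, ta, ma".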

void printVType(unsigned VType, raw_ostream &OS);

unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul);

std::optional<RISCVII::VLMUL>
getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW);
} // namespace RISCVVType

namespace RISCVRVC {
bool compress(MCInst &OutInst, const MCInst &MI, const MCSubtargetInfo &STI);
bool uncompress(MCInst &OutInst, const MCInst &MI, const MCSubtargetInfo &STI);
} // namespace RISCVRVC

namespace RISCVZC {
enum RLISTENCODE {
  RA = 4,
  RA_S0,
  RA_S0_S1,
  RA_S0_S2,
  RA_S0_S3,
  RA_S0_S4,
  RA_S0_S5,
  RA_S0_S6,
  RA_S0_S7,
  RA_S0_S8,
  RA_S0_S9,
  // note - to include s10, s11 must also be included
  RA_S0_S11,
  INVALID_RLIST,
};

inline unsigned encodeRlist(MCRegister EndReg, bool IsRV32E = false) {
  assert((!IsRV32E || EndReg <= RISCV::X9) && "Invalid Rlist for RV32E");
  switch (EndReg) {
  case RISCV::X1:
    return RLISTENCODE::RA;
  case RISCV::X8:
    return RLISTENCODE::RA_S0;
  case RISCV::X9:
    return RLISTENCODE::RA_S0_S1;
  case RISCV::X18:
    return RLISTENCODE::RA_S0_S2;
  case RISCV::X19:
    return RLISTENCODE::RA_S0_S3;
  case RISCV::X20:
    return RLISTENCODE::RA_S0_S4;
  case RISCV::X21:
    return RLISTENCODE::RA_S0_S5;
  case RISCV::X22:
    return RLISTENCODE::RA_S0_S6;
  case RISCV::X23:
    return RLISTENCODE::RA_S0_S7;
  case RISCV::X24:
    return RLISTENCODE::RA_S0_S8;
  case RISCV::X25:
    return RLISTENCODE::RA_S0_S9;
  case RISCV::X26:
    return RLISTENCODE::INVALID_RLIST;
  case RISCV::X27:
    return RLISTENCODE::RA_S0_S11;
  default:
    llvm_unreachable("Undefined input.");
  }
}
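
// Example (illustrative): a cm.push/cm.pop register list ending at s1 saves
// {ra, s0, s1}, so
//   encodeRlist(RISCV::X9) == RLISTENCODE::RA_S0_S1 // == 6
// while X26 (s10) maps to INVALID_RLIST because s10 cannot be included
// without s11.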

inline static unsigned getStackAdjBase(unsigned RlistVal, bool IsRV64,
                                       bool IsEABI) {
  assert(RlistVal != RLISTENCODE::INVALID_RLIST &&
         "{ra, s0-s10} is not supported, s11 must be included.");
  if (IsEABI)
    return 16;
  if (!IsRV64) {
    switch (RlistVal) {
    case RLISTENCODE::RA:
    case RLISTENCODE::RA_S0:
    case RLISTENCODE::RA_S0_S1:
    case RLISTENCODE::RA_S0_S2:
      return 16;
    case RLISTENCODE::RA_S0_S3:
    case RLISTENCODE::RA_S0_S4:
    case RLISTENCODE::RA_S0_S5:
    case RLISTENCODE::RA_S0_S6:
      return 32;
    case RLISTENCODE::RA_S0_S7:
    case RLISTENCODE::RA_S0_S8:
    case RLISTENCODE::RA_S0_S9:
      return 48;
    case RLISTENCODE::RA_S0_S11:
      return 64;
    }
  } else {
    switch (RlistVal) {
    case RLISTENCODE::RA:
    case RLISTENCODE::RA_S0:
      return 16;
    case RLISTENCODE::RA_S0_S1:
    case RLISTENCODE::RA_S0_S2:
      return 32;
    case RLISTENCODE::RA_S0_S3:
    case RLISTENCODE::RA_S0_S4:
      return 48;
    case RLISTENCODE::RA_S0_S5:
    case RLISTENCODE::RA_S0_S6:
      return 64;
    case RLISTENCODE::RA_S0_S7:
    case RLISTENCODE::RA_S0_S8:
      return 80;
    case RLISTENCODE::RA_S0_S9:
      return 96;
    case RLISTENCODE::RA_S0_S11:
      return 112;
    }
  }
  llvm_unreachable("Unexpected RlistVal");
}

inline static bool getSpimm(unsigned RlistVal, unsigned &SpimmVal,
                            int64_t StackAdjustment, bool IsRV64, bool IsEABI) {
  if (RlistVal == RLISTENCODE::INVALID_RLIST)
    return false;
  unsigned stackAdj = getStackAdjBase(RlistVal, IsRV64, IsEABI);
  SpimmVal = (StackAdjustment - stackAdj) / 16;
  if (SpimmVal > 3)
    return false;
  return true;
}
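
// Worked example (illustrative): for rlist {ra, s0} on RV32 (non-EABI) the
// base adjustment from getStackAdjBase() is 16, so a total stack adjustment
// of 32 encodes as
//   unsigned Spimm;
//   getSpimm(RLISTENCODE::RA_S0, Spimm, /*StackAdjustment=*/32,
//            /*IsRV64=*/false, /*IsEABI=*/false); // true, Spimm == 1
// i.e. spimm counts additional 16-byte blocks beyond the base adjustment.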

void printRlist(unsigned SlistEncode, raw_ostream &OS);
void printSpimm(int64_t Spimm, raw_ostream &OS);
} // namespace RISCVZC

} // namespace llvm

#endif