//===-- RISCVTargetParser.cpp - Parser for target features ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise hardware features
// for RISC-V CPUs.
//
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {
namespace RISCV {

enum CPUKind : unsigned {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                  \
             FAST_VECTOR_UNALIGN)                                             \
  CK_##ENUM,
#define TUNE_PROC(ENUM, NAME) CK_##ENUM,
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};

struct CPUInfo {
  StringLiteral Name;
  StringLiteral DefaultMarch;
  bool FastScalarUnalignedAccess;
  bool FastVectorUnalignedAccess;
  bool is64Bit() const { return DefaultMarch.starts_with("rv64"); }
};

constexpr CPUInfo RISCVCPUInfo[] = {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                  \
             FAST_VECTOR_UNALIGN)                                             \
  {NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN, FAST_VECTOR_UNALIGN},
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};
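// For illustration, a hypothetical record in RISCVTargetParserDef.inc such as
//   PROC(EXAMPLE, "example-cpu", "rv64imafdc", false, false)
// would expand above to the CPUInfo initializer
//   {"example-cpu", "rv64imafdc", false, false}.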

static const CPUInfo *getCPUInfoByName(StringRef CPU) {
  for (auto &C : RISCVCPUInfo)
    if (C.Name == CPU)
      return &C;
  return nullptr;
}

bool hasFastScalarUnalignedAccess(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  return Info && Info->FastScalarUnalignedAccess;
}

bool hasFastVectorUnalignedAccess(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  return Info && Info->FastVectorUnalignedAccess;
}

bool parseCPU(StringRef CPU, bool IsRV64) {
  const CPUInfo *Info = getCPUInfoByName(CPU);

  if (!Info)
    return false;
  return Info->is64Bit() == IsRV64;
}

bool parseTuneCPU(StringRef TuneCPU, bool IsRV64) {
  std::optional<CPUKind> Kind =
      llvm::StringSwitch<std::optional<CPUKind>>(TuneCPU)
#define TUNE_PROC(ENUM, NAME) .Case(NAME, CK_##ENUM)
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
          .Default(std::nullopt);

  if (Kind.has_value())
    return true;

  // Fallback to parsing as a CPU.
  return parseCPU(TuneCPU, IsRV64);
}

StringRef getMArchFromMcpu(StringRef CPU) {
  const CPUInfo *Info = getCPUInfoByName(CPU);
  if (!Info)
    return "";
  return Info->DefaultMarch;
}

void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
  for (const auto &C : RISCVCPUInfo) {
    if (IsRV64 == C.is64Bit())
      Values.emplace_back(C.Name);
  }
}

void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
  for (const auto &C : RISCVCPUInfo) {
    if (IsRV64 == C.is64Bit())
      Values.emplace_back(C.Name);
  }
#define TUNE_PROC(ENUM, NAME) Values.emplace_back(StringRef(NAME));
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
}

// This function is currently used by IREE, so it's not dead code.
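// For a recognized CPU it fills EnabledFeatures with the feature strings
// implied by the CPU's default march, e.g. strings like "+m" and "+a" when
// NeedPlus is true, or "m" and "a" (leading '+'/'-' stripped) otherwise.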
void getFeaturesForCPU(StringRef CPU,
                       SmallVectorImpl<std::string> &EnabledFeatures,
                       bool NeedPlus) {
  StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(CPU);
  if (MarchFromCPU == "")
    return;

  EnabledFeatures.clear();
  auto RII = RISCVISAInfo::parseArchString(
      MarchFromCPU, /* EnableExperimentalExtension */ true);

  if (llvm::errorToBool(RII.takeError()))
    return;

  std::vector<std::string> FeatStrings =
      (*RII)->toFeatures(/* AddAllExtensions */ false);
  for (const auto &F : FeatStrings)
    if (NeedPlus)
      EnabledFeatures.push_back(F);
    else
      EnabledFeatures.push_back(F.substr(1));
}

namespace RISCVExtensionBitmaskTable {
#define GET_RISCVExtensionBitmaskTable_IMPL
#include "llvm/TargetParser/RISCVTargetParserDef.inc"

} // namespace RISCVExtensionBitmaskTable

namespace {
struct LessExtName {
  bool operator()(const RISCVExtensionBitmaskTable::RISCVExtensionBitmask &LHS,
                  StringRef RHS) {
    return StringRef(LHS.Name) < RHS;
  }
};
} // namespace

} // namespace RISCV

namespace RISCVVType {
// Encode VTYPE into the binary format used by the VSETVLI instruction, which
// is used by our MC layer representation.
//
// Bits | Name       | Description
// -----+------------+------------------------------------------------
// 7    | vma        | Vector mask agnostic
// 6    | vta        | Vector tail agnostic
// 5:3  | vsew[2:0]  | Standard element width (SEW) setting
// 2:0  | vlmul[2:0] | Vector register group multiplier (LMUL) setting
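//
// For example, encodeVTYPE(RISCVII::VLMUL::LMUL_2, /*SEW=*/32,
// /*TailAgnostic=*/true, /*MaskAgnostic=*/false) returns 0x51: vsew 0b010
// (e32) in bits 5:3, vlmul 0b001 (m2) in bits 2:0, and vta set in bit 6
// (assuming the SEW/LMUL encodings declared in RISCVTargetParser.h).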
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
                     bool MaskAgnostic) {
  assert(isValidSEW(SEW) && "Invalid SEW");
  unsigned VLMULBits = static_cast<unsigned>(VLMUL);
  unsigned VSEWBits = encodeSEW(SEW);
  unsigned VTypeI = (VSEWBits << 3) | (VLMULBits & 0x7);
  if (TailAgnostic)
    VTypeI |= 0x40;
  if (MaskAgnostic)
    VTypeI |= 0x80;

  return VTypeI;
}

std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL) {
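  // Returns {LMUL, Fractional}. E.g., LMUL_2 (encoding 1) decodes to
  // {2, false}, i.e. m2, while LMUL_F2 (encoding 7) decodes to {2, true},
  // i.e. mf2, since 1 << (8 - 7) == 2.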
  switch (VLMUL) {
  default:
    llvm_unreachable("Unexpected LMUL value!");
  case RISCVII::VLMUL::LMUL_1:
  case RISCVII::VLMUL::LMUL_2:
  case RISCVII::VLMUL::LMUL_4:
  case RISCVII::VLMUL::LMUL_8:
    return std::make_pair(1 << static_cast<unsigned>(VLMUL), false);
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F8:
    return std::make_pair(1 << (8 - static_cast<unsigned>(VLMUL)), true);
  }
}

void printVType(unsigned VType, raw_ostream &OS) {
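  // Prints the assembly form of VType; e.g. 0x51 (e32, m2, vta set, vma
  // clear, per encodeVTYPE above) prints as "e32, m2, ta, mu".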
  unsigned Sew = getSEW(VType);
  OS << "e" << Sew;

  unsigned LMul;
  bool Fractional;
  std::tie(LMul, Fractional) = decodeVLMUL(getVLMUL(VType));

  if (Fractional)
    OS << ", mf";
  else
    OS << ", m";
  OS << LMul;

  if (isTailAgnostic(VType))
    OS << ", ta";
  else
    OS << ", tu";

  if (isMaskAgnostic(VType))
    OS << ", ma";
  else
    OS << ", mu";
}

unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
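  // E.g., SEW == 32 with mf2 (decoded as {2, true}): the fixed-point LMul is
  // 8 / 2 == 4, so the returned ratio is (32 * 8) / 4 == 64.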
  unsigned LMul;
  bool Fractional;
  std::tie(LMul, Fractional) = decodeVLMUL(VLMul);

  // Convert LMul to a fixed point value with 3 fractional bits.
  LMul = Fractional ? (8 / LMul) : (LMul * 8);

  assert(SEW >= 8 && "Unexpected SEW value");
  return (SEW * 8) / LMul;
}

std::optional<RISCVII::VLMUL>
getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW) {
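  // Picks the EMUL that keeps EEW/EMUL equal to SEW/LMUL. E.g., for SEW == 32
  // with LMUL_1 (ratio 32) and EEW == 16: EMULFixedPoint is (16 * 8) / 32 == 4,
  // which is < 8, so EMUL is the fraction 8 / 4 == 2, i.e. mf2.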
  unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMUL);
  unsigned EMULFixedPoint = (EEW * 8) / Ratio;
  bool Fractional = EMULFixedPoint < 8;
  unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
  if (!isValidLMUL(EMUL, Fractional))
    return std::nullopt;
  return RISCVVType::encodeLMUL(EMUL, Fractional);
}

} // namespace RISCVVType

} // namespace llvm