// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (c) 2023 Intel Corporation
 *
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
14
15 enum {
16 VCPU_FEATURE_ISA_EXT = 0,
17 VCPU_FEATURE_SBI_EXT,
18 };
19
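/*
 * Index of each vector CSR within the vector_regs sublist below; the 32
 * vector registers follow starting at KVM_RISC_V_REG_OFFSET_MAX (see
 * override_vector_reg_size()).
 */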
20 enum {
21 KVM_RISC_V_REG_OFFSET_VSTART = 0,
22 KVM_RISC_V_REG_OFFSET_VL,
23 KVM_RISC_V_REG_OFFSET_VTYPE,
24 KVM_RISC_V_REG_OFFSET_VCSR,
25 KVM_RISC_V_REG_OFFSET_VLENB,
26 KVM_RISC_V_REG_OFFSET_MAX,
27 };
28
29 static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
30
bool filter_reg(__u64 reg)
32 {
33 switch (reg & ~REG_MASK) {
	/*
	 * The same set of ISA_EXT registers is not present on all hosts
	 * because ISA_EXT registers are visible to KVM user space based on
	 * the ISA extensions available on the host. Also, disabling an ISA
	 * extension using the corresponding ISA_EXT register does not affect
	 * the visibility of the ISA_EXT register itself.
	 *
	 * Based on the above, we should filter out all ISA_EXT registers.
	 *
	 * Note: The below list is alphabetically sorted.
	 */
45 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_A:
46 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_C:
47 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D:
48 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F:
49 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_H:
50 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I:
51 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M:
52 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V:
53 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMNPM:
54 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN:
55 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA:
56 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSCOFPMF:
57 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSNPM:
58 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC:
59 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVADE:
60 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVADU:
61 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
62 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
63 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
64 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVVPTC:
65 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAAMO:
66 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZABHA:
67 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
68 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZALRSC:
69 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAWRS:
70 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
71 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
72 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
73 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKB:
74 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKC:
75 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKX:
76 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
77 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCA:
78 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCB:
79 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCD:
80 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCF:
81 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCMOP:
82 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
83 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
84 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
85 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
86 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
87 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICCRSE:
88 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
89 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND:
90 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR:
91 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI:
92 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
93 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
94 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
95 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIMOP:
96 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
97 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
98 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNH:
99 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKR:
100 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
101 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
102 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
103 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO:
104 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
105 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
106 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
107 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN:
108 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB:
109 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKG:
110 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNED:
111 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHA:
112 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHB:
113 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSED:
114 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSH:
115 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKT:
116 /*
117 * Like ISA_EXT registers, SBI_EXT registers are only visible when the
118 * host supports them and disabling them does not affect the visibility
119 * of the SBI_EXT register itself.
120 */
121 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01:
122 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME:
123 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI:
124 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE:
125 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST:
126 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM:
127 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU:
128 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
129 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SUSP:
130 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
131 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
132 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
133 return true;
134 /* AIA registers are always available when Ssaia can't be disabled */
135 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
136 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
137 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
138 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
139 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
140 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
141 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
142 return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
143 default:
144 break;
145 }
146
147 return false;
148 }
149
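/*
 * Errno expected when the common get-reg-list test tries to set a register
 * that must be rejected; RISC-V KVM rejects such sets with EINVAL.
 */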
bool check_reject_set(int err)
151 {
152 return err == EINVAL;
153 }
154
static int override_vector_reg_size(struct kvm_vcpu *vcpu, struct vcpu_reg_sublist *s,
				    uint64_t feature)
157 {
158 unsigned long vlenb_reg = 0;
159 int rc;
160 u64 reg, size;
161
162 /* Enable V extension so that we can get the vlenb register */
163 rc = __vcpu_set_reg(vcpu, feature, 1);
164 if (rc)
165 return rc;
166
167 vlenb_reg = vcpu_get_reg(vcpu, s->regs[KVM_RISC_V_REG_OFFSET_VLENB]);
168 if (!vlenb_reg) {
169 TEST_FAIL("Can't compute vector register size from zero vlenb\n");
170 return -EPERM;
171 }
172
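	/*
	 * vlenb is the vector register length in bytes and is always a power
	 * of two, so ctz(vlenb) is log2 of that length, which is exactly the
	 * value encoded in the KVM reg id size field.
	 */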
173 size = __builtin_ctzl(vlenb_reg);
174 size <<= KVM_REG_SIZE_SHIFT;
175
176 for (int i = 0; i < 32; i++) {
177 reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size | KVM_REG_RISCV_VECTOR_REG(i);
178 s->regs[KVM_RISC_V_REG_OFFSET_MAX + i] = reg;
179 }
180
181 /* We should assert if disabling failed here while enabling succeeded before */
182 vcpu_set_reg(vcpu, feature, 0);
183
184 return 0;
185 }
186
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
188 {
189 unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
190 struct vcpu_reg_sublist *s;
191 uint64_t feature;
192 int rc;
193
194 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
195 __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);
196
	/*
	 * Disable all extensions which were enabled by default
	 * if they were available in the RISC-V host.
	 */
201 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
202 rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
203 if (rc && isa_ext_state[i])
204 isa_ext_cant_disable[i] = true;
205 }
206
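	/*
	 * Also disable all SBI extensions. Setting an SBI extension register
	 * the kernel does not implement fails with ENOENT, which the
	 * assertion below tolerates.
	 */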
207 for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
208 rc = __vcpu_set_reg(vcpu, RISCV_SBI_EXT_REG(i), 0);
209 TEST_ASSERT(!rc || (rc == -1 && errno == ENOENT), "Unexpected error");
210 }
211
212 for_each_sublist(c, s) {
213 if (!s->feature)
214 continue;
215
216 if (s->feature == KVM_RISCV_ISA_EXT_V) {
217 feature = RISCV_ISA_EXT_REG(s->feature);
218 rc = override_vector_reg_size(vcpu, s, feature);
219 if (rc)
220 goto skip;
221 }
222
223 switch (s->feature_type) {
224 case VCPU_FEATURE_ISA_EXT:
225 feature = RISCV_ISA_EXT_REG(s->feature);
226 break;
227 case VCPU_FEATURE_SBI_EXT:
228 feature = RISCV_SBI_EXT_REG(s->feature);
229 break;
230 default:
231 TEST_FAIL("Unknown feature type");
232 }
233
234 /* Try to enable the desired extension */
235 __vcpu_set_reg(vcpu, feature, 1);
236
237 skip:
238 /* Double check whether the desired extension was enabled */
239 __TEST_REQUIRE(__vcpu_has_ext(vcpu, feature),
240 "%s not available, skipping tests", s->name);
241 }
242 }
243
static const char *config_id_to_str(const char *prefix, __u64 id)
245 {
246 /* reg_off is the offset into struct kvm_riscv_config */
247 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
248
249 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG);
250
251 switch (reg_off) {
252 case KVM_REG_RISCV_CONFIG_REG(isa):
253 return "KVM_REG_RISCV_CONFIG_REG(isa)";
254 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
255 return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
256 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
257 return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
258 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
259 return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
260 case KVM_REG_RISCV_CONFIG_REG(marchid):
261 return "KVM_REG_RISCV_CONFIG_REG(marchid)";
262 case KVM_REG_RISCV_CONFIG_REG(mimpid):
263 return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
264 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
265 return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
266 }
267
268 return strdup_printf("%lld /* UNKNOWN */", reg_off);
269 }
270
static const char *core_id_to_str(const char *prefix, __u64 id)
272 {
273 /* reg_off is the offset into struct kvm_riscv_core */
274 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
275
276 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE);
277
278 switch (reg_off) {
279 case KVM_REG_RISCV_CORE_REG(regs.pc):
280 return "KVM_REG_RISCV_CORE_REG(regs.pc)";
281 case KVM_REG_RISCV_CORE_REG(regs.ra):
282 return "KVM_REG_RISCV_CORE_REG(regs.ra)";
283 case KVM_REG_RISCV_CORE_REG(regs.sp):
284 return "KVM_REG_RISCV_CORE_REG(regs.sp)";
285 case KVM_REG_RISCV_CORE_REG(regs.gp):
286 return "KVM_REG_RISCV_CORE_REG(regs.gp)";
287 case KVM_REG_RISCV_CORE_REG(regs.tp):
288 return "KVM_REG_RISCV_CORE_REG(regs.tp)";
289 case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
290 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
291 reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
292 case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
293 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
294 reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
295 case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
296 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
297 reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
298 case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
299 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
300 reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
301 case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
302 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
303 reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
304 case KVM_REG_RISCV_CORE_REG(mode):
305 return "KVM_REG_RISCV_CORE_REG(mode)";
306 }
307
308 return strdup_printf("%lld /* UNKNOWN */", reg_off);
309 }
310
311 #define RISCV_CSR_GENERAL(csr) \
312 "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
313 #define RISCV_CSR_AIA(csr) \
314 "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
315 #define RISCV_CSR_SMSTATEEN(csr) \
316 "KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_REG(" #csr ")"
317
static const char *general_csr_id_to_str(__u64 reg_off)
319 {
320 /* reg_off is the offset into struct kvm_riscv_csr */
321 switch (reg_off) {
322 case KVM_REG_RISCV_CSR_REG(sstatus):
323 return RISCV_CSR_GENERAL(sstatus);
324 case KVM_REG_RISCV_CSR_REG(sie):
325 return RISCV_CSR_GENERAL(sie);
326 case KVM_REG_RISCV_CSR_REG(stvec):
327 return RISCV_CSR_GENERAL(stvec);
328 case KVM_REG_RISCV_CSR_REG(sscratch):
329 return RISCV_CSR_GENERAL(sscratch);
330 case KVM_REG_RISCV_CSR_REG(sepc):
331 return RISCV_CSR_GENERAL(sepc);
332 case KVM_REG_RISCV_CSR_REG(scause):
333 return RISCV_CSR_GENERAL(scause);
334 case KVM_REG_RISCV_CSR_REG(stval):
335 return RISCV_CSR_GENERAL(stval);
336 case KVM_REG_RISCV_CSR_REG(sip):
337 return RISCV_CSR_GENERAL(sip);
338 case KVM_REG_RISCV_CSR_REG(satp):
339 return RISCV_CSR_GENERAL(satp);
340 case KVM_REG_RISCV_CSR_REG(scounteren):
341 return RISCV_CSR_GENERAL(scounteren);
342 case KVM_REG_RISCV_CSR_REG(senvcfg):
343 return RISCV_CSR_GENERAL(senvcfg);
344 }
345
346 return strdup_printf("KVM_REG_RISCV_CSR_GENERAL | %lld /* UNKNOWN */", reg_off);
347 }
348
static const char *aia_csr_id_to_str(__u64 reg_off)
350 {
351 /* reg_off is the offset into struct kvm_riscv_aia_csr */
352 switch (reg_off) {
353 case KVM_REG_RISCV_CSR_AIA_REG(siselect):
354 return RISCV_CSR_AIA(siselect);
355 case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
356 return RISCV_CSR_AIA(iprio1);
357 case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
358 return RISCV_CSR_AIA(iprio2);
359 case KVM_REG_RISCV_CSR_AIA_REG(sieh):
360 return RISCV_CSR_AIA(sieh);
361 case KVM_REG_RISCV_CSR_AIA_REG(siph):
362 return RISCV_CSR_AIA(siph);
363 case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
364 return RISCV_CSR_AIA(iprio1h);
365 case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
366 return RISCV_CSR_AIA(iprio2h);
367 }
368
369 return strdup_printf("KVM_REG_RISCV_CSR_AIA | %lld /* UNKNOWN */", reg_off);
370 }
371
static const char *smstateen_csr_id_to_str(__u64 reg_off)
373 {
374 /* reg_off is the offset into struct kvm_riscv_smstateen_csr */
375 switch (reg_off) {
376 case KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0):
377 return RISCV_CSR_SMSTATEEN(sstateen0);
378 }
379
380 TEST_FAIL("Unknown smstateen csr reg: 0x%llx", reg_off);
381 return NULL;
382 }
383
static const char *csr_id_to_str(const char *prefix, __u64 id)
385 {
386 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
387 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
388
389 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR);
390
391 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
392
393 switch (reg_subtype) {
394 case KVM_REG_RISCV_CSR_GENERAL:
395 return general_csr_id_to_str(reg_off);
396 case KVM_REG_RISCV_CSR_AIA:
397 return aia_csr_id_to_str(reg_off);
398 case KVM_REG_RISCV_CSR_SMSTATEEN:
399 return smstateen_csr_id_to_str(reg_off);
400 }
401
402 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
403 }
404
static const char *timer_id_to_str(const char *prefix, __u64 id)
406 {
407 /* reg_off is the offset into struct kvm_riscv_timer */
408 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
409
410 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER);
411
412 switch (reg_off) {
413 case KVM_REG_RISCV_TIMER_REG(frequency):
414 return "KVM_REG_RISCV_TIMER_REG(frequency)";
415 case KVM_REG_RISCV_TIMER_REG(time):
416 return "KVM_REG_RISCV_TIMER_REG(time)";
417 case KVM_REG_RISCV_TIMER_REG(compare):
418 return "KVM_REG_RISCV_TIMER_REG(compare)";
419 case KVM_REG_RISCV_TIMER_REG(state):
420 return "KVM_REG_RISCV_TIMER_REG(state)";
421 }
422
423 return strdup_printf("%lld /* UNKNOWN */", reg_off);
424 }
425
static const char *fp_f_id_to_str(const char *prefix, __u64 id)
427 {
428 /* reg_off is the offset into struct __riscv_f_ext_state */
429 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
430
431 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F);
432
433 switch (reg_off) {
434 case KVM_REG_RISCV_FP_F_REG(f[0]) ...
435 KVM_REG_RISCV_FP_F_REG(f[31]):
436 return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
437 case KVM_REG_RISCV_FP_F_REG(fcsr):
438 return "KVM_REG_RISCV_FP_F_REG(fcsr)";
439 }
440
441 return strdup_printf("%lld /* UNKNOWN */", reg_off);
442 }
443
static const char *fp_d_id_to_str(const char *prefix, __u64 id)
445 {
446 /* reg_off is the offset into struct __riscv_d_ext_state */
447 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
448
449 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D);
450
451 switch (reg_off) {
452 case KVM_REG_RISCV_FP_D_REG(f[0]) ...
453 KVM_REG_RISCV_FP_D_REG(f[31]):
454 return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
455 case KVM_REG_RISCV_FP_D_REG(fcsr):
456 return "KVM_REG_RISCV_FP_D_REG(fcsr)";
457 }
458
459 return strdup_printf("%lld /* UNKNOWN */", reg_off);
460 }
461
static const char *vector_id_to_str(const char *prefix, __u64 id)
463 {
464 /* reg_off is the offset into struct __riscv_v_ext_state */
465 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_VECTOR);
466 int reg_index = 0;
467
468 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_VECTOR);
469
470 if (reg_off >= KVM_REG_RISCV_VECTOR_REG(0))
471 reg_index = reg_off - KVM_REG_RISCV_VECTOR_REG(0);
472 switch (reg_off) {
473 case KVM_REG_RISCV_VECTOR_REG(0) ...
474 KVM_REG_RISCV_VECTOR_REG(31):
475 return strdup_printf("KVM_REG_RISCV_VECTOR_REG(%d)", reg_index);
476 case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
477 return "KVM_REG_RISCV_VECTOR_CSR_REG(vstart)";
478 case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
479 return "KVM_REG_RISCV_VECTOR_CSR_REG(vl)";
480 case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
481 return "KVM_REG_RISCV_VECTOR_CSR_REG(vtype)";
482 case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
483 return "KVM_REG_RISCV_VECTOR_CSR_REG(vcsr)";
484 case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
485 return "KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)";
486 }
487
488 return strdup_printf("%lld /* UNKNOWN */", reg_off);
489 }
490
491 #define KVM_ISA_EXT_ARR(ext) \
492 [KVM_RISCV_ISA_EXT_##ext] = "KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_" #ext
493
static const char *isa_ext_single_id_to_str(__u64 reg_off)
495 {
496 static const char * const kvm_isa_ext_reg_name[] = {
497 KVM_ISA_EXT_ARR(A),
498 KVM_ISA_EXT_ARR(C),
499 KVM_ISA_EXT_ARR(D),
500 KVM_ISA_EXT_ARR(F),
501 KVM_ISA_EXT_ARR(H),
502 KVM_ISA_EXT_ARR(I),
503 KVM_ISA_EXT_ARR(M),
504 KVM_ISA_EXT_ARR(V),
505 KVM_ISA_EXT_ARR(SMNPM),
506 KVM_ISA_EXT_ARR(SMSTATEEN),
507 KVM_ISA_EXT_ARR(SSAIA),
508 KVM_ISA_EXT_ARR(SSCOFPMF),
509 KVM_ISA_EXT_ARR(SSNPM),
510 KVM_ISA_EXT_ARR(SSTC),
511 KVM_ISA_EXT_ARR(SVADE),
512 KVM_ISA_EXT_ARR(SVADU),
513 KVM_ISA_EXT_ARR(SVINVAL),
514 KVM_ISA_EXT_ARR(SVNAPOT),
515 KVM_ISA_EXT_ARR(SVPBMT),
516 KVM_ISA_EXT_ARR(SVVPTC),
517 KVM_ISA_EXT_ARR(ZAAMO),
518 KVM_ISA_EXT_ARR(ZABHA),
519 KVM_ISA_EXT_ARR(ZACAS),
520 KVM_ISA_EXT_ARR(ZALRSC),
521 KVM_ISA_EXT_ARR(ZAWRS),
522 KVM_ISA_EXT_ARR(ZBA),
523 KVM_ISA_EXT_ARR(ZBB),
524 KVM_ISA_EXT_ARR(ZBC),
525 KVM_ISA_EXT_ARR(ZBKB),
526 KVM_ISA_EXT_ARR(ZBKC),
527 KVM_ISA_EXT_ARR(ZBKX),
528 KVM_ISA_EXT_ARR(ZBS),
529 KVM_ISA_EXT_ARR(ZCA),
530 KVM_ISA_EXT_ARR(ZCB),
531 KVM_ISA_EXT_ARR(ZCD),
532 KVM_ISA_EXT_ARR(ZCF),
533 KVM_ISA_EXT_ARR(ZCMOP),
534 KVM_ISA_EXT_ARR(ZFA),
535 KVM_ISA_EXT_ARR(ZFH),
536 KVM_ISA_EXT_ARR(ZFHMIN),
537 KVM_ISA_EXT_ARR(ZICBOM),
538 KVM_ISA_EXT_ARR(ZICBOZ),
539 KVM_ISA_EXT_ARR(ZICCRSE),
540 KVM_ISA_EXT_ARR(ZICNTR),
541 KVM_ISA_EXT_ARR(ZICOND),
542 KVM_ISA_EXT_ARR(ZICSR),
543 KVM_ISA_EXT_ARR(ZIFENCEI),
544 KVM_ISA_EXT_ARR(ZIHINTNTL),
545 KVM_ISA_EXT_ARR(ZIHINTPAUSE),
546 KVM_ISA_EXT_ARR(ZIHPM),
547 KVM_ISA_EXT_ARR(ZIMOP),
548 KVM_ISA_EXT_ARR(ZKND),
549 KVM_ISA_EXT_ARR(ZKNE),
550 KVM_ISA_EXT_ARR(ZKNH),
551 KVM_ISA_EXT_ARR(ZKR),
552 KVM_ISA_EXT_ARR(ZKSED),
553 KVM_ISA_EXT_ARR(ZKSH),
554 KVM_ISA_EXT_ARR(ZKT),
555 KVM_ISA_EXT_ARR(ZTSO),
556 KVM_ISA_EXT_ARR(ZVBB),
557 KVM_ISA_EXT_ARR(ZVBC),
558 KVM_ISA_EXT_ARR(ZVFH),
559 KVM_ISA_EXT_ARR(ZVFHMIN),
560 KVM_ISA_EXT_ARR(ZVKB),
561 KVM_ISA_EXT_ARR(ZVKG),
562 KVM_ISA_EXT_ARR(ZVKNED),
563 KVM_ISA_EXT_ARR(ZVKNHA),
564 KVM_ISA_EXT_ARR(ZVKNHB),
565 KVM_ISA_EXT_ARR(ZVKSED),
566 KVM_ISA_EXT_ARR(ZVKSH),
567 KVM_ISA_EXT_ARR(ZVKT),
568 };
569
570 if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
571 return strdup_printf("KVM_REG_RISCV_ISA_SINGLE | %lld /* UNKNOWN */", reg_off);
572
573 return kvm_isa_ext_reg_name[reg_off];
574 }
575
static const char *isa_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
577 {
578 const char *unknown = "";
579
580 if (reg_off > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
581 unknown = " /* UNKNOWN */";
582
583 switch (reg_subtype) {
584 case KVM_REG_RISCV_ISA_MULTI_EN:
585 return strdup_printf("KVM_REG_RISCV_ISA_MULTI_EN | %lld%s", reg_off, unknown);
586 case KVM_REG_RISCV_ISA_MULTI_DIS:
587 return strdup_printf("KVM_REG_RISCV_ISA_MULTI_DIS | %lld%s", reg_off, unknown);
588 }
589
590 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
591 }
592
static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
594 {
595 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
596 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
597
598 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);
599
600 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
601
602 switch (reg_subtype) {
603 case KVM_REG_RISCV_ISA_SINGLE:
604 return isa_ext_single_id_to_str(reg_off);
605 case KVM_REG_RISCV_ISA_MULTI_EN:
606 case KVM_REG_RISCV_ISA_MULTI_DIS:
607 return isa_ext_multi_id_to_str(reg_subtype, reg_off);
608 }
609
610 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
611 }
612
613 #define KVM_SBI_EXT_ARR(ext) \
614 [ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext
615
static const char *sbi_ext_single_id_to_str(__u64 reg_off)
617 {
618 /* reg_off is KVM_RISCV_SBI_EXT_ID */
619 static const char * const kvm_sbi_ext_reg_name[] = {
620 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_V01),
621 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_TIME),
622 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_IPI),
623 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_RFENCE),
624 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
625 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
626 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
627 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
628 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SUSP),
629 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
630 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
631 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
632 };
633
634 if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name))
635 return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
636
637 return kvm_sbi_ext_reg_name[reg_off];
638 }
639
static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
641 {
642 const char *unknown = "";
643
644 if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
645 unknown = " /* UNKNOWN */";
646
647 switch (reg_subtype) {
648 case KVM_REG_RISCV_SBI_MULTI_EN:
649 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld%s", reg_off, unknown);
650 case KVM_REG_RISCV_SBI_MULTI_DIS:
651 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld%s", reg_off, unknown);
652 }
653
654 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
655 }
656
static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
658 {
659 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
660 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
661
662 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT);
663
664 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
665
666 switch (reg_subtype) {
667 case KVM_REG_RISCV_SBI_SINGLE:
668 return sbi_ext_single_id_to_str(reg_off);
669 case KVM_REG_RISCV_SBI_MULTI_EN:
670 case KVM_REG_RISCV_SBI_MULTI_DIS:
671 return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
672 }
673
674 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
675 }
676
static const char *sbi_sta_id_to_str(__u64 reg_off)
678 {
679 switch (reg_off) {
680 case 0: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo)";
681 case 1: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi)";
682 }
683 return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off);
684 }
685
static const char *sbi_id_to_str(const char *prefix, __u64 id)
687 {
688 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE);
689 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
690
691 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_STATE);
692
693 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
694
695 switch (reg_subtype) {
696 case KVM_REG_RISCV_SBI_STA:
697 return sbi_sta_id_to_str(reg_off);
698 }
699
700 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
701 }
702
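/*
 * Print a register id in the same source form used by the blessed lists
 * below, so newly reported registers can be copy-pasted into them.
 */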
void print_reg(const char *prefix, __u64 id)
704 {
705 const char *reg_size = NULL;
706
707 TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
708 "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
709
710 switch (id & KVM_REG_SIZE_MASK) {
711 case KVM_REG_SIZE_U32:
712 reg_size = "KVM_REG_SIZE_U32";
713 break;
714 case KVM_REG_SIZE_U64:
715 reg_size = "KVM_REG_SIZE_U64";
716 break;
717 case KVM_REG_SIZE_U128:
718 reg_size = "KVM_REG_SIZE_U128";
719 break;
720 case KVM_REG_SIZE_U256:
721 reg_size = "KVM_REG_SIZE_U256";
722 break;
723 default:
724 printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n",
725 (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK);
726 return;
727 }
728
729 switch (id & KVM_REG_RISCV_TYPE_MASK) {
730 case KVM_REG_RISCV_CONFIG:
731 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
732 reg_size, config_id_to_str(prefix, id));
733 break;
734 case KVM_REG_RISCV_CORE:
735 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
736 reg_size, core_id_to_str(prefix, id));
737 break;
738 case KVM_REG_RISCV_CSR:
739 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
740 reg_size, csr_id_to_str(prefix, id));
741 break;
742 case KVM_REG_RISCV_TIMER:
743 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
744 reg_size, timer_id_to_str(prefix, id));
745 break;
746 case KVM_REG_RISCV_FP_F:
747 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
748 reg_size, fp_f_id_to_str(prefix, id));
749 break;
750 case KVM_REG_RISCV_FP_D:
751 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
752 reg_size, fp_d_id_to_str(prefix, id));
753 break;
754 case KVM_REG_RISCV_VECTOR:
755 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_VECTOR | %s,\n",
756 reg_size, vector_id_to_str(prefix, id));
757 break;
758 case KVM_REG_RISCV_ISA_EXT:
759 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
760 reg_size, isa_ext_id_to_str(prefix, id));
761 break;
762 case KVM_REG_RISCV_SBI_EXT:
763 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
764 reg_size, sbi_ext_id_to_str(prefix, id));
765 break;
766 case KVM_REG_RISCV_SBI_STATE:
767 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_STATE | %s,\n",
768 reg_size, sbi_id_to_str(prefix, id));
769 break;
770 default:
771 printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
772 reg_size, id & ~REG_MASK);
773 return;
774 }
775 }
776
777 /*
778 * The current blessed list was primed with the output of kernel version
779 * v6.5-rc3 and then later updated with new registers.
780 */
781 static __u64 base_regs[] = {
782 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
783 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
784 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
785 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
786 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
787 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
788 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
789 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
790 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
791 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
792 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
793 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
794 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
795 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
796 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
797 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
798 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
799 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
800 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
801 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
802 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
803 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
804 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
805 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
806 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
807 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
808 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
809 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
810 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
811 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
812 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
813 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
814 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
815 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
816 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
817 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
818 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
819 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
820 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
821 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
822 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
823 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
824 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
825 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
826 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
827 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
828 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
829 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
830 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(senvcfg),
831 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
832 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
833 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
834 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
835 };
836
/*
 * The skips_set list contains registers that should skip the set test.
 * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
 */
841 static __u64 base_skips_set[] = {
842 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
843 };
844
845 static __u64 sbi_base_regs[] = {
846 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
847 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
848 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
849 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
850 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
851 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
852 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
853 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
854 };
855
856 static __u64 sbi_sta_regs[] = {
857 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA,
858 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
859 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi),
860 };
861
862 static __u64 zicbom_regs[] = {
863 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
864 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM,
865 };
866
867 static __u64 zicboz_regs[] = {
868 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
869 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ,
870 };
871
872 static __u64 aia_regs[] = {
873 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
874 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
875 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
876 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
877 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
878 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
879 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
880 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA,
881 };
882
883 static __u64 smstateen_regs[] = {
884 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
885 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN,
886 };
887
888 static __u64 fp_f_regs[] = {
889 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
890 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
891 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
892 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
893 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
894 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
895 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
896 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
897 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
898 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
899 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
900 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
901 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
902 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
903 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
904 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
905 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
906 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
907 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
908 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
909 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
910 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
911 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
912 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
913 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
914 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
915 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
916 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
917 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
918 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
919 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
920 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
921 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
922 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F,
923 };
924
925 static __u64 fp_d_regs[] = {
926 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
927 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
928 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
929 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
930 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
931 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
932 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
933 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
934 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
935 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
936 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
937 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
938 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
939 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
940 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
941 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
942 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
943 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
944 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
945 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
946 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
947 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
948 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
949 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
950 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
951 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
952 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
953 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
954 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
955 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
956 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
957 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
958 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
959 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D,
960 };
961
/* Default vector registers with a placeholder size; the size field is overwritten at runtime. */
963 static __u64 vector_regs[] = {
964 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_CSR_REG(vstart),
965 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_CSR_REG(vl),
966 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_CSR_REG(vtype),
967 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_CSR_REG(vcsr),
968 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_CSR_REG(vlenb),
969 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(0),
970 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(1),
971 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(2),
972 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(3),
973 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(4),
974 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(5),
975 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(6),
976 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(7),
977 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(8),
978 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(9),
979 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(10),
980 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(11),
981 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(12),
982 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(13),
983 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(14),
984 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(15),
985 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(16),
986 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(17),
987 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(18),
988 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(19),
989 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(20),
990 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(21),
991 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(22),
992 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(23),
993 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(24),
994 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(25),
995 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(26),
996 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(27),
997 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(28),
998 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(29),
999 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(30),
1000 KVM_REG_RISCV | KVM_REG_SIZE_U128 | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(31),
1001 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V,
1002 };
1003
1004 #define SUBLIST_BASE \
1005 {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
1006 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
1007 #define SUBLIST_SBI_BASE \
1008 {"sbi-base", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_V01, \
1009 .regs = sbi_base_regs, .regs_n = ARRAY_SIZE(sbi_base_regs),}
1010 #define SUBLIST_SBI_STA \
1011 {"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \
1012 .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),}
1013 #define SUBLIST_ZICBOM \
1014 {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
1015 #define SUBLIST_ZICBOZ \
1016 {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
1017 #define SUBLIST_AIA \
1018 {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
1019 #define SUBLIST_SMSTATEEN \
1020 {"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),}
1021 #define SUBLIST_FP_F \
1022 {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
1023 .regs_n = ARRAY_SIZE(fp_f_regs),}
1024 #define SUBLIST_FP_D \
1025 {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
1026 .regs_n = ARRAY_SIZE(fp_d_regs),}
1027
1028 #define SUBLIST_V \
1029 {"v", .feature = KVM_RISCV_ISA_EXT_V, .regs = vector_regs, .regs_n = ARRAY_SIZE(vector_regs),}
1030
1031 #define KVM_ISA_EXT_SIMPLE_CONFIG(ext, extu) \
1032 static __u64 regs_##ext[] = { \
1033 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | \
1034 KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | \
1035 KVM_RISCV_ISA_EXT_##extu, \
1036 }; \
1037 static struct vcpu_reg_list config_##ext = { \
1038 .sublists = { \
1039 SUBLIST_BASE, \
1040 { \
1041 .name = #ext, \
1042 .feature = KVM_RISCV_ISA_EXT_##extu, \
1043 .regs = regs_##ext, \
1044 .regs_n = ARRAY_SIZE(regs_##ext), \
1045 }, \
1046 {0}, \
1047 }, \
1048 } \
1049
1050 #define KVM_SBI_EXT_SIMPLE_CONFIG(ext, extu) \
1051 static __u64 regs_sbi_##ext[] = { \
1052 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | \
1053 KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | \
1054 KVM_RISCV_SBI_EXT_##extu, \
1055 }; \
1056 static struct vcpu_reg_list config_sbi_##ext = { \
1057 .sublists = { \
1058 SUBLIST_BASE, \
1059 { \
1060 .name = "sbi-"#ext, \
1061 .feature_type = VCPU_FEATURE_SBI_EXT, \
1062 .feature = KVM_RISCV_SBI_EXT_##extu, \
1063 .regs = regs_sbi_##ext, \
1064 .regs_n = ARRAY_SIZE(regs_sbi_##ext), \
1065 }, \
1066 {0}, \
1067 }, \
1068 } \
1069
1070 #define KVM_ISA_EXT_SUBLIST_CONFIG(ext, extu) \
1071 static struct vcpu_reg_list config_##ext = { \
1072 .sublists = { \
1073 SUBLIST_BASE, \
1074 SUBLIST_##extu, \
1075 {0}, \
1076 }, \
1077 } \
1078
1079 #define KVM_SBI_EXT_SUBLIST_CONFIG(ext, extu) \
1080 static struct vcpu_reg_list config_sbi_##ext = { \
1081 .sublists = { \
1082 SUBLIST_BASE, \
1083 SUBLIST_SBI_##extu, \
1084 {0}, \
1085 }, \
1086 } \
1087
1088 /* Note: The below list is alphabetically sorted. */
1089
1090 KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE);
1091 KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
1092 KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
1093 KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);
1094 KVM_SBI_EXT_SIMPLE_CONFIG(susp, SUSP);
1095
1096 KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
1097 KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
1098 KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D);
1099 KVM_ISA_EXT_SUBLIST_CONFIG(v, V);
1100 KVM_ISA_EXT_SIMPLE_CONFIG(h, H);
1101 KVM_ISA_EXT_SIMPLE_CONFIG(smnpm, SMNPM);
1102 KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN);
1103 KVM_ISA_EXT_SIMPLE_CONFIG(sscofpmf, SSCOFPMF);
1104 KVM_ISA_EXT_SIMPLE_CONFIG(ssnpm, SSNPM);
1105 KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
1106 KVM_ISA_EXT_SIMPLE_CONFIG(svade, SVADE);
1107 KVM_ISA_EXT_SIMPLE_CONFIG(svadu, SVADU);
1108 KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
1109 KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
1110 KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
1111 KVM_ISA_EXT_SIMPLE_CONFIG(svvptc, SVVPTC);
1112 KVM_ISA_EXT_SIMPLE_CONFIG(zaamo, ZAAMO);
1113 KVM_ISA_EXT_SIMPLE_CONFIG(zabha, ZABHA);
1114 KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
1115 KVM_ISA_EXT_SIMPLE_CONFIG(zalrsc, ZALRSC);
1116 KVM_ISA_EXT_SIMPLE_CONFIG(zawrs, ZAWRS);
1117 KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
1118 KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
1119 KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
1120 KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
1121 KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
1122 KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
1123 KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
1124 KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA);
1125 KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB);
1126 KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD);
1127 KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF);
1128 KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP);
1129 KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
1130 KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
1131 KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
1132 KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
1133 KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
1134 KVM_ISA_EXT_SIMPLE_CONFIG(ziccrse, ZICCRSE);
1135 KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
1136 KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND);
1137 KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR);
1138 KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
1139 KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
1140 KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
1141 KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
1142 KVM_ISA_EXT_SIMPLE_CONFIG(zimop, ZIMOP);
1143 KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
1144 KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
1145 KVM_ISA_EXT_SIMPLE_CONFIG(zknh, ZKNH);
1146 KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
1147 KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
1148 KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
1149 KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
1150 KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO);
1151 KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
1152 KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
1153 KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
1154 KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN);
1155 KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB);
1156 KVM_ISA_EXT_SIMPLE_CONFIG(zvkg, ZVKG);
1157 KVM_ISA_EXT_SIMPLE_CONFIG(zvkned, ZVKNED);
1158 KVM_ISA_EXT_SIMPLE_CONFIG(zvknha, ZVKNHA);
1159 KVM_ISA_EXT_SIMPLE_CONFIG(zvknhb, ZVKNHB);
1160 KVM_ISA_EXT_SIMPLE_CONFIG(zvksed, ZVKSED);
1161 KVM_ISA_EXT_SIMPLE_CONFIG(zvksh, ZVKSH);
1162 KVM_ISA_EXT_SIMPLE_CONFIG(zvkt, ZVKT);
1163
1164 struct vcpu_reg_list *vcpu_configs[] = {
1165 &config_sbi_base,
1166 &config_sbi_sta,
1167 &config_sbi_pmu,
1168 &config_sbi_dbcn,
1169 &config_sbi_susp,
1170 &config_aia,
1171 &config_fp_f,
1172 &config_fp_d,
1173 &config_h,
1174 &config_v,
1175 &config_smnpm,
1176 &config_smstateen,
1177 &config_sscofpmf,
1178 &config_ssnpm,
1179 &config_sstc,
1180 &config_svade,
1181 &config_svadu,
1182 &config_svinval,
1183 &config_svnapot,
1184 &config_svpbmt,
1185 &config_svvptc,
1186 &config_zaamo,
1187 &config_zabha,
1188 &config_zacas,
1189 &config_zalrsc,
1190 &config_zawrs,
1191 &config_zba,
1192 &config_zbb,
1193 &config_zbc,
1194 &config_zbkb,
1195 &config_zbkc,
1196 &config_zbkx,
1197 &config_zbs,
1198 &config_zca,
1199 &config_zcb,
1200 &config_zcd,
1201 &config_zcf,
1202 &config_zcmop,
1203 &config_zfa,
1204 &config_zfh,
1205 &config_zfhmin,
1206 &config_zicbom,
1207 &config_zicboz,
1208 &config_ziccrse,
1209 &config_zicntr,
1210 &config_zicond,
1211 &config_zicsr,
1212 &config_zifencei,
1213 &config_zihintntl,
1214 &config_zihintpause,
1215 &config_zihpm,
1216 &config_zimop,
1217 &config_zknd,
1218 &config_zkne,
1219 &config_zknh,
1220 &config_zkr,
1221 &config_zksed,
1222 &config_zksh,
1223 &config_zkt,
1224 &config_ztso,
1225 &config_zvbb,
1226 &config_zvbc,
1227 &config_zvfh,
1228 &config_zvfhmin,
1229 &config_zvkb,
1230 &config_zvkg,
1231 &config_zvkned,
1232 &config_zvknha,
1233 &config_zvknhb,
1234 &config_zvksed,
1235 &config_zvksh,
1236 &config_zvkt,
1237 };
1238 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
1239