1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Check for KVM_GET_REG_LIST regressions. 4 * 5 * Copyright (c) 2023 Intel Corporation 6 * 7 */ 8 #include <stdio.h> 9 #include "kvm_util.h" 10 #include "test_util.h" 11 #include "processor.h" 12 13 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK) 14 15 static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX]; 16 17 bool filter_reg(__u64 reg) 18 { 19 switch (reg & ~REG_MASK) { 20 /* 21 * Same set of ISA_EXT registers are not present on all host because 22 * ISA_EXT registers are visible to the KVM user space based on the 23 * ISA extensions available on the host. Also, disabling an ISA 24 * extension using corresponding ISA_EXT register does not affect 25 * the visibility of the ISA_EXT register itself. 26 * 27 * Based on above, we should filter-out all ISA_EXT registers. 28 * 29 * Note: The below list is alphabetically sorted. 30 */ 31 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A: 32 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C: 33 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D: 34 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F: 35 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H: 36 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I: 37 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M: 38 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V: 39 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN: 40 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA: 41 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC: 42 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL: 43 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT: 44 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT: 45 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA: 46 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB: 47 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS: 48 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM: 49 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ: 50 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR: 51 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND: 52 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR: 53 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI: 54 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: 55 case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM: 56 return true; 57 /* AIA registers are always available when Ssaia can't be disabled */ 58 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect): 59 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1): 60 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2): 61 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh): 62 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph): 63 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h): 64 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h): 65 return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA]; 66 default: 67 break; 68 } 69 70 return false; 71 } 72 73 bool check_reject_set(int err) 74 { 75 return err == EINVAL; 76 } 77 78 static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext) 79 { 80 int ret; 81 unsigned long value; 82 83 ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value); 84 return (ret) ? false : !!value; 85 } 86 87 void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) 88 { 89 unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; 90 struct vcpu_reg_sublist *s; 91 int rc; 92 93 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) 94 __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]); 95 96 /* 97 * Disable all extensions which were enabled by default 98 * if they were available in the risc-v host. 99 */ 100 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) { 101 rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); 102 if (rc && isa_ext_state[i]) 103 isa_ext_cant_disable[i] = true; 104 } 105 106 for_each_sublist(c, s) { 107 if (!s->feature) 108 continue; 109 110 /* Try to enable the desired extension */ 111 __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1); 112 113 /* Double check whether the desired extension was enabled */ 114 __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature), 115 "%s not available, skipping tests\n", s->name); 116 } 117 } 118 119 static const char *config_id_to_str(const char *prefix, __u64 id) 120 { 121 /* reg_off is the offset into struct kvm_riscv_config */ 122 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG); 123 124 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG); 125 126 switch (reg_off) { 127 case KVM_REG_RISCV_CONFIG_REG(isa): 128 return "KVM_REG_RISCV_CONFIG_REG(isa)"; 129 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): 130 return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)"; 131 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): 132 return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)"; 133 case KVM_REG_RISCV_CONFIG_REG(mvendorid): 134 return "KVM_REG_RISCV_CONFIG_REG(mvendorid)"; 135 case KVM_REG_RISCV_CONFIG_REG(marchid): 136 return "KVM_REG_RISCV_CONFIG_REG(marchid)"; 137 case KVM_REG_RISCV_CONFIG_REG(mimpid): 138 return "KVM_REG_RISCV_CONFIG_REG(mimpid)"; 139 case KVM_REG_RISCV_CONFIG_REG(satp_mode): 140 return "KVM_REG_RISCV_CONFIG_REG(satp_mode)"; 141 } 142 143 return strdup_printf("%lld /* UNKNOWN */", reg_off); 144 } 145 146 static const char *core_id_to_str(const char *prefix, __u64 id) 147 { 148 /* reg_off is the offset into struct kvm_riscv_core */ 149 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE); 150 151 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE); 152 153 switch (reg_off) { 154 case KVM_REG_RISCV_CORE_REG(regs.pc): 155 return "KVM_REG_RISCV_CORE_REG(regs.pc)"; 156 case KVM_REG_RISCV_CORE_REG(regs.ra): 157 return "KVM_REG_RISCV_CORE_REG(regs.ra)"; 158 case KVM_REG_RISCV_CORE_REG(regs.sp): 159 return "KVM_REG_RISCV_CORE_REG(regs.sp)"; 160 case KVM_REG_RISCV_CORE_REG(regs.gp): 161 return "KVM_REG_RISCV_CORE_REG(regs.gp)"; 162 case KVM_REG_RISCV_CORE_REG(regs.tp): 163 return "KVM_REG_RISCV_CORE_REG(regs.tp)"; 164 case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2): 165 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)", 166 reg_off - KVM_REG_RISCV_CORE_REG(regs.t0)); 167 case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1): 168 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)", 169 reg_off - KVM_REG_RISCV_CORE_REG(regs.s0)); 170 case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7): 171 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)", 172 reg_off - KVM_REG_RISCV_CORE_REG(regs.a0)); 173 case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11): 174 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)", 175 reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2); 176 case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6): 177 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)", 178 reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3); 179 case KVM_REG_RISCV_CORE_REG(mode): 180 return "KVM_REG_RISCV_CORE_REG(mode)"; 181 } 182 183 return strdup_printf("%lld /* UNKNOWN */", reg_off); 184 } 185 186 #define RISCV_CSR_GENERAL(csr) \ 187 "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")" 188 #define RISCV_CSR_AIA(csr) \ 189 "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")" 190 #define RISCV_CSR_SMSTATEEN(csr) \ 191 "KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_REG(" #csr ")" 192 193 static const char *general_csr_id_to_str(__u64 reg_off) 194 { 195 /* reg_off is the offset into struct kvm_riscv_csr */ 196 switch (reg_off) { 197 case KVM_REG_RISCV_CSR_REG(sstatus): 198 return RISCV_CSR_GENERAL(sstatus); 199 case KVM_REG_RISCV_CSR_REG(sie): 200 return RISCV_CSR_GENERAL(sie); 201 case KVM_REG_RISCV_CSR_REG(stvec): 202 return RISCV_CSR_GENERAL(stvec); 203 case KVM_REG_RISCV_CSR_REG(sscratch): 204 return RISCV_CSR_GENERAL(sscratch); 205 case KVM_REG_RISCV_CSR_REG(sepc): 206 return RISCV_CSR_GENERAL(sepc); 207 case KVM_REG_RISCV_CSR_REG(scause): 208 return RISCV_CSR_GENERAL(scause); 209 case KVM_REG_RISCV_CSR_REG(stval): 210 return RISCV_CSR_GENERAL(stval); 211 case KVM_REG_RISCV_CSR_REG(sip): 212 return RISCV_CSR_GENERAL(sip); 213 case KVM_REG_RISCV_CSR_REG(satp): 214 return RISCV_CSR_GENERAL(satp); 215 case KVM_REG_RISCV_CSR_REG(scounteren): 216 return RISCV_CSR_GENERAL(scounteren); 217 case KVM_REG_RISCV_CSR_REG(senvcfg): 218 return RISCV_CSR_GENERAL(senvcfg); 219 } 220 221 return strdup_printf("KVM_REG_RISCV_CSR_GENERAL | %lld /* UNKNOWN */", reg_off); 222 } 223 224 static const char *aia_csr_id_to_str(__u64 reg_off) 225 { 226 /* reg_off is the offset into struct kvm_riscv_aia_csr */ 227 switch (reg_off) { 228 case KVM_REG_RISCV_CSR_AIA_REG(siselect): 229 return RISCV_CSR_AIA(siselect); 230 case KVM_REG_RISCV_CSR_AIA_REG(iprio1): 231 return RISCV_CSR_AIA(iprio1); 232 case KVM_REG_RISCV_CSR_AIA_REG(iprio2): 233 return RISCV_CSR_AIA(iprio2); 234 case KVM_REG_RISCV_CSR_AIA_REG(sieh): 235 return RISCV_CSR_AIA(sieh); 236 case KVM_REG_RISCV_CSR_AIA_REG(siph): 237 return RISCV_CSR_AIA(siph); 238 case KVM_REG_RISCV_CSR_AIA_REG(iprio1h): 239 return RISCV_CSR_AIA(iprio1h); 240 case KVM_REG_RISCV_CSR_AIA_REG(iprio2h): 241 return RISCV_CSR_AIA(iprio2h); 242 } 243 244 return strdup_printf("KVM_REG_RISCV_CSR_AIA | %lld /* UNKNOWN */", reg_off); 245 } 246 247 static const char *smstateen_csr_id_to_str(__u64 reg_off) 248 { 249 /* reg_off is the offset into struct kvm_riscv_smstateen_csr */ 250 switch (reg_off) { 251 case KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0): 252 return RISCV_CSR_SMSTATEEN(sstateen0); 253 } 254 255 TEST_FAIL("Unknown smstateen csr reg: 0x%llx", reg_off); 256 return NULL; 257 } 258 259 static const char *csr_id_to_str(const char *prefix, __u64 id) 260 { 261 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR); 262 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; 263 264 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR); 265 266 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; 267 268 switch (reg_subtype) { 269 case KVM_REG_RISCV_CSR_GENERAL: 270 return general_csr_id_to_str(reg_off); 271 case KVM_REG_RISCV_CSR_AIA: 272 return aia_csr_id_to_str(reg_off); 273 case KVM_REG_RISCV_CSR_SMSTATEEN: 274 return smstateen_csr_id_to_str(reg_off); 275 } 276 277 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 278 } 279 280 static const char *timer_id_to_str(const char *prefix, __u64 id) 281 { 282 /* reg_off is the offset into struct kvm_riscv_timer */ 283 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER); 284 285 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER); 286 287 switch (reg_off) { 288 case KVM_REG_RISCV_TIMER_REG(frequency): 289 return "KVM_REG_RISCV_TIMER_REG(frequency)"; 290 case KVM_REG_RISCV_TIMER_REG(time): 291 return "KVM_REG_RISCV_TIMER_REG(time)"; 292 case KVM_REG_RISCV_TIMER_REG(compare): 293 return "KVM_REG_RISCV_TIMER_REG(compare)"; 294 case KVM_REG_RISCV_TIMER_REG(state): 295 return "KVM_REG_RISCV_TIMER_REG(state)"; 296 } 297 298 return strdup_printf("%lld /* UNKNOWN */", reg_off); 299 } 300 301 static const char *fp_f_id_to_str(const char *prefix, __u64 id) 302 { 303 /* reg_off is the offset into struct __riscv_f_ext_state */ 304 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F); 305 306 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F); 307 308 switch (reg_off) { 309 case KVM_REG_RISCV_FP_F_REG(f[0]) ... 310 KVM_REG_RISCV_FP_F_REG(f[31]): 311 return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off); 312 case KVM_REG_RISCV_FP_F_REG(fcsr): 313 return "KVM_REG_RISCV_FP_F_REG(fcsr)"; 314 } 315 316 return strdup_printf("%lld /* UNKNOWN */", reg_off); 317 } 318 319 static const char *fp_d_id_to_str(const char *prefix, __u64 id) 320 { 321 /* reg_off is the offset into struct __riscv_d_ext_state */ 322 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D); 323 324 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D); 325 326 switch (reg_off) { 327 case KVM_REG_RISCV_FP_D_REG(f[0]) ... 328 KVM_REG_RISCV_FP_D_REG(f[31]): 329 return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off); 330 case KVM_REG_RISCV_FP_D_REG(fcsr): 331 return "KVM_REG_RISCV_FP_D_REG(fcsr)"; 332 } 333 334 return strdup_printf("%lld /* UNKNOWN */", reg_off); 335 } 336 337 #define KVM_ISA_EXT_ARR(ext) \ 338 [KVM_RISCV_ISA_EXT_##ext] = "KVM_RISCV_ISA_EXT_" #ext 339 340 static const char *isa_ext_id_to_str(const char *prefix, __u64 id) 341 { 342 /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */ 343 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT); 344 345 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT); 346 347 static const char * const kvm_isa_ext_reg_name[] = { 348 KVM_ISA_EXT_ARR(A), 349 KVM_ISA_EXT_ARR(C), 350 KVM_ISA_EXT_ARR(D), 351 KVM_ISA_EXT_ARR(F), 352 KVM_ISA_EXT_ARR(H), 353 KVM_ISA_EXT_ARR(I), 354 KVM_ISA_EXT_ARR(M), 355 KVM_ISA_EXT_ARR(V), 356 KVM_ISA_EXT_ARR(SMSTATEEN), 357 KVM_ISA_EXT_ARR(SSAIA), 358 KVM_ISA_EXT_ARR(SSTC), 359 KVM_ISA_EXT_ARR(SVINVAL), 360 KVM_ISA_EXT_ARR(SVNAPOT), 361 KVM_ISA_EXT_ARR(SVPBMT), 362 KVM_ISA_EXT_ARR(ZBA), 363 KVM_ISA_EXT_ARR(ZBB), 364 KVM_ISA_EXT_ARR(ZBS), 365 KVM_ISA_EXT_ARR(ZICBOM), 366 KVM_ISA_EXT_ARR(ZICBOZ), 367 KVM_ISA_EXT_ARR(ZICNTR), 368 KVM_ISA_EXT_ARR(ZICOND), 369 KVM_ISA_EXT_ARR(ZICSR), 370 KVM_ISA_EXT_ARR(ZIFENCEI), 371 KVM_ISA_EXT_ARR(ZIHINTPAUSE), 372 KVM_ISA_EXT_ARR(ZIHPM), 373 }; 374 375 if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) 376 return strdup_printf("%lld /* UNKNOWN */", reg_off); 377 378 return kvm_isa_ext_reg_name[reg_off]; 379 } 380 381 #define KVM_SBI_EXT_ARR(ext) \ 382 [ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext 383 384 static const char *sbi_ext_single_id_to_str(__u64 reg_off) 385 { 386 /* reg_off is KVM_RISCV_SBI_EXT_ID */ 387 static const char * const kvm_sbi_ext_reg_name[] = { 388 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_V01), 389 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_TIME), 390 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_IPI), 391 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_RFENCE), 392 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST), 393 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM), 394 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU), 395 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL), 396 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR), 397 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN), 398 }; 399 400 if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) 401 return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off); 402 403 return kvm_sbi_ext_reg_name[reg_off]; 404 } 405 406 static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off) 407 { 408 const char *unknown = ""; 409 410 if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) 411 unknown = " /* UNKNOWN */"; 412 413 switch (reg_subtype) { 414 case KVM_REG_RISCV_SBI_MULTI_EN: 415 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld%s", reg_off, unknown); 416 case KVM_REG_RISCV_SBI_MULTI_DIS: 417 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld%s", reg_off, unknown); 418 } 419 420 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 421 } 422 423 static const char *sbi_ext_id_to_str(const char *prefix, __u64 id) 424 { 425 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT); 426 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK; 427 428 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT); 429 430 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK; 431 432 switch (reg_subtype) { 433 case KVM_REG_RISCV_SBI_SINGLE: 434 return sbi_ext_single_id_to_str(reg_off); 435 case KVM_REG_RISCV_SBI_MULTI_EN: 436 case KVM_REG_RISCV_SBI_MULTI_DIS: 437 return sbi_ext_multi_id_to_str(reg_subtype, reg_off); 438 } 439 440 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); 441 } 442 443 void print_reg(const char *prefix, __u64 id) 444 { 445 const char *reg_size = NULL; 446 447 TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV, 448 "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id); 449 450 switch (id & KVM_REG_SIZE_MASK) { 451 case KVM_REG_SIZE_U32: 452 reg_size = "KVM_REG_SIZE_U32"; 453 break; 454 case KVM_REG_SIZE_U64: 455 reg_size = "KVM_REG_SIZE_U64"; 456 break; 457 case KVM_REG_SIZE_U128: 458 reg_size = "KVM_REG_SIZE_U128"; 459 break; 460 default: 461 printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,", 462 (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & REG_MASK); 463 } 464 465 switch (id & KVM_REG_RISCV_TYPE_MASK) { 466 case KVM_REG_RISCV_CONFIG: 467 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n", 468 reg_size, config_id_to_str(prefix, id)); 469 break; 470 case KVM_REG_RISCV_CORE: 471 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n", 472 reg_size, core_id_to_str(prefix, id)); 473 break; 474 case KVM_REG_RISCV_CSR: 475 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n", 476 reg_size, csr_id_to_str(prefix, id)); 477 break; 478 case KVM_REG_RISCV_TIMER: 479 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n", 480 reg_size, timer_id_to_str(prefix, id)); 481 break; 482 case KVM_REG_RISCV_FP_F: 483 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n", 484 reg_size, fp_f_id_to_str(prefix, id)); 485 break; 486 case KVM_REG_RISCV_FP_D: 487 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n", 488 reg_size, fp_d_id_to_str(prefix, id)); 489 break; 490 case KVM_REG_RISCV_ISA_EXT: 491 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n", 492 reg_size, isa_ext_id_to_str(prefix, id)); 493 break; 494 case KVM_REG_RISCV_SBI_EXT: 495 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n", 496 reg_size, sbi_ext_id_to_str(prefix, id)); 497 break; 498 default: 499 printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,", 500 reg_size, id & REG_MASK); 501 } 502 } 503 504 /* 505 * The current blessed list was primed with the output of kernel version 506 * v6.5-rc3 and then later updated with new registers. 507 */ 508 static __u64 base_regs[] = { 509 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa), 510 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid), 511 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid), 512 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid), 513 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode), 514 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc), 515 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra), 516 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp), 517 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp), 518 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp), 519 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0), 520 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1), 521 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2), 522 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0), 523 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1), 524 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0), 525 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1), 526 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2), 527 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3), 528 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4), 529 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5), 530 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6), 531 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7), 532 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2), 533 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3), 534 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4), 535 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5), 536 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6), 537 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7), 538 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8), 539 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9), 540 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10), 541 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11), 542 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3), 543 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4), 544 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5), 545 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6), 546 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode), 547 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus), 548 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie), 549 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec), 550 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch), 551 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc), 552 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause), 553 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval), 554 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip), 555 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp), 556 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren), 557 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(senvcfg), 558 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency), 559 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time), 560 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare), 561 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), 562 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01, 563 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME, 564 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI, 565 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE, 566 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST, 567 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM, 568 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU, 569 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL, 570 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR, 571 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN, 572 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0, 573 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0, 574 }; 575 576 /* 577 * The skips_set list registers that should skip set test. 578 * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly. 579 */ 580 static __u64 base_skips_set[] = { 581 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), 582 }; 583 584 static __u64 h_regs[] = { 585 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H, 586 }; 587 588 static __u64 zicbom_regs[] = { 589 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size), 590 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM, 591 }; 592 593 static __u64 zicboz_regs[] = { 594 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size), 595 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ, 596 }; 597 598 static __u64 svpbmt_regs[] = { 599 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT, 600 }; 601 602 static __u64 sstc_regs[] = { 603 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC, 604 }; 605 606 static __u64 svinval_regs[] = { 607 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL, 608 }; 609 610 static __u64 zihintpause_regs[] = { 611 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE, 612 }; 613 614 static __u64 zba_regs[] = { 615 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA, 616 }; 617 618 static __u64 zbb_regs[] = { 619 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB, 620 }; 621 622 static __u64 zbs_regs[] = { 623 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS, 624 }; 625 626 static __u64 zicntr_regs[] = { 627 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR, 628 }; 629 630 static __u64 zicond_regs[] = { 631 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND, 632 }; 633 634 static __u64 zicsr_regs[] = { 635 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR, 636 }; 637 638 static __u64 zifencei_regs[] = { 639 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI, 640 }; 641 642 static __u64 zihpm_regs[] = { 643 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM, 644 }; 645 646 static __u64 aia_regs[] = { 647 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect), 648 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1), 649 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2), 650 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh), 651 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph), 652 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h), 653 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h), 654 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA, 655 }; 656 657 static __u64 smstateen_regs[] = { 658 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0), 659 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN, 660 }; 661 662 static __u64 fp_f_regs[] = { 663 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]), 664 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]), 665 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]), 666 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]), 667 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]), 668 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]), 669 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]), 670 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]), 671 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]), 672 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]), 673 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]), 674 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]), 675 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]), 676 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]), 677 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]), 678 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]), 679 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]), 680 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]), 681 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]), 682 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]), 683 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]), 684 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]), 685 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]), 686 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]), 687 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]), 688 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]), 689 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]), 690 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]), 691 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]), 692 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]), 693 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]), 694 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]), 695 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr), 696 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F, 697 }; 698 699 static __u64 fp_d_regs[] = { 700 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]), 701 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]), 702 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]), 703 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]), 704 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]), 705 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]), 706 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]), 707 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]), 708 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]), 709 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]), 710 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]), 711 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]), 712 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]), 713 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]), 714 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]), 715 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]), 716 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]), 717 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]), 718 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]), 719 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]), 720 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]), 721 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]), 722 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]), 723 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]), 724 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]), 725 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]), 726 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]), 727 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]), 728 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]), 729 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]), 730 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]), 731 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]), 732 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr), 733 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D, 734 }; 735 736 #define BASE_SUBLIST \ 737 {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \ 738 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),} 739 #define H_REGS_SUBLIST \ 740 {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),} 741 #define ZICBOM_REGS_SUBLIST \ 742 {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),} 743 #define ZICBOZ_REGS_SUBLIST \ 744 {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),} 745 #define SVPBMT_REGS_SUBLIST \ 746 {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),} 747 #define SSTC_REGS_SUBLIST \ 748 {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),} 749 #define SVINVAL_REGS_SUBLIST \ 750 {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),} 751 #define ZIHINTPAUSE_REGS_SUBLIST \ 752 {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),} 753 #define ZBA_REGS_SUBLIST \ 754 {"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),} 755 #define ZBB_REGS_SUBLIST \ 756 {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),} 757 #define ZBS_REGS_SUBLIST \ 758 {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),} 759 #define ZICNTR_REGS_SUBLIST \ 760 {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),} 761 #define ZICOND_REGS_SUBLIST \ 762 {"zicond", .feature = KVM_RISCV_ISA_EXT_ZICOND, .regs = zicond_regs, .regs_n = ARRAY_SIZE(zicond_regs),} 763 #define ZICSR_REGS_SUBLIST \ 764 {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),} 765 #define ZIFENCEI_REGS_SUBLIST \ 766 {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),} 767 #define ZIHPM_REGS_SUBLIST \ 768 {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),} 769 #define AIA_REGS_SUBLIST \ 770 {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),} 771 #define SMSTATEEN_REGS_SUBLIST \ 772 {"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),} 773 #define FP_F_REGS_SUBLIST \ 774 {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \ 775 .regs_n = ARRAY_SIZE(fp_f_regs),} 776 #define FP_D_REGS_SUBLIST \ 777 {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \ 778 .regs_n = ARRAY_SIZE(fp_d_regs),} 779 780 static struct vcpu_reg_list h_config = { 781 .sublists = { 782 BASE_SUBLIST, 783 H_REGS_SUBLIST, 784 {0}, 785 }, 786 }; 787 788 static struct vcpu_reg_list zicbom_config = { 789 .sublists = { 790 BASE_SUBLIST, 791 ZICBOM_REGS_SUBLIST, 792 {0}, 793 }, 794 }; 795 796 static struct vcpu_reg_list zicboz_config = { 797 .sublists = { 798 BASE_SUBLIST, 799 ZICBOZ_REGS_SUBLIST, 800 {0}, 801 }, 802 }; 803 804 static struct vcpu_reg_list svpbmt_config = { 805 .sublists = { 806 BASE_SUBLIST, 807 SVPBMT_REGS_SUBLIST, 808 {0}, 809 }, 810 }; 811 812 static struct vcpu_reg_list sstc_config = { 813 .sublists = { 814 BASE_SUBLIST, 815 SSTC_REGS_SUBLIST, 816 {0}, 817 }, 818 }; 819 820 static struct vcpu_reg_list svinval_config = { 821 .sublists = { 822 BASE_SUBLIST, 823 SVINVAL_REGS_SUBLIST, 824 {0}, 825 }, 826 }; 827 828 static struct vcpu_reg_list zihintpause_config = { 829 .sublists = { 830 BASE_SUBLIST, 831 ZIHINTPAUSE_REGS_SUBLIST, 832 {0}, 833 }, 834 }; 835 836 static struct vcpu_reg_list zba_config = { 837 .sublists = { 838 BASE_SUBLIST, 839 ZBA_REGS_SUBLIST, 840 {0}, 841 }, 842 }; 843 844 static struct vcpu_reg_list zbb_config = { 845 .sublists = { 846 BASE_SUBLIST, 847 ZBB_REGS_SUBLIST, 848 {0}, 849 }, 850 }; 851 852 static struct vcpu_reg_list zbs_config = { 853 .sublists = { 854 BASE_SUBLIST, 855 ZBS_REGS_SUBLIST, 856 {0}, 857 }, 858 }; 859 860 static struct vcpu_reg_list zicntr_config = { 861 .sublists = { 862 BASE_SUBLIST, 863 ZICNTR_REGS_SUBLIST, 864 {0}, 865 }, 866 }; 867 868 static struct vcpu_reg_list zicond_config = { 869 .sublists = { 870 BASE_SUBLIST, 871 ZICOND_REGS_SUBLIST, 872 {0}, 873 }, 874 }; 875 876 static struct vcpu_reg_list zicsr_config = { 877 .sublists = { 878 BASE_SUBLIST, 879 ZICSR_REGS_SUBLIST, 880 {0}, 881 }, 882 }; 883 884 static struct vcpu_reg_list zifencei_config = { 885 .sublists = { 886 BASE_SUBLIST, 887 ZIFENCEI_REGS_SUBLIST, 888 {0}, 889 }, 890 }; 891 892 static struct vcpu_reg_list zihpm_config = { 893 .sublists = { 894 BASE_SUBLIST, 895 ZIHPM_REGS_SUBLIST, 896 {0}, 897 }, 898 }; 899 900 static struct vcpu_reg_list aia_config = { 901 .sublists = { 902 BASE_SUBLIST, 903 AIA_REGS_SUBLIST, 904 {0}, 905 }, 906 }; 907 908 static struct vcpu_reg_list smstateen_config = { 909 .sublists = { 910 BASE_SUBLIST, 911 SMSTATEEN_REGS_SUBLIST, 912 {0}, 913 }, 914 }; 915 916 static struct vcpu_reg_list fp_f_config = { 917 .sublists = { 918 BASE_SUBLIST, 919 FP_F_REGS_SUBLIST, 920 {0}, 921 }, 922 }; 923 924 static struct vcpu_reg_list fp_d_config = { 925 .sublists = { 926 BASE_SUBLIST, 927 FP_D_REGS_SUBLIST, 928 {0}, 929 }, 930 }; 931 932 struct vcpu_reg_list *vcpu_configs[] = { 933 &h_config, 934 &zicbom_config, 935 &zicboz_config, 936 &svpbmt_config, 937 &sstc_config, 938 &svinval_config, 939 &zihintpause_config, 940 &zba_config, 941 &zbb_config, 942 &zbs_config, 943 &zicntr_config, 944 &zicond_config, 945 &zicsr_config, 946 &zifencei_config, 947 &zihpm_config, 948 &aia_config, 949 &smstateen_config, 950 &fp_f_config, 951 &fp_d_config, 952 }; 953 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); 954