// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (c) 2023 Intel Corporation
 *
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)

static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];

bool filter_reg(__u64 reg)
{
	switch (reg & ~REG_MASK) {
	/*
	 * The same set of ISA_EXT registers is not present on all hosts
	 * because ISA_EXT registers are visible to KVM user space based on
	 * the ISA extensions available on the host. Also, disabling an ISA
	 * extension via the corresponding ISA_EXT register does not affect
	 * the visibility of the ISA_EXT register itself.
	 *
	 * Based on the above, we should filter out all ISA_EXT registers.
	 *
	 * Note: The list below is alphabetically sorted.
	 */
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
		return true;
	/* AIA registers are always available when Ssaia can't be disabled */
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
	default:
		break;
	}

	return false;
}

bool check_reject_set(int err)
{
	return err == EINVAL;
}
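
/*
 * Query the ISA_EXT register for extension @ext; a failed read is treated
 * as "extension not available", and a non-zero value means the extension
 * is currently enabled for this vCPU.
 */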
static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
{
	int ret;
	unsigned long value;

	ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
	return (ret) ? false : !!value;
}

void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
	struct vcpu_reg_sublist *s;
	int rc;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
		__vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);

	/*
	 * Disable all extensions that were enabled by default because they
	 * are available on the RISC-V host.
	 */
	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
		if (rc && isa_ext_state[i])
			isa_ext_cant_disable[i] = true;
	}

	for_each_sublist(c, s) {
		if (!s->feature)
			continue;

		/* Try to enable the desired extension */
		__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);

		/* Double check whether the desired extension was enabled */
		__TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
			       "%s not available, skipping tests\n", s->name);
	}
}
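
/*
 * The *_id_to_str() helpers below decode a register id back into the
 * macro expression that produces it, so that print_reg() emits lines
 * which can be pasted directly into the blessed register lists.
 */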
static const char *config_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_config */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG);

	switch (reg_off) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		return "KVM_REG_RISCV_CONFIG_REG(isa)";
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		return "KVM_REG_RISCV_CONFIG_REG(marchid)";
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *core_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_core */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE);

	switch (reg_off) {
	case KVM_REG_RISCV_CORE_REG(regs.pc):
		return "KVM_REG_RISCV_CORE_REG(regs.pc)";
	case KVM_REG_RISCV_CORE_REG(regs.ra):
		return "KVM_REG_RISCV_CORE_REG(regs.ra)";
	case KVM_REG_RISCV_CORE_REG(regs.sp):
		return "KVM_REG_RISCV_CORE_REG(regs.sp)";
	case KVM_REG_RISCV_CORE_REG(regs.gp):
		return "KVM_REG_RISCV_CORE_REG(regs.gp)";
	case KVM_REG_RISCV_CORE_REG(regs.tp):
		return "KVM_REG_RISCV_CORE_REG(regs.tp)";
	case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
	case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
	case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
	case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
	case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
	case KVM_REG_RISCV_CORE_REG(mode):
		return "KVM_REG_RISCV_CORE_REG(mode)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

#define RISCV_CSR_GENERAL(csr) \
	"KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_AIA(csr) \
	"KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_SMSTATEEN(csr) \
	"KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_REG(" #csr ")"

static const char *general_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_REG(sstatus):
		return RISCV_CSR_GENERAL(sstatus);
	case KVM_REG_RISCV_CSR_REG(sie):
		return RISCV_CSR_GENERAL(sie);
	case KVM_REG_RISCV_CSR_REG(stvec):
		return RISCV_CSR_GENERAL(stvec);
	case KVM_REG_RISCV_CSR_REG(sscratch):
		return RISCV_CSR_GENERAL(sscratch);
	case KVM_REG_RISCV_CSR_REG(sepc):
		return RISCV_CSR_GENERAL(sepc);
	case KVM_REG_RISCV_CSR_REG(scause):
		return RISCV_CSR_GENERAL(scause);
	case KVM_REG_RISCV_CSR_REG(stval):
		return RISCV_CSR_GENERAL(stval);
	case KVM_REG_RISCV_CSR_REG(sip):
		return RISCV_CSR_GENERAL(sip);
	case KVM_REG_RISCV_CSR_REG(satp):
		return RISCV_CSR_GENERAL(satp);
	case KVM_REG_RISCV_CSR_REG(scounteren):
		return RISCV_CSR_GENERAL(scounteren);
	case KVM_REG_RISCV_CSR_REG(senvcfg):
		return RISCV_CSR_GENERAL(senvcfg);
	}

	return strdup_printf("KVM_REG_RISCV_CSR_GENERAL | %lld /* UNKNOWN */", reg_off);
}

static const char *aia_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_aia_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_AIA_REG(siselect):
		return RISCV_CSR_AIA(siselect);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
		return RISCV_CSR_AIA(iprio1);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
		return RISCV_CSR_AIA(iprio2);
	case KVM_REG_RISCV_CSR_AIA_REG(sieh):
		return RISCV_CSR_AIA(sieh);
	case KVM_REG_RISCV_CSR_AIA_REG(siph):
		return RISCV_CSR_AIA(siph);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
		return RISCV_CSR_AIA(iprio1h);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return RISCV_CSR_AIA(iprio2h);
	}

	return strdup_printf("KVM_REG_RISCV_CSR_AIA | %lld /* UNKNOWN */", reg_off);
}

static const char *smstateen_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_smstateen_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0):
		return RISCV_CSR_SMSTATEEN(sstateen0);
	}

	TEST_FAIL("Unknown smstateen csr reg: 0x%llx", reg_off);
	return NULL;
}
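
/*
 * CSR register ids carry a subtype (general, AIA, Smstateen) in the bits
 * covered by KVM_REG_RISCV_SUBTYPE_MASK; strip the subtype before
 * dispatching to the per-subtype decoders above.
 */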
static const char *csr_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		return general_csr_id_to_str(reg_off);
	case KVM_REG_RISCV_CSR_AIA:
		return aia_csr_id_to_str(reg_off);
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		return smstateen_csr_id_to_str(reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *timer_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_timer */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER);

	switch (reg_off) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		return "KVM_REG_RISCV_TIMER_REG(frequency)";
	case KVM_REG_RISCV_TIMER_REG(time):
		return "KVM_REG_RISCV_TIMER_REG(time)";
	case KVM_REG_RISCV_TIMER_REG(compare):
		return "KVM_REG_RISCV_TIMER_REG(compare)";
	case KVM_REG_RISCV_TIMER_REG(state):
		return "KVM_REG_RISCV_TIMER_REG(state)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *fp_f_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_f_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_F_REG(f[0]) ...
	     KVM_REG_RISCV_FP_F_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_F_REG(fcsr):
		return "KVM_REG_RISCV_FP_F_REG(fcsr)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *fp_d_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_d_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_D_REG(f[0]) ...
	     KVM_REG_RISCV_FP_D_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_D_REG(fcsr):
		return "KVM_REG_RISCV_FP_D_REG(fcsr)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

#define KVM_ISA_EXT_ARR(ext) \
	[KVM_RISCV_ISA_EXT_##ext] = "KVM_RISCV_ISA_EXT_" #ext

static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);

	static const char * const kvm_isa_ext_reg_name[] = {
		KVM_ISA_EXT_ARR(A),
		KVM_ISA_EXT_ARR(C),
		KVM_ISA_EXT_ARR(D),
		KVM_ISA_EXT_ARR(F),
		KVM_ISA_EXT_ARR(H),
		KVM_ISA_EXT_ARR(I),
		KVM_ISA_EXT_ARR(M),
		KVM_ISA_EXT_ARR(V),
		KVM_ISA_EXT_ARR(SMSTATEEN),
		KVM_ISA_EXT_ARR(SSAIA),
		KVM_ISA_EXT_ARR(SSTC),
		KVM_ISA_EXT_ARR(SVINVAL),
		KVM_ISA_EXT_ARR(SVNAPOT),
		KVM_ISA_EXT_ARR(SVPBMT),
		KVM_ISA_EXT_ARR(ZBA),
		KVM_ISA_EXT_ARR(ZBB),
		KVM_ISA_EXT_ARR(ZBS),
		KVM_ISA_EXT_ARR(ZICBOM),
		KVM_ISA_EXT_ARR(ZICBOZ),
		KVM_ISA_EXT_ARR(ZICNTR),
		KVM_ISA_EXT_ARR(ZICOND),
		KVM_ISA_EXT_ARR(ZICSR),
		KVM_ISA_EXT_ARR(ZIFENCEI),
		KVM_ISA_EXT_ARR(ZIHINTPAUSE),
		KVM_ISA_EXT_ARR(ZIHPM),
	};

	if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
		return strdup_printf("%lld /* UNKNOWN */", reg_off);

	return kvm_isa_ext_reg_name[reg_off];
}

#define KVM_SBI_EXT_ARR(ext) \
	[ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext

static const char *sbi_ext_single_id_to_str(__u64 reg_off)
{
	/* reg_off is KVM_RISCV_SBI_EXT_ID */
	static const char * const kvm_sbi_ext_reg_name[] = {
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_V01),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_TIME),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_IPI),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_RFENCE),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
	};

	if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name))
		return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);

	return kvm_sbi_ext_reg_name[reg_off];
}

static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
{
	const char *unknown = "";

	if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		unknown = " /* UNKNOWN */";

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld%s", reg_off, unknown);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld%s", reg_off, unknown);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return sbi_ext_single_id_to_str(reg_off);
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}
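
/*
 * Pretty-print one register id from KVM_GET_REG_LIST as the macro
 * expression that builds it. Unknown sizes or types are printed with an
 * "UNKNOWN" marker rather than failing, so a newly added register shows
 * up as a readable diff against the blessed lists below.
 */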
void print_reg(const char *prefix, __u64 id)
{
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
		    "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	default:
		printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n",
		       (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK);
		return;
	}

	switch (id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
		       reg_size, config_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_CORE:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
		       reg_size, core_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_CSR:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
		       reg_size, csr_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_TIMER:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
		       reg_size, timer_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_F:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
		       reg_size, fp_f_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_D:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
		       reg_size, fp_d_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_ISA_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
		       reg_size, isa_ext_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_SBI_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
		       reg_size, sbi_ext_id_to_str(prefix, id));
		break;
	default:
		printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
		       reg_size, id & ~REG_MASK);
		return;
	}
}
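
/*
 * For example, on a 64-bit host a core register is printed as
 * (illustrative output):
 *
 *	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
 *
 * which matches the format of the blessed list entries below.
 */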

/*
 * The current blessed list was primed with the output of kernel version
 * v6.5-rc3 and then later updated with new registers.
 */
static __u64 base_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(senvcfg),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
};

/*
 * The skips_set list contains registers that should skip the set test.
 *  - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
 */
static __u64 base_skips_set[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
};

static __u64 h_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
};

static __u64 zicbom_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
};

static __u64 zicboz_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
};

static __u64 svpbmt_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
};

static __u64 sstc_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
};

static __u64 svinval_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
};

static __u64 zihintpause_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
};

static __u64 zba_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
};

static __u64 zbb_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
};

static __u64 zbs_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
};

static __u64 zicntr_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
};

static __u64 zicond_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND,
};

static __u64 zicsr_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
};

static __u64 zifencei_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
};

static __u64 zihpm_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
};

static __u64 aia_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
};

static __u64 smstateen_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN,
};

static __u64 fp_f_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
};

static __u64 fp_d_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
};
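
/*
 * Each *_SUBLIST macro below bundles a register array with the ISA
 * extension (.feature) that must be enabled for those registers to be
 * present; finalize_vcpu() skips the tests for a config whose feature
 * is not available.
 */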
#define BASE_SUBLIST \
	{"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
	 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
#define H_REGS_SUBLIST \
	{"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
#define ZICBOM_REGS_SUBLIST \
	{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
#define ZICBOZ_REGS_SUBLIST \
	{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SVPBMT_REGS_SUBLIST \
	{"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
#define SSTC_REGS_SUBLIST \
	{"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
#define SVINVAL_REGS_SUBLIST \
	{"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
#define ZIHINTPAUSE_REGS_SUBLIST \
	{"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
#define ZBA_REGS_SUBLIST \
	{"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
#define ZBB_REGS_SUBLIST \
	{"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
#define ZBS_REGS_SUBLIST \
	{"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
#define ZICNTR_REGS_SUBLIST \
	{"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
#define ZICOND_REGS_SUBLIST \
	{"zicond", .feature = KVM_RISCV_ISA_EXT_ZICOND, .regs = zicond_regs, .regs_n = ARRAY_SIZE(zicond_regs),}
#define ZICSR_REGS_SUBLIST \
	{"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
#define ZIFENCEI_REGS_SUBLIST \
	{"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
#define ZIHPM_REGS_SUBLIST \
	{"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
#define AIA_REGS_SUBLIST \
	{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
#define SMSTATEEN_REGS_SUBLIST \
	{"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),}
#define FP_F_REGS_SUBLIST \
	{"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
		.regs_n = ARRAY_SIZE(fp_f_regs),}
#define FP_D_REGS_SUBLIST \
	{"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
		.regs_n = ARRAY_SIZE(fp_d_regs),}

static struct vcpu_reg_list h_config = {
	.sublists = {
	BASE_SUBLIST,
	H_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicbom_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOM_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicboz_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOZ_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list svpbmt_config = {
	.sublists = {
	BASE_SUBLIST,
	SVPBMT_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list sstc_config = {
	.sublists = {
	BASE_SUBLIST,
	SSTC_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list svinval_config = {
	.sublists = {
	BASE_SUBLIST,
	SVINVAL_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zihintpause_config = {
	.sublists = {
	BASE_SUBLIST,
	ZIHINTPAUSE_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zba_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBA_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zbb_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBB_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zbs_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBS_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicntr_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICNTR_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicond_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICOND_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicsr_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICSR_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zifencei_config = {
	.sublists = {
	BASE_SUBLIST,
	ZIFENCEI_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zihpm_config = {
	.sublists = {
	BASE_SUBLIST,
	ZIHPM_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list aia_config = {
	.sublists = {
	BASE_SUBLIST,
	AIA_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list smstateen_config = {
	.sublists = {
	BASE_SUBLIST,
	SMSTATEEN_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list fp_f_config = {
	.sublists = {
	BASE_SUBLIST,
	FP_F_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list fp_d_config = {
	.sublists = {
	BASE_SUBLIST,
	FP_D_REGS_SUBLIST,
	{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&h_config,
	&zicbom_config,
	&zicboz_config,
	&svpbmt_config,
	&sstc_config,
	&svinval_config,
	&zihintpause_config,
	&zba_config,
	&zbb_config,
	&zbs_config,
	&zicntr_config,
	&zicond_config,
	&zicsr_config,
	&zifencei_config,
	&zihpm_config,
	&aia_config,
	&smstateen_config,
	&fp_f_config,
	&fp_d_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);