// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (c) 2023 Intel Corporation
 *
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)

enum {
	VCPU_FEATURE_ISA_EXT = 0,
	VCPU_FEATURE_SBI_EXT,
};

static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];

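/*
 * filter_reg() is the arch hook consumed by the shared get-reg-list test
 * (an assumption based on the common harness this file plugs into):
 * registers for which it returns true are ignored when comparing
 * KVM_GET_REG_LIST output against the blessed lists below, because their
 * presence legitimately varies from host to host.
 */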
bool filter_reg(__u64 reg)
{
	switch (reg & ~REG_MASK) {
	/*
	 * The same set of ISA_EXT registers is not present on all hosts because
	 * ISA_EXT registers are visible to KVM user space based on the
	 * ISA extensions available on the host. Also, disabling an ISA
	 * extension using the corresponding ISA_EXT register does not affect
	 * the visibility of the ISA_EXT register itself.
	 *
	 * Based on the above, we should filter out all ISA_EXT registers.
	 *
	 * Note: The below list is alphabetically sorted.
	 */
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_A:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_C:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_H:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKR:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKT:
	/*
	 * Like ISA_EXT registers, SBI_EXT registers are only visible when the
	 * host supports them and disabling them does not affect the visibility
	 * of the SBI_EXT register itself.
	 */
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
	case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
		return true;
	/* AIA registers are always available when Ssaia can't be disabled */
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
	default:
		break;
	}

	return false;
}

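/*
 * check_reject_set() reports the errno the shared get-reg-list harness
 * should expect when a register write is supposed to be rejected
 * (assumption: the harness calls this for registers in a sublist's
 * rejects_set); on RISC-V that is EINVAL.
 */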
bool check_reject_set(int err)
{
	return err == EINVAL;
}

static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id)
{
	int ret;
	unsigned long value;

	ret = __vcpu_get_reg(vcpu, ext_id, &value);
	return (ret) ? false : !!value;
}

void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
	struct vcpu_reg_sublist *s;
	uint64_t feature;
	int rc;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
		__vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);

	/*
	 * Disable all extensions which were enabled by default
	 * if they were available on the RISC-V host.
	 */
	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
		if (rc && isa_ext_state[i])
			isa_ext_cant_disable[i] = true;
	}

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		rc = __vcpu_set_reg(vcpu, RISCV_SBI_EXT_REG(i), 0);
		TEST_ASSERT(!rc || (rc == -1 && errno == ENOENT), "Unexpected error");
	}

	for_each_sublist(c, s) {
		if (!s->feature)
			continue;

		switch (s->feature_type) {
		case VCPU_FEATURE_ISA_EXT:
			feature = RISCV_ISA_EXT_REG(s->feature);
			break;
		case VCPU_FEATURE_SBI_EXT:
			feature = RISCV_SBI_EXT_REG(s->feature);
			break;
		default:
			TEST_FAIL("Unknown feature type");
		}

		/* Try to enable the desired extension */
		__vcpu_set_reg(vcpu, feature, 1);

		/* Double check whether the desired extension was enabled */
		__TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
			       "%s not available, skipping tests\n", s->name);
	}
}

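/*
 * The *_id_to_str() helpers below decode a register id back into the macro
 * expression that builds it; print_reg() uses them to print each register
 * as a line in the same style as the blessed lists at the bottom of this
 * file (presumably so new registers can be copied straight into those lists).
 */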
static const char *config_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_config */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG);

	switch (reg_off) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		return "KVM_REG_RISCV_CONFIG_REG(isa)";
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		return "KVM_REG_RISCV_CONFIG_REG(marchid)";
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *core_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_core */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE);

	switch (reg_off) {
	case KVM_REG_RISCV_CORE_REG(regs.pc):
		return "KVM_REG_RISCV_CORE_REG(regs.pc)";
	case KVM_REG_RISCV_CORE_REG(regs.ra):
		return "KVM_REG_RISCV_CORE_REG(regs.ra)";
	case KVM_REG_RISCV_CORE_REG(regs.sp):
		return "KVM_REG_RISCV_CORE_REG(regs.sp)";
	case KVM_REG_RISCV_CORE_REG(regs.gp):
		return "KVM_REG_RISCV_CORE_REG(regs.gp)";
	case KVM_REG_RISCV_CORE_REG(regs.tp):
		return "KVM_REG_RISCV_CORE_REG(regs.tp)";
	case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
	case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
	case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
	case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
	case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
	case KVM_REG_RISCV_CORE_REG(mode):
		return "KVM_REG_RISCV_CORE_REG(mode)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

#define RISCV_CSR_GENERAL(csr) \
	"KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_AIA(csr) \
	"KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_SMSTATEEN(csr) \
	"KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_REG(" #csr ")"

static const char *general_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_REG(sstatus):
		return RISCV_CSR_GENERAL(sstatus);
	case KVM_REG_RISCV_CSR_REG(sie):
		return RISCV_CSR_GENERAL(sie);
	case KVM_REG_RISCV_CSR_REG(stvec):
		return RISCV_CSR_GENERAL(stvec);
	case KVM_REG_RISCV_CSR_REG(sscratch):
		return RISCV_CSR_GENERAL(sscratch);
	case KVM_REG_RISCV_CSR_REG(sepc):
		return RISCV_CSR_GENERAL(sepc);
	case KVM_REG_RISCV_CSR_REG(scause):
		return RISCV_CSR_GENERAL(scause);
	case KVM_REG_RISCV_CSR_REG(stval):
		return RISCV_CSR_GENERAL(stval);
	case KVM_REG_RISCV_CSR_REG(sip):
		return RISCV_CSR_GENERAL(sip);
	case KVM_REG_RISCV_CSR_REG(satp):
		return RISCV_CSR_GENERAL(satp);
	case KVM_REG_RISCV_CSR_REG(scounteren):
		return RISCV_CSR_GENERAL(scounteren);
	case KVM_REG_RISCV_CSR_REG(senvcfg):
		return RISCV_CSR_GENERAL(senvcfg);
	}

	return strdup_printf("KVM_REG_RISCV_CSR_GENERAL | %lld /* UNKNOWN */", reg_off);
}

static const char *aia_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_aia_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_AIA_REG(siselect):
		return RISCV_CSR_AIA(siselect);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
		return RISCV_CSR_AIA(iprio1);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
		return RISCV_CSR_AIA(iprio2);
	case KVM_REG_RISCV_CSR_AIA_REG(sieh):
		return RISCV_CSR_AIA(sieh);
	case KVM_REG_RISCV_CSR_AIA_REG(siph):
		return RISCV_CSR_AIA(siph);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
		return RISCV_CSR_AIA(iprio1h);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return RISCV_CSR_AIA(iprio2h);
	}

	return strdup_printf("KVM_REG_RISCV_CSR_AIA | %lld /* UNKNOWN */", reg_off);
}

static const char *smstateen_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_smstateen_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0):
		return RISCV_CSR_SMSTATEEN(sstateen0);
	}

	TEST_FAIL("Unknown smstateen csr reg: 0x%llx", reg_off);
	return NULL;
}

static const char *csr_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		return general_csr_id_to_str(reg_off);
	case KVM_REG_RISCV_CSR_AIA:
		return aia_csr_id_to_str(reg_off);
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		return smstateen_csr_id_to_str(reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *timer_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_timer */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER);

	switch (reg_off) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		return "KVM_REG_RISCV_TIMER_REG(frequency)";
	case KVM_REG_RISCV_TIMER_REG(time):
		return "KVM_REG_RISCV_TIMER_REG(time)";
	case KVM_REG_RISCV_TIMER_REG(compare):
		return "KVM_REG_RISCV_TIMER_REG(compare)";
	case KVM_REG_RISCV_TIMER_REG(state):
		return "KVM_REG_RISCV_TIMER_REG(state)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *fp_f_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_f_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_F_REG(f[0]) ...
	     KVM_REG_RISCV_FP_F_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_F_REG(fcsr):
		return "KVM_REG_RISCV_FP_F_REG(fcsr)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

static const char *fp_d_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_d_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_D_REG(f[0]) ...
	     KVM_REG_RISCV_FP_D_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_D_REG(fcsr):
		return "KVM_REG_RISCV_FP_D_REG(fcsr)";
	}

	return strdup_printf("%lld /* UNKNOWN */", reg_off);
}

#define KVM_ISA_EXT_ARR(ext) \
[KVM_RISCV_ISA_EXT_##ext] = "KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_" #ext

static const char *isa_ext_single_id_to_str(__u64 reg_off)
{
	static const char * const kvm_isa_ext_reg_name[] = {
		KVM_ISA_EXT_ARR(A),
		KVM_ISA_EXT_ARR(C),
		KVM_ISA_EXT_ARR(D),
		KVM_ISA_EXT_ARR(F),
		KVM_ISA_EXT_ARR(H),
		KVM_ISA_EXT_ARR(I),
		KVM_ISA_EXT_ARR(M),
		KVM_ISA_EXT_ARR(V),
		KVM_ISA_EXT_ARR(SMSTATEEN),
		KVM_ISA_EXT_ARR(SSAIA),
		KVM_ISA_EXT_ARR(SSTC),
		KVM_ISA_EXT_ARR(SVINVAL),
		KVM_ISA_EXT_ARR(SVNAPOT),
		KVM_ISA_EXT_ARR(SVPBMT),
		KVM_ISA_EXT_ARR(ZBA),
		KVM_ISA_EXT_ARR(ZBB),
		KVM_ISA_EXT_ARR(ZBC),
		KVM_ISA_EXT_ARR(ZBKB),
		KVM_ISA_EXT_ARR(ZBKC),
		KVM_ISA_EXT_ARR(ZBKX),
		KVM_ISA_EXT_ARR(ZBS),
		KVM_ISA_EXT_ARR(ZFA),
		KVM_ISA_EXT_ARR(ZFH),
		KVM_ISA_EXT_ARR(ZFHMIN),
		KVM_ISA_EXT_ARR(ZICBOM),
		KVM_ISA_EXT_ARR(ZICBOZ),
		KVM_ISA_EXT_ARR(ZICNTR),
		KVM_ISA_EXT_ARR(ZICOND),
		KVM_ISA_EXT_ARR(ZICSR),
		KVM_ISA_EXT_ARR(ZIFENCEI),
		KVM_ISA_EXT_ARR(ZIHINTNTL),
		KVM_ISA_EXT_ARR(ZIHINTPAUSE),
		KVM_ISA_EXT_ARR(ZIHPM),
		KVM_ISA_EXT_ARR(ZKND),
		KVM_ISA_EXT_ARR(ZKNE),
		KVM_ISA_EXT_ARR(ZKNH),
		KVM_ISA_EXT_ARR(ZKR),
		KVM_ISA_EXT_ARR(ZKSED),
		KVM_ISA_EXT_ARR(ZKSH),
		KVM_ISA_EXT_ARR(ZKT),
		KVM_ISA_EXT_ARR(ZVBB),
		KVM_ISA_EXT_ARR(ZVBC),
		KVM_ISA_EXT_ARR(ZVFH),
		KVM_ISA_EXT_ARR(ZVFHMIN),
		KVM_ISA_EXT_ARR(ZVKB),
		KVM_ISA_EXT_ARR(ZVKG),
		KVM_ISA_EXT_ARR(ZVKNED),
		KVM_ISA_EXT_ARR(ZVKNHA),
		KVM_ISA_EXT_ARR(ZVKNHB),
		KVM_ISA_EXT_ARR(ZVKSED),
		KVM_ISA_EXT_ARR(ZVKSH),
		KVM_ISA_EXT_ARR(ZVKT),
	};

	if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
		return strdup_printf("KVM_REG_RISCV_ISA_SINGLE | %lld /* UNKNOWN */", reg_off);

	return kvm_isa_ext_reg_name[reg_off];
}

static const char *isa_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
{
	const char *unknown = "";

	if (reg_off > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		unknown = " /* UNKNOWN */";

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return strdup_printf("KVM_REG_RISCV_ISA_MULTI_EN | %lld%s", reg_off, unknown);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return strdup_printf("KVM_REG_RISCV_ISA_MULTI_DIS | %lld%s", reg_off, unknown);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return isa_ext_single_id_to_str(reg_off);
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return isa_ext_multi_id_to_str(reg_subtype, reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

#define KVM_SBI_EXT_ARR(ext) \
[ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext

static const char *sbi_ext_single_id_to_str(__u64 reg_off)
{
	/* reg_off is KVM_RISCV_SBI_EXT_ID */
	static const char * const kvm_sbi_ext_reg_name[] = {
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_V01),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_TIME),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_IPI),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_RFENCE),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
		KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
	};

	if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name))
		return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);

	return kvm_sbi_ext_reg_name[reg_off];
}

static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
{
	const char *unknown = "";

	if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		unknown = " /* UNKNOWN */";

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld%s", reg_off, unknown);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld%s", reg_off, unknown);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return sbi_ext_single_id_to_str(reg_off);
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

static const char *sbi_sta_id_to_str(__u64 reg_off)
{
	switch (reg_off) {
	case 0: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo)";
	case 1: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi)";
	}
	return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off);
}

static const char *sbi_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_STATE);

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		return sbi_sta_id_to_str(reg_off);
	}

	return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
}

void print_reg(const char *prefix, __u64 id)
{
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
		    "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	default:
		printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n",
		       (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK);
		return;
	}

	switch (id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
		       reg_size, config_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_CORE:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
		       reg_size, core_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_CSR:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
		       reg_size, csr_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_TIMER:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
		       reg_size, timer_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_F:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
		       reg_size, fp_f_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_D:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
		       reg_size, fp_d_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_ISA_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
		       reg_size, isa_ext_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_SBI_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
		       reg_size, sbi_ext_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_SBI_STATE:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_STATE | %s,\n",
		       reg_size, sbi_id_to_str(prefix, id));
		break;
	default:
		printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
		       reg_size, id & ~REG_MASK);
		return;
	}
}

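/*
 * Illustrative output shape: for the program counter on a 64-bit host,
 * print_reg() emits a line such as
 *
 *	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
 *
 * i.e. one ready-to-paste entry per register, in the same shape as the
 * blessed lists below (which spell the size as KVM_REG_SIZE_ULONG).
 */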
/*
 * The current blessed list was primed with the output of kernel version
 * v6.5-rc3 and then later updated with new registers.
 */
static __u64 base_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(senvcfg),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
};

/*
 * The skips_set list contains registers that should skip the set test.
 * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
 */
static __u64 base_skips_set[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
};

static __u64 sbi_base_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
};

static __u64 sbi_sta_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi),
};

static __u64 zicbom_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM,
};

static __u64 zicboz_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ,
};

static __u64 aia_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA,
};

static __u64 smstateen_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN,
};

static __u64 fp_f_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F,
};

static __u64 fp_d_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D,
};

#define SUBLIST_BASE \
	{"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
	 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
#define SUBLIST_SBI_BASE \
	{"sbi-base", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_V01, \
	 .regs = sbi_base_regs, .regs_n = ARRAY_SIZE(sbi_base_regs),}
#define SUBLIST_SBI_STA \
	{"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \
	 .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),}
#define SUBLIST_ZICBOM \
	{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
#define SUBLIST_ZICBOZ \
	{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SUBLIST_AIA \
	{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
#define SUBLIST_SMSTATEEN \
	{"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),}
#define SUBLIST_FP_F \
	{"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
	 .regs_n = ARRAY_SIZE(fp_f_regs),}
#define SUBLIST_FP_D \
	{"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
	 .regs_n = ARRAY_SIZE(fp_d_regs),}

#define KVM_ISA_EXT_SIMPLE_CONFIG(ext, extu)			\
static __u64 regs_##ext[] = {					\
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG |			\
	KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |	\
	KVM_RISCV_ISA_EXT_##extu,				\
};								\
static struct vcpu_reg_list config_##ext = {			\
	.sublists = {						\
		SUBLIST_BASE,					\
		{						\
			.name = #ext,				\
			.feature = KVM_RISCV_ISA_EXT_##extu,	\
			.regs = regs_##ext,			\
			.regs_n = ARRAY_SIZE(regs_##ext),	\
		},						\
		{0},						\
	},							\
}								\

#define KVM_SBI_EXT_SIMPLE_CONFIG(ext, extu)			\
static __u64 regs_sbi_##ext[] = {				\
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG |			\
	KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |	\
	KVM_RISCV_SBI_EXT_##extu,				\
};								\
static struct vcpu_reg_list config_sbi_##ext = {		\
	.sublists = {						\
		SUBLIST_BASE,					\
		{						\
			.name = "sbi-"#ext,			\
			.feature_type = VCPU_FEATURE_SBI_EXT,	\
			.feature = KVM_RISCV_SBI_EXT_##extu,	\
			.regs = regs_sbi_##ext,			\
			.regs_n = ARRAY_SIZE(regs_sbi_##ext),	\
		},						\
		{0},						\
	},							\
}								\

#define KVM_ISA_EXT_SUBLIST_CONFIG(ext, extu)			\
static struct vcpu_reg_list config_##ext = {			\
	.sublists = {						\
		SUBLIST_BASE,					\
		SUBLIST_##extu,					\
		{0},						\
	},							\
}								\

#define KVM_SBI_EXT_SUBLIST_CONFIG(ext, extu)			\
static struct vcpu_reg_list config_sbi_##ext = {		\
	.sublists = {						\
		SUBLIST_BASE,					\
		SUBLIST_SBI_##extu,				\
		{0},						\
	},							\
}								\

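/*
 * For illustration, KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB) below expands
 * (roughly) to:
 *
 *	static __u64 regs_zbb[] = {
 *		KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
 *		KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
 *		KVM_RISCV_ISA_EXT_ZBB,
 *	};
 *	static struct vcpu_reg_list config_zbb = {
 *		.sublists = {
 *			SUBLIST_BASE,
 *			{
 *				.name = "zbb",
 *				.feature = KVM_RISCV_ISA_EXT_ZBB,
 *				.regs = regs_zbb,
 *				.regs_n = ARRAY_SIZE(regs_zbb),
 *			},
 *			{0},
 *		},
 *	};
 *
 * i.e. every config pairs the base sublist with one extension-specific
 * sublist containing just that extension's registers.
 */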
/* Note: The below list is alphabetically sorted. */

KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE);
KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);

KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D);
KVM_ISA_EXT_SIMPLE_CONFIG(h, H);
KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN);
KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND);
KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR);
KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
KVM_ISA_EXT_SIMPLE_CONFIG(zknh, ZKNH);
KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN);
KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvkg, ZVKG);
KVM_ISA_EXT_SIMPLE_CONFIG(zvkned, ZVKNED);
KVM_ISA_EXT_SIMPLE_CONFIG(zvknha, ZVKNHA);
KVM_ISA_EXT_SIMPLE_CONFIG(zvknhb, ZVKNHB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvksed, ZVKSED);
KVM_ISA_EXT_SIMPLE_CONFIG(zvksh, ZVKSH);
KVM_ISA_EXT_SIMPLE_CONFIG(zvkt, ZVKT);

struct vcpu_reg_list *vcpu_configs[] = {
	&config_sbi_base,
	&config_sbi_sta,
	&config_sbi_pmu,
	&config_sbi_dbcn,
	&config_aia,
	&config_fp_f,
	&config_fp_d,
	&config_h,
	&config_smstateen,
	&config_sstc,
	&config_svinval,
	&config_svnapot,
	&config_svpbmt,
	&config_zba,
	&config_zbb,
	&config_zbc,
	&config_zbkb,
	&config_zbkc,
	&config_zbkx,
	&config_zbs,
	&config_zfa,
	&config_zfh,
	&config_zfhmin,
	&config_zicbom,
	&config_zicboz,
	&config_zicntr,
	&config_zicond,
	&config_zicsr,
	&config_zifencei,
	&config_zihintntl,
	&config_zihintpause,
	&config_zihpm,
	&config_zknd,
	&config_zkne,
	&config_zknh,
	&config_zkr,
	&config_zksed,
	&config_zksh,
	&config_zkt,
	&config_zvbb,
	&config_zvbc,
	&config_zvfh,
	&config_zvfhmin,
	&config_zvkb,
	&config_zvkg,
	&config_zvkned,
	&config_zvknha,
	&config_zvknhb,
	&config_zvksed,
	&config_zvksh,
	&config_zvkt,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);