// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * While the blessed list should be created from the oldest possible
 * kernel, we can't go older than v5.2, because that's the first
 * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
 * core register IDs in KVM_GET_REG_LIST"). Without that commit the core
 * registers won't match expectations.
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

#define SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r),	\
				 sys_reg_Op1(SYS_ ## r),	\
				 sys_reg_CRn(SYS_ ## r),	\
				 sys_reg_CRm(SYS_ ## r),	\
				 sys_reg_Op2(SYS_ ## r))

struct feature_id_reg {
	__u64 reg;
	__u64 id_reg;
	__u64 feat_shift;
	__u64 feat_min;
};

#define FEAT(id, f, v)					\
	.id_reg = SYS_REG(id),				\
	.feat_shift = id ## _ ## f ## _SHIFT,		\
	.feat_min = id ## _ ## f ## _ ## v

#define REG_FEAT(r, id, f, v)			\
	{					\
		.reg = SYS_REG(r),		\
		FEAT(id, f, v)			\
	}

static struct feature_id_reg feat_id_regs[] = {
	REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(TCR2_EL2, ID_AA64MMFR3_EL1, TCRX, IMP),
	REG_FEAT(PIRE0_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIRE0_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(PIR_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
	REG_FEAT(POR_EL1, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL0, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(POR_EL2, ID_AA64MMFR3_EL1, S1POE, IMP),
	REG_FEAT(HCRX_EL2, ID_AA64MMFR1_EL1, HCX, IMP),
	REG_FEAT(HFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGITR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HDFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HAFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
	REG_FEAT(HFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HFGITR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
	REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
	REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
	REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
	REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
	REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
};
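
/*
 * For reference (derived from the macros above), the first entry,
 * REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP), is equivalent to
 *
 *	{
 *		.reg = SYS_REG(TCR2_EL1),
 *		.id_reg = SYS_REG(ID_AA64MMFR3_EL1),
 *		.feat_shift = ID_AA64MMFR3_EL1_TCRX_SHIFT,
 *		.feat_min = ID_AA64MMFR3_EL1_TCRX_IMP
 *	}
 *
 * i.e. TCR2_EL1 is only expected in the register list when the TCRX
 * field of ID_AA64MMFR3_EL1 is at least IMP.
 */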

bool filter_reg(__u64 reg)
{
	/*
	 * DEMUX register presence depends on the host's CLIDR_EL1.
	 * This means there's no set of them that we can bless.
	 */
	if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return true;

	return false;
}

static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	int i, ret;
	__u64 data, feat_val;

	for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
		if (feat_id_regs[i].reg == reg) {
			ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
			if (ret < 0)
				return false;

			feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
			return feat_val >= feat_id_regs[i].feat_min;
		}
	}

	return true;
}

bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
	return check_supported_feat_reg(vcpu, reg);
}

bool check_reject_set(int err)
{
	return err == EPERM;
}

void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	struct vcpu_reg_sublist *s;
	int feature;

	for_each_sublist(c, s) {
		if (s->finalize) {
			feature = s->feature;
			vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
		}
	}
}

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS	2
#define CORE_SPSR_XX_NR_WORDS	2
#define CORE_FPREGS_XX_NR_WORDS	4

static const char *core_id_to_str(const char *prefix, __u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
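	/*
	 * KVM_REG_ARM_CORE_REG() offsets are counted in 32-bit words, so each
	 * 128-bit fp_regs.vregs[] entry spans CORE_FPREGS_XX_NR_WORDS of them.
	 */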
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
		return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
	return NULL;
}

static const char *sve_id_to_str(const char *prefix, __u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
		return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
		return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	return NULL;
}

void print_reg(const char *prefix, __u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
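	/*
	 * A sys reg id encodes Op0, Op1, CRn, CRm and Op2; feeding the
	 * extracted fields back through ARM64_SYS_REG() must reproduce the
	 * id exactly, i.e. no stray bits may be set.
	 */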
	case KVM_REG_ARM64_SYSREG:
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM_FW_FEAT_BMAP:
		TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
			    "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
		printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		printf("\t%s,\n", sve_id_to_str(prefix, id));
		break;
	default:
		TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}

/*
 * The original blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 * (The --core-reg-fixup option and its fixup function have been removed
 * from the test, as it's unlikely this test will be run on a kernel
 * older than v5.2.)
 *
 * The blessed list is up to date with kernel version v6.4 (or so we hope).
 */
static __u64 base_regs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	KVM_REG_ARM_FW_REG(0),		/* KVM_REG_ARM_PSCI_VERSION */
	KVM_REG_ARM_FW_REG(1),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
	KVM_REG_ARM_FW_REG(2),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
	KVM_REG_ARM_FW_REG(3),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(0),	/* KVM_REG_ARM_STD_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1),	/* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2),	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(3),	/* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */

	/*
	 * EL0 Virtual Timer Registers
	 *
	 * WARNING:
	 * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
	 * with the appropriate register encodings. Their values have been
	 * accidentally swapped. As this is established API, the definitions
	 * here must be used, rather than ones derived from the encodings.
	 */
	KVM_ARM64_SYS_REG(SYS_CNTV_CTL_EL0),
	KVM_REG_ARM_TIMER_CVAL,
	KVM_REG_ARM_TIMER_CNT,

	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7),	/* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1),	/* CTR_EL0 */
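	/*
	 * The unnamed (2, 0, 0, n, 4-7) entries below are the hardware
	 * breakpoint/watchpoint registers DBGBVR<n>_EL1, DBGBCR<n>_EL1,
	 * DBGWVR<n>_EL1 and DBGWCR<n>_EL1.
	 */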
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0),	/* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2),	/* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 0, 1, 1, 4),	/* OSLSR_EL1 */
	ARM64_SYS_REG(2, 4, 0, 7, 0),	/* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1),	/* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2),	/* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3),	/* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4),	/* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5),	/* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6),	/* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7),	/* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0),	/* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1),	/* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2),	/* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3),	/* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4),	/* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5),	/* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6),	/* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7),	/* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0),	/* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1),	/* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2),	/* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4),	/* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5),	/* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6),	/* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1),	/* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2),	/* ID_AA64PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5),	/* ID_AA64SMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0),	/* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1),	/* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4),	/* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5),	/* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1),	/* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2),	/* ID_AA64ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0),	/* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1),	/* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2),	/* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3),	/* ID_AA64MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 4),	/* ID_AA64MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0),	/* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1),	/* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2),	/* CPACR_EL1 */
	KVM_ARM64_SYS_REG(SYS_SCTLR2_EL1),
	ARM64_SYS_REG(3, 0, 2, 0, 0),	/* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1),	/* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2),	/* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 3),	/* TCR2_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0),	/* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1),	/* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0),	/* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0),	/* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0),	/* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0),	/* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 2),	/* PIRE0_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 3),	/* PIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 4),	/* POR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0),	/* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0),	/* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1),	/* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1),	/* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4),	/* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0),	/* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0),	/* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 10, 2, 4),	/* POR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 2),	/* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3),	/* TPIDRRO_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 1),	/* CNTPCT_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 1),	/* CNTP_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 2, 2),	/* CNTP_CVAL_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0),	/* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1),	/* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0),	/* FPEXC32_EL2 */
};
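
/*
 * The unnamed (3, 3, 14, 8-11, n) and (3, 3, 14, 12-15, n) entries below
 * are the PMEVCNTR<n>_EL0 event counters and PMEVTYPER<n>_EL0 event type
 * registers, respectively.
 */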
static __u64 pmu_regs[] = {
	ARM64_SYS_REG(3, 0, 9, 14, 1),	/* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2),	/* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1),	/* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2),	/* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3),	/* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4),	/* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5),	/* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0),	/* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3),	/* PMOVSSET_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 0),
	ARM64_SYS_REG(3, 3, 14, 8, 1),
	ARM64_SYS_REG(3, 3, 14, 8, 2),
	ARM64_SYS_REG(3, 3, 14, 8, 3),
	ARM64_SYS_REG(3, 3, 14, 8, 4),
	ARM64_SYS_REG(3, 3, 14, 8, 5),
	ARM64_SYS_REG(3, 3, 14, 8, 6),
	ARM64_SYS_REG(3, 3, 14, 8, 7),
	ARM64_SYS_REG(3, 3, 14, 9, 0),
	ARM64_SYS_REG(3, 3, 14, 9, 1),
	ARM64_SYS_REG(3, 3, 14, 9, 2),
	ARM64_SYS_REG(3, 3, 14, 9, 3),
	ARM64_SYS_REG(3, 3, 14, 9, 4),
	ARM64_SYS_REG(3, 3, 14, 9, 5),
	ARM64_SYS_REG(3, 3, 14, 9, 6),
	ARM64_SYS_REG(3, 3, 14, 9, 7),
	ARM64_SYS_REG(3, 3, 14, 10, 0),
	ARM64_SYS_REG(3, 3, 14, 10, 1),
	ARM64_SYS_REG(3, 3, 14, 10, 2),
	ARM64_SYS_REG(3, 3, 14, 10, 3),
	ARM64_SYS_REG(3, 3, 14, 10, 4),
	ARM64_SYS_REG(3, 3, 14, 10, 5),
	ARM64_SYS_REG(3, 3, 14, 10, 6),
	ARM64_SYS_REG(3, 3, 14, 10, 7),
	ARM64_SYS_REG(3, 3, 14, 11, 0),
	ARM64_SYS_REG(3, 3, 14, 11, 1),
	ARM64_SYS_REG(3, 3, 14, 11, 2),
	ARM64_SYS_REG(3, 3, 14, 11, 3),
	ARM64_SYS_REG(3, 3, 14, 11, 4),
	ARM64_SYS_REG(3, 3, 14, 11, 5),
	ARM64_SYS_REG(3, 3, 14, 11, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 0),
	ARM64_SYS_REG(3, 3, 14, 12, 1),
	ARM64_SYS_REG(3, 3, 14, 12, 2),
	ARM64_SYS_REG(3, 3, 14, 12, 3),
	ARM64_SYS_REG(3, 3, 14, 12, 4),
	ARM64_SYS_REG(3, 3, 14, 12, 5),
	ARM64_SYS_REG(3, 3, 14, 12, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 7),
	ARM64_SYS_REG(3, 3, 14, 13, 0),
	ARM64_SYS_REG(3, 3, 14, 13, 1),
	ARM64_SYS_REG(3, 3, 14, 13, 2),
	ARM64_SYS_REG(3, 3, 14, 13, 3),
	ARM64_SYS_REG(3, 3, 14, 13, 4),
	ARM64_SYS_REG(3, 3, 14, 13, 5),
	ARM64_SYS_REG(3, 3, 14, 13, 6),
	ARM64_SYS_REG(3, 3, 14, 13, 7),
	ARM64_SYS_REG(3, 3, 14, 14, 0),
	ARM64_SYS_REG(3, 3, 14, 14, 1),
	ARM64_SYS_REG(3, 3, 14, 14, 2),
	ARM64_SYS_REG(3, 3, 14, 14, 3),
	ARM64_SYS_REG(3, 3, 14, 14, 4),
	ARM64_SYS_REG(3, 3, 14, 14, 5),
	ARM64_SYS_REG(3, 3, 14, 14, 6),
	ARM64_SYS_REG(3, 3, 14, 14, 7),
	ARM64_SYS_REG(3, 3, 14, 15, 0),
	ARM64_SYS_REG(3, 3, 14, 15, 1),
	ARM64_SYS_REG(3, 3, 14, 15, 2),
	ARM64_SYS_REG(3, 3, 14, 15, 3),
	ARM64_SYS_REG(3, 3, 14, 15, 4),
	ARM64_SYS_REG(3, 3, 14, 15, 5),
	ARM64_SYS_REG(3, 3, 14, 15, 6),
	ARM64_SYS_REG(3, 3, 14, 15, 7),	/* PMCCFILTR_EL0 */
};

static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};

static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0),	/* ZCR_EL1 */
};

static __u64 sve_rejects_set[] = {
	KVM_REG_ARM64_SVE_VLS,
};

static __u64 pauth_addr_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 1, 0),	/* APIAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 1),	/* APIAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 2),	/* APIBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 1, 3),	/* APIBKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 0),	/* APDAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 1),	/* APDAKEYHI_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 2),	/* APDBKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 2, 3)	/* APDBKEYHI_EL1 */
};

static __u64 pauth_generic_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 3, 0),	/* APGAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 3, 1),	/* APGAKEYHI_EL1 */
};

static __u64 el2_regs[] = {
	SYS_REG(VPIDR_EL2),
	SYS_REG(VMPIDR_EL2),
	SYS_REG(SCTLR_EL2),
	SYS_REG(ACTLR_EL2),
	SYS_REG(HCR_EL2),
	SYS_REG(MDCR_EL2),
	SYS_REG(CPTR_EL2),
	SYS_REG(HSTR_EL2),
	SYS_REG(HFGRTR_EL2),
	SYS_REG(HFGWTR_EL2),
	SYS_REG(HFGITR_EL2),
	SYS_REG(HACR_EL2),
	SYS_REG(ZCR_EL2),
	SYS_REG(HCRX_EL2),
	SYS_REG(TTBR0_EL2),
	SYS_REG(TTBR1_EL2),
	SYS_REG(TCR_EL2),
	SYS_REG(TCR2_EL2),
	SYS_REG(VTTBR_EL2),
	SYS_REG(VTCR_EL2),
	SYS_REG(VNCR_EL2),
	SYS_REG(HDFGRTR2_EL2),
	SYS_REG(HDFGWTR2_EL2),
	SYS_REG(HFGRTR2_EL2),
	SYS_REG(HFGWTR2_EL2),
	SYS_REG(HDFGRTR_EL2),
	SYS_REG(HDFGWTR_EL2),
	SYS_REG(HAFGRTR_EL2),
	SYS_REG(HFGITR2_EL2),
	SYS_REG(SPSR_EL2),
	SYS_REG(ELR_EL2),
	SYS_REG(AFSR0_EL2),
	SYS_REG(AFSR1_EL2),
	SYS_REG(ESR_EL2),
	SYS_REG(FAR_EL2),
	SYS_REG(HPFAR_EL2),
	SYS_REG(MAIR_EL2),
	SYS_REG(PIRE0_EL2),
	SYS_REG(PIR_EL2),
	SYS_REG(POR_EL2),
	SYS_REG(AMAIR_EL2),
	SYS_REG(VBAR_EL2),
	SYS_REG(CONTEXTIDR_EL2),
	SYS_REG(TPIDR_EL2),
	SYS_REG(CNTVOFF_EL2),
	SYS_REG(CNTHCTL_EL2),
	SYS_REG(CNTHP_CTL_EL2),
	SYS_REG(CNTHP_CVAL_EL2),
	SYS_REG(CNTHV_CTL_EL2),
	SYS_REG(CNTHV_CVAL_EL2),
	SYS_REG(SP_EL2),
	SYS_REG(VDISR_EL2),
	SYS_REG(VSESR_EL2),
};

static __u64 el2_e2h0_regs[] = {
	/* Empty */
};
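
/*
 * Sublist building blocks for the configs below. A sublist may be gated on a
 * KVM capability (checked by the common get-reg-list test code), may request
 * a vcpu feature (finalized via KVM_ARM_VCPU_FINALIZE when .finalize is set,
 * see finalize_vcpu() above), and may name registers whose KVM_SET_ONE_REG is
 * expected to be rejected (see sve_rejects_set and check_reject_set()).
 */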
#define BASE_SUBLIST \
	{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
	{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
	{ "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
	  .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
	{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
	  .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
	  .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST							\
	{								\
		.name		= "pauth_address",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_ADDRESS,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_ADDRESS,		\
		.regs		= pauth_addr_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_addr_regs),		\
	},								\
	{								\
		.name		= "pauth_generic",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_GENERIC,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_GENERIC,		\
		.regs		= pauth_generic_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_generic_regs),	\
	}
#define EL2_SUBLIST							\
	{								\
		.name		= "EL2",				\
		.capability	= KVM_CAP_ARM_EL2,			\
		.feature	= KVM_ARM_VCPU_HAS_EL2,			\
		.regs		= el2_regs,				\
		.regs_n		= ARRAY_SIZE(el2_regs),			\
	}
#define EL2_E2H0_SUBLIST						\
	EL2_SUBLIST,							\
	{								\
		.name		= "EL2 E2H0",				\
		.capability	= KVM_CAP_ARM_EL2_E2H0,		\
		.feature	= KVM_ARM_VCPU_HAS_EL2_E2H0,		\
		.regs		= el2_e2h0_regs,			\
		.regs_n		= ARRAY_SIZE(el2_e2h0_regs),		\
	}

static struct vcpu_reg_list vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list sve_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};
static struct vcpu_reg_list pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_sve_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_sve_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list el2_e2h0_pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	EL2_E2H0_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,
	&sve_config,
	&sve_pmu_config,
	&pauth_config,
	&pauth_pmu_config,

	&el2_vregs_config,
	&el2_vregs_pmu_config,
	&el2_sve_config,
	&el2_sve_pmu_config,
	&el2_pauth_config,
	&el2_pauth_pmu_config,

	&el2_e2h0_vregs_config,
	&el2_e2h0_vregs_pmu_config,
	&el2_e2h0_sve_config,
	&el2_e2h0_sve_pmu_config,
	&el2_e2h0_pauth_config,
	&el2_e2h0_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);