// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	return undef_access(vcpu, params, r);
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}

enum sr_loc_attr {
	SR_LOC_MEMORY	= 0,		/* Register definitely in memory */
	SR_LOC_LOADED	= BIT(0),	/* Register on CPU, unless it cannot */
	SR_LOC_MAPPED	= BIT(1),	/* Register in a different CPU register */
	SR_LOC_XLATED	= BIT(2),	/* Register translated to fit another reg */
	SR_LOC_SPECIAL	= BIT(3),	/* Demanding register, implies loaded */
};

struct sr_loc {
	enum sr_loc_attr	loc;
	enum vcpu_sysreg	map_reg;
	u64			(*xlate)(u64);
};

static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
					       enum vcpu_sysreg reg)
{
	switch (reg) {
	case SCTLR_EL1:
	case CPACR_EL1:
	case TTBR0_EL1:
	case TTBR1_EL1:
	case TCR_EL1:
	case TCR2_EL1:
	case PIR_EL1:
	case PIRE0_EL1:
	case POR_EL1:
	case ESR_EL1:
	case AFSR0_EL1:
	case AFSR1_EL1:
	case FAR_EL1:
	case MAIR_EL1:
	case VBAR_EL1:
	case CONTEXTIDR_EL1:
	case AMAIR_EL1:
	case CNTKCTL_EL1:
	case ELR_EL1:
	case SPSR_EL1:
	case ZCR_EL1:
	case SCTLR2_EL1:
		/*
		 * EL1 registers which have an ELx2 mapping are loaded if
		 * we're not in hypervisor context.
		 */
		return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;

	case TPIDR_EL0:
	case TPIDRRO_EL0:
	case TPIDR_EL1:
	case PAR_EL1:
	case DACR32_EL2:
	case IFSR32_EL2:
	case DBGVCR32_EL2:
		/* These registers are always loaded, no matter what */
		return SR_LOC_LOADED;

	default:
		/* Non-mapped EL2 registers are by definition in memory. */
		return SR_LOC_MEMORY;
	}
}

static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
				       enum vcpu_sysreg reg,
				       enum vcpu_sysreg map_reg,
				       u64 (*xlate)(u64),
				       struct sr_loc *loc)
{
	if (!is_hyp_ctxt(vcpu)) {
		loc->loc = SR_LOC_MEMORY;
		return;
	}

	loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
	loc->map_reg = map_reg;

	WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);

	if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
		loc->loc |= SR_LOC_XLATED;
		loc->xlate = xlate;
	}
}

#define MAPPED_EL2_SYSREG(r, m, t)					\
	case r: {							\
		locate_mapped_el2_register(vcpu, r, m, t, loc);		\
		break;							\
	}

static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
			    struct sr_loc *loc)
{
	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
		loc->loc = SR_LOC_MEMORY;
		return;
	}

	switch (reg) {
	MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
			  translate_sctlr_el2_to_sctlr_el1);
	MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
			  translate_cptr_el2_to_cpacr_el1);
	MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
			  translate_ttbr0_el2_to_ttbr0_el1);
	MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL);
	MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
			  translate_tcr_el2_to_tcr_el1);
	MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL);
	MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL);
	MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL);
	MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL);
	MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL);
	MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL);
	MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL);
	MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL);
	MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL);
	MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL);
	MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL);
	MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL);
	MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL);
	MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL);
	MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL);
	case CNTHCTL_EL2:
		/* CNTHCTL_EL2 is super special, until we support NV2.1 */
		loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
			    SR_LOC_SPECIAL : SR_LOC_MEMORY);
		break;
	default:
		loc->loc = locate_direct_register(vcpu, reg);
	}
}
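
/*
 * For example: when the vCPU is not in hyp context, a mapped EL2 register
 * such as SCTLR_EL2 lives in memory (SR_LOC_MEMORY). In hyp context it is
 * loaded on the CPU via its SCTLR_EL1 mapping (SR_LOC_LOADED | SR_LOC_MAPPED),
 * and when E2H is clear the value additionally requires translation
 * (SR_LOC_XLATED) through the helper passed to MAPPED_EL2_SYSREG().
 */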

static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
{
	u64 val = 0x8badf00d8badf00d;

	switch (reg) {
	case SCTLR_EL1:		val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		val = read_sysreg_s(SYS_TCR_EL12);	break;
	case TCR2_EL1:		val = read_sysreg_s(SYS_TCR2_EL12);	break;
	case PIR_EL1:		val = read_sysreg_s(SYS_PIR_EL12);	break;
	case PIRE0_EL1:		val = read_sysreg_s(SYS_PIRE0_EL12);	break;
	case POR_EL1:		val = read_sysreg_s(SYS_POR_EL12);	break;
	case ESR_EL1:		val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case AMAIR_EL1:		val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		val = read_sysreg_s(SYS_ELR_EL12);	break;
	case SPSR_EL1:		val = read_sysreg_s(SYS_SPSR_EL12);	break;
	case ZCR_EL1:		val = read_sysreg_s(SYS_ZCR_EL12);	break;
	case SCTLR2_EL1:	val = read_sysreg_s(SYS_SCTLR2_EL12);	break;
	case TPIDR_EL0:		val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case PAR_EL1:		val = read_sysreg_par();		break;
	case DACR32_EL2:	val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		WARN_ON_ONCE(1);
	}

	return val;
}

static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
{
	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
	case SCTLR2_EL1:	write_sysreg_s(val, SYS_SCTLR2_EL12);	break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		WARN_ON_ONCE(1);
	}
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
	struct sr_loc loc = {};

	locate_register(vcpu, reg, &loc);

	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);

	if (loc.loc & SR_LOC_SPECIAL) {
		u64 val;

		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);

		/*
		 * CNTHCTL_EL2 requires some special treatment to account
		 * for the bits that can be set via CNTKCTL_EL1 when E2H==1.
		 */
		switch (reg) {
		case CNTHCTL_EL2:
			val = read_sysreg_el1(SYS_CNTKCTL);
			val &= CNTKCTL_VALID_BITS;
			val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
			return val;
		default:
			WARN_ON_ONCE(1);
		}
	}

	if (loc.loc & SR_LOC_LOADED) {
		enum vcpu_sysreg map_reg = reg;

		if (loc.loc & SR_LOC_MAPPED)
			map_reg = loc.map_reg;

		if (!(loc.loc & SR_LOC_XLATED)) {
			u64 val = read_sr_from_cpu(map_reg);

			if (reg >= __SANITISED_REG_START__)
				val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

			return val;
		}
	}

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
{
	struct sr_loc loc = {};

	locate_register(vcpu, reg, &loc);

	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);

	if (loc.loc & SR_LOC_SPECIAL) {

		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);

		switch (reg) {
		case CNTHCTL_EL2:
			/*
			 * If E2H=1, some of the bits are backed by
			 * CNTKCTL_EL1, while the rest is kept in memory.
			 * Yes, this is fun stuff.
			 */
			write_sysreg_el1(val, SYS_CNTKCTL);
			break;
		default:
			WARN_ON_ONCE(1);
		}
	}

	if (loc.loc & SR_LOC_LOADED) {
		enum vcpu_sysreg map_reg = reg;
		u64 xlated_val;

		if (reg >= __SANITISED_REG_START__)
			val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

		if (loc.loc & SR_LOC_MAPPED)
			map_reg = loc.map_reg;

		if (loc.loc & SR_LOC_XLATED)
			xlated_val = loc.xlate(val);
		else
			xlated_val = val;

		write_sr_to_cpu(map_reg, xlated_val);

		/*
		 * Fall through to write the backing store anyway, which
		 * allows translated registers to be directly read without a
		 * reverse translation.
		 */
	}

	__vcpu_assign_sys_reg(vcpu, reg, val);
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
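
/*
 * For example: a CTR_EL0.DminLine value of 4 describes a minimum D-cache
 * line of 2^4 words = 64 bytes, and the helper above returns 4 + 2 = 6,
 * i.e. Log2(64).
 */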

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm))
		return undef_access(vcpu, p, r);

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (p->is_write)
		return ignore_write(vcpu, p);

	if (p->Op1 == 4) {	/* ICC_SRE_EL2 */
		p->regval = KVM_ICC_SRE_EL2;
	} else {		/* ICC_SRE_EL1 */
		p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	}

	return true;
}

static bool access_gic_dir(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (!p->is_write)
		return undef_access(vcpu, p, r);

	vgic_v3_deactivate(vcpu, p->regval);

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
		return undef_access(vcpu, p, r);

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_debug_handle_oslar(vcpu, p->regval);
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leave top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;

	switch (rd->Op2) {
	case 0b100:
		return &dbg->dbg_bvr[rd->CRm];
	case 0b101:
		return &dbg->dbg_bcr[rd->CRm];
	case 0b110:
		return &dbg->dbg_wvr[rd->CRm];
	case 0b111:
		return &dbg->dbg_wcr[rd->CRm];
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		return NULL;
	}
}

static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return false;

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, reg);
	else
		dbg_to_reg(vcpu, p, rd, reg);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}

static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*reg = val;
	return 0;
}

static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 *val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*val = *reg;
	return 0;
}

static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	/*
	 * Bail early if we couldn't find storage for the register, the
	 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
	 * being run.
	 */
	if (!reg)
		return 0;

	*reg = rd->val;
	return rd->val;
}

static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *r)
{
	return REG_HIDDEN;
}

static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.nr_pmu_counters;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);

	return __vcpu_sys_reg(vcpu, r->reg);
}
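
/*
 * The helpers below implement the PMUSERENR_EL0-based access checks: an EL0
 * access to a PMU register is only allowed if the relevant enable bit
 * (EN, SW, CR or ER) is set, or if the vCPU is in a privileged mode;
 * otherwise an UNDEF is injected into the guest.
 */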

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & PMSELR_EL0_SEL_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	kvm_pmu_set_counter_value_user(vcpu, idx, val);
	return 0;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);

		kvm_pmu_reprogram_counter_mask(vcpu, val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return undef_access(vcpu, p, r);

		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
				      (p->regval & ARMV8_PMU_USERENR_MASK));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    !vcpu_has_nv(vcpu) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.nr_pmu_counters = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_assign_sys_reg(vcpu, r->reg, val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .set_user = set_pmu_evcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

/* Macro to expand the AMU counter and type registers*/
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
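
/*
 * access_arch_timer() below decodes which architected timer (physical,
 * virtual, or the EL2 hyp timers) and which of its registers (CTL, CVAL,
 * TVAL or the counter itself) is being accessed, taking the E2H-dependent
 * redirection of the EL0 registers into account when the vCPU is in hyp
 * context.
 */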

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_AARCH32_CNTP_TVAL:
	case SYS_CNTP_TVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHP_TVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHV_TVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTP_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_AARCH32_CNTP_CTL:
	case SYS_CNTP_CTL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHP_CTL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHV_CTL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTP_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_AARCH32_CNTP_CVAL:
	case SYS_CNTP_CVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHP_CVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHV_CVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTPCT:
	case SYS_AARCH32_CNTPCTSS:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTVCT:
	case SYS_AARCH32_CNTVCTSS:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		return undef_access(vcpu, p, r);
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static int arch_timer_set_user(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	switch (reg_to_encoding(rd)) {
	case SYS_CNTV_CTL_EL0:
	case SYS_CNTP_CTL_EL0:
	case SYS_CNTHV_CTL_EL2:
	case SYS_CNTHP_CTL_EL2:
		val &= ~ARCH_TIMER_CTRL_IT_STAT;
		break;
	case SYS_CNTVCT_EL0:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
			timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
		return 0;
	case SYS_CNTPCT_EL0:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
			timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
		return 0;
	}

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
	return 0;
}

static int arch_timer_get_user(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 *val)
{
	switch (reg_to_encoding(rd)) {
	case SYS_CNTVCT_EL0:
		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
		break;
	case SYS_CNTPCT_EL0:
		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
		break;
	default:
		*val = __vcpu_sys_reg(vcpu, rd->reg);
	}

	return 0;
}

static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against idreg's KVM sanitised limit return from reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not in
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		val = sanitise_id_aa64dfr0_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR0_EL1:
		val = sanitise_id_aa64pfr0_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR1_EL1:
		val = sanitise_id_aa64pfr1_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR2_EL1:
		val &= ID_AA64PFR2_EL1_FPMR |
		       (kvm_has_mte(vcpu->kvm) ?
			ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
			0);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ID_AA64ISAR1_EL1_APA |
				 ID_AA64ISAR1_EL1_API |
				 ID_AA64ISAR1_EL1_GPA |
				 ID_AA64ISAR1_EL1_GPI);
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ID_AA64ISAR2_EL1_APA3 |
				 ID_AA64ISAR2_EL1_GPA3);
		if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
		    has_broken_cntvoff())
			val &= ~ID_AA64ISAR2_EL1_WFxT;
		break;
	case SYS_ID_AA64ISAR3_EL1:
		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
		       ID_AA64ISAR3_EL1_FAMINMAX;
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		val &= ~ID_AA64MMFR2_EL1_NV;
		break;
	case SYS_ID_AA64MMFR3_EL1:
		val &= ID_AA64MMFR3_EL1_TCRX |
		       ID_AA64MMFR3_EL1_SCTLRX |
		       ID_AA64MMFR3_EL1_S1POE |
		       ID_AA64MMFR3_EL1_S1PIE;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ID_MMFR4_EL1_CCIDX;
		break;
	}

	if (vcpu_has_nv(vcpu))
		val = limit_nv_id_reg(vcpu->kvm, id, val);

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
}

static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
 * registers KVM maintains on a per-VM basis.
 *
 * Additionally, the implementation ID registers and CTR_EL0 are handled as
 * per-VM registers.
 */
static inline bool is_vm_ftr_id_reg(u32 id)
{
	switch (id) {
	case SYS_CTR_EL0:
	case SYS_MIDR_EL1:
	case SYS_REVIDR_EL1:
	case SYS_AIDR_EL1:
		return true;
	default:
		return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
			sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
			sys_reg_CRm(id) < 8);
	}
}

static inline bool is_vcpu_ftr_id_reg(u32 id)
{
	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
}

static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
		return 0;

	return REG_HIDDEN;
}

static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_fpmr(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (vgic_is_v3(vcpu->kvm)) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	/*
	 * MPAM is disabled by default as KVM also needs a set of PARTID to
	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
	 * older kernels let the guest see the ID bit.
	 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	return val;
}

static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!kvm_has_mte(vcpu->kvm)) {
		val &= ~ID_AA64PFR1_EL1_MTE;
		val &= ~ID_AA64PFR1_EL1_MTE_frac;
	}

	if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
	      SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
		val &= ~ID_AA64PFR1_EL1_RAS_frac;

	val &= ~ID_AA64PFR1_EL1_SME;
	val &= ~ID_AA64PFR1_EL1_RNDR_trap;
	val &= ~ID_AA64PFR1_EL1_NMI;
	val &= ~ID_AA64PFR1_EL1_GCS;
	val &= ~ID_AA64PFR1_EL1_THE;
	val &= ~ID_AA64PFR1_EL1_MTEX;
	val &= ~ID_AA64PFR1_EL1_PFAR;
	val &= ~ID_AA64PFR1_EL1_MPAM_frac;

	return val;
}

static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	/* Hide BRBE from guests */
	val &= ~ID_AA64DFR0_EL1_BRBE_MASK;

	return val;
}

/*
 * Older versions of KVM erroneously claim support for FEAT_DoubleLock with
 * NV-enabled VMs on unsupporting hardware. Silently ignore the incorrect
 * value if it is consistent with the bug.
 */
static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
{
	u8 host, user;

	if (!vcpu_has_nv(vcpu))
		return false;

	host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
			     read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
	user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);

	return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
	       user == ID_AA64DFR0_EL1_DoubleLock_IMP;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
2095 */ 2096 if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP) 2097 return -EINVAL; 2098 2099 if (ignore_feat_doublelock(vcpu, val)) { 2100 val &= ~ID_AA64DFR0_EL1_DoubleLock; 2101 val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI); 2102 } 2103 2104 return set_id_reg(vcpu, rd, val); 2105 } 2106 2107 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, 2108 const struct sys_reg_desc *rd) 2109 { 2110 u8 perfmon; 2111 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1); 2112 2113 val &= ~ID_DFR0_EL1_PerfMon_MASK; 2114 if (kvm_vcpu_has_pmu(vcpu)) { 2115 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); 2116 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon); 2117 } 2118 2119 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8); 2120 2121 return val; 2122 } 2123 2124 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, 2125 const struct sys_reg_desc *rd, 2126 u64 val) 2127 { 2128 u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val); 2129 u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val); 2130 2131 if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) { 2132 val &= ~ID_DFR0_EL1_PerfMon_MASK; 2133 perfmon = 0; 2134 } 2135 2136 /* 2137 * Allow DFR0_EL1.PerfMon to be set from userspace as long as 2138 * it doesn't promise more than what the HW gives us on the 2139 * AArch64 side (as everything is emulated with that), and 2140 * that it is at least a PMUv3. 2141 */ 2142 if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3) 2143 return -EINVAL; 2144 2145 if (copdbg < ID_DFR0_EL1_CopDbg_Armv8) 2146 return -EINVAL; 2147 2148 return set_id_reg(vcpu, rd, val); 2149 } 2150 2151 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, 2152 const struct sys_reg_desc *rd, u64 user_val) 2153 { 2154 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 2155 u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK; 2156 2157 /* 2158 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits 2159 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to 2160 * guests, but didn't add trap handling. KVM doesn't support MPAM and 2161 * always returns an UNDEF for these registers. The guest must see 0 2162 * for this field. 2163 * 2164 * But KVM must also accept values from user-space that were provided 2165 * by KVM. On CPUs that support MPAM, permit user-space to write 2166 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field. 2167 */ 2168 if ((hw_val & mpam_mask) == (user_val & mpam_mask)) 2169 user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK; 2170 2171 /* Fail userspace's request to disable the AA64 ISA at EL{0,1,2} */ 2172 if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) || 2173 !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) || 2174 (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val))) 2175 return -EINVAL; 2176 2177 /* 2178 * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then 2179 * we support GICv3. Fail attempts to do anything but set that to IMP.
2180 */ 2181 if (vgic_is_v3_compat(vcpu->kvm) && 2182 FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP) 2183 return -EINVAL; 2184 2185 return set_id_reg(vcpu, rd, user_val); 2186 } 2187 2188 static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, 2189 const struct sys_reg_desc *rd, u64 user_val) 2190 { 2191 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); 2192 u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK; 2193 u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val); 2194 u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val); 2195 u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val); 2196 2197 /* See set_id_aa64pfr0_el1 for comment about MPAM */ 2198 if ((hw_val & mpam_mask) == (user_val & mpam_mask)) 2199 user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK; 2200 2201 /* 2202 * Previously MTE_frac was hidden from guest. However, if the 2203 * hardware supports MTE2 but not MTE_ASYM_FAULT then a value 2204 * of 0 for this field indicates that the hardware supports 2205 * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported. 2206 * 2207 * As KVM must accept values from KVM provided by user-space, 2208 * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set 2209 * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid 2210 * incorrectly claiming hardware support for MTE_ASYNC in the 2211 * guest. 2212 */ 2213 2214 if (mte == ID_AA64PFR1_EL1_MTE_MTE2 && 2215 hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI && 2216 user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) { 2217 user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK; 2218 user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK; 2219 } 2220 2221 return set_id_reg(vcpu, rd, user_val); 2222 } 2223 2224 /* 2225 * Allow userspace to de-feature a stage-2 translation granule but prevent it 2226 * from claiming the impossible. 2227 */ 2228 #define tgran2_val_allowed(tg, safe, user) \ 2229 ({ \ 2230 u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \ 2231 u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \ 2232 \ 2233 __s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \ 2234 }) 2235 2236 static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu, 2237 const struct sys_reg_desc *rd, u64 user_val) 2238 { 2239 u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd); 2240 2241 if (!vcpu_has_nv(vcpu)) 2242 return set_id_reg(vcpu, rd, user_val); 2243 2244 if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) || 2245 !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) || 2246 !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val)) 2247 return -EINVAL; 2248 2249 return set_id_reg(vcpu, rd, user_val); 2250 } 2251 2252 static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu, 2253 const struct sys_reg_desc *rd, u64 user_val) 2254 { 2255 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); 2256 u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK; 2257 2258 /* 2259 * We made the mistake to expose the now deprecated NV field, 2260 * so allow userspace to write it, but silently ignore it. 2261 */ 2262 if ((hw_val & nv_mask) == (user_val & nv_mask)) 2263 user_val &= ~nv_mask; 2264 2265 return set_id_reg(vcpu, rd, user_val); 2266 } 2267 2268 static int set_ctr_el0(struct kvm_vcpu *vcpu, 2269 const struct sys_reg_desc *rd, u64 user_val) 2270 { 2271 u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val); 2272 2273 /* 2274 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved. 2275 * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based 2276 * on what hardware reports. 
2277 * 2278 * Using a VIPT software model on PIPT will lead to over invalidation, 2279 * but still correct. Hence, we can allow downgrading PIPT to VIPT, 2280 * but not the other way around. This is handled via arm64_ftr_safe_value() 2281 * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value 2282 * set as VIPT. 2283 */ 2284 switch (user_L1Ip) { 2285 case CTR_EL0_L1Ip_RESERVED_VPIPT: 2286 case CTR_EL0_L1Ip_RESERVED_AIVIVT: 2287 return -EINVAL; 2288 case CTR_EL0_L1Ip_VIPT: 2289 case CTR_EL0_L1Ip_PIPT: 2290 return set_id_reg(vcpu, rd, user_val); 2291 default: 2292 return -ENOENT; 2293 } 2294 } 2295 2296 /* 2297 * cpufeature ID register user accessors 2298 * 2299 * For now, these registers are immutable for userspace, so no values 2300 * are stored, and for set_id_reg() we don't allow the effective value 2301 * to be changed. 2302 */ 2303 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2304 u64 *val) 2305 { 2306 /* 2307 * Avoid locking if the VM has already started, as the ID registers are 2308 * guaranteed to be invariant at that point. 2309 */ 2310 if (kvm_vm_has_ran_once(vcpu->kvm)) { 2311 *val = read_id_reg(vcpu, rd); 2312 return 0; 2313 } 2314 2315 mutex_lock(&vcpu->kvm->arch.config_lock); 2316 *val = read_id_reg(vcpu, rd); 2317 mutex_unlock(&vcpu->kvm->arch.config_lock); 2318 2319 return 0; 2320 } 2321 2322 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2323 u64 val) 2324 { 2325 u32 id = reg_to_encoding(rd); 2326 int ret; 2327 2328 mutex_lock(&vcpu->kvm->arch.config_lock); 2329 2330 /* 2331 * Once the VM has started the ID registers are immutable. Reject any 2332 * write that does not match the final register value. 2333 */ 2334 if (kvm_vm_has_ran_once(vcpu->kvm)) { 2335 if (val != read_id_reg(vcpu, rd)) 2336 ret = -EBUSY; 2337 else 2338 ret = 0; 2339 2340 mutex_unlock(&vcpu->kvm->arch.config_lock); 2341 return ret; 2342 } 2343 2344 ret = arm64_check_features(vcpu, rd, val); 2345 if (!ret) 2346 kvm_set_vm_id_reg(vcpu->kvm, id, val); 2347 2348 mutex_unlock(&vcpu->kvm->arch.config_lock); 2349 2350 /* 2351 * arm64_check_features() returns -E2BIG to indicate the register's 2352 * feature set is a superset of the maximally-allowed register value. 2353 * While it would be nice to precisely describe this to userspace, the 2354 * existing UAPI for KVM_SET_ONE_REG has it that invalid register 2355 * writes return -EINVAL. 
2356 */ 2357 if (ret == -E2BIG) 2358 ret = -EINVAL; 2359 return ret; 2360 } 2361 2362 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val) 2363 { 2364 u64 *p = __vm_id_reg(&kvm->arch, reg); 2365 2366 lockdep_assert_held(&kvm->arch.config_lock); 2367 2368 if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm)) 2369 return; 2370 2371 *p = val; 2372 } 2373 2374 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2375 u64 *val) 2376 { 2377 *val = 0; 2378 return 0; 2379 } 2380 2381 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2382 u64 val) 2383 { 2384 return 0; 2385 } 2386 2387 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2388 const struct sys_reg_desc *r) 2389 { 2390 if (p->is_write) 2391 return write_to_read_only(vcpu, p, r); 2392 2393 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0); 2394 return true; 2395 } 2396 2397 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2398 const struct sys_reg_desc *r) 2399 { 2400 if (p->is_write) 2401 return write_to_read_only(vcpu, p, r); 2402 2403 p->regval = __vcpu_sys_reg(vcpu, r->reg); 2404 return true; 2405 } 2406 2407 /* 2408 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary 2409 * by the physical CPU which the vcpu currently resides in. 2410 */ 2411 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 2412 { 2413 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); 2414 u64 clidr; 2415 u8 loc; 2416 2417 if ((ctr_el0 & CTR_EL0_IDC)) { 2418 /* 2419 * Data cache clean to the PoU is not required so LoUU and LoUIS 2420 * will not be set and a unified cache, which will be marked as 2421 * LoC, will be added. 2422 * 2423 * If not DIC, let the unified cache L2 so that an instruction 2424 * cache can be added as L1 later. 2425 */ 2426 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2; 2427 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc); 2428 } else { 2429 /* 2430 * Data cache clean to the PoU is required so let L1 have a data 2431 * cache and mark it as LoUU and LoUIS. As L1 has a data cache, 2432 * it can be marked as LoC too. 2433 */ 2434 loc = 1; 2435 clidr = 1 << CLIDR_LOUU_SHIFT; 2436 clidr |= 1 << CLIDR_LOUIS_SHIFT; 2437 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1); 2438 } 2439 2440 /* 2441 * Instruction cache invalidation to the PoU is required so let L1 have 2442 * an instruction cache. If L1 already has a data cache, it will be 2443 * CACHE_TYPE_SEPARATE. 2444 */ 2445 if (!(ctr_el0 & CTR_EL0_DIC)) 2446 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1); 2447 2448 clidr |= loc << CLIDR_LOC_SHIFT; 2449 2450 /* 2451 * Add tag cache unified to data cache. Allocation tags and data are 2452 * unified in a cache line so that it looks valid even if there is only 2453 * one cache line. 
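 * (The value 2 written to Ttype for the LoC level below is the CLIDR_EL1
 * encoding for a unified Allocation Tag and Data cache with tags and data
 * in unified lines.)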
2454 */ 2455 if (kvm_has_mte(vcpu->kvm)) 2456 clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc); 2457 2458 __vcpu_assign_sys_reg(vcpu, r->reg, clidr); 2459 2460 return __vcpu_sys_reg(vcpu, r->reg); 2461 } 2462 2463 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2464 u64 val) 2465 { 2466 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); 2467 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val)); 2468 2469 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc)) 2470 return -EINVAL; 2471 2472 __vcpu_assign_sys_reg(vcpu, rd->reg, val); 2473 2474 return 0; 2475 } 2476 2477 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2478 const struct sys_reg_desc *r) 2479 { 2480 int reg = r->reg; 2481 2482 if (p->is_write) 2483 vcpu_write_sys_reg(vcpu, p->regval, reg); 2484 else 2485 p->regval = vcpu_read_sys_reg(vcpu, reg); 2486 return true; 2487 } 2488 2489 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2490 const struct sys_reg_desc *r) 2491 { 2492 u32 csselr; 2493 2494 if (p->is_write) 2495 return write_to_read_only(vcpu, p, r); 2496 2497 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); 2498 csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD; 2499 if (csselr < CSSELR_MAX) 2500 p->regval = get_ccsidr(vcpu, csselr); 2501 2502 return true; 2503 } 2504 2505 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, 2506 const struct sys_reg_desc *rd) 2507 { 2508 if (kvm_has_mte(vcpu->kvm)) 2509 return 0; 2510 2511 return REG_HIDDEN; 2512 } 2513 2514 #define MTE_REG(name) { \ 2515 SYS_DESC(SYS_##name), \ 2516 .access = undef_access, \ 2517 .reset = reset_unknown, \ 2518 .reg = name, \ 2519 .visibility = mte_visibility, \ 2520 } 2521 2522 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu, 2523 const struct sys_reg_desc *rd) 2524 { 2525 if (vcpu_has_nv(vcpu)) 2526 return 0; 2527 2528 return REG_HIDDEN; 2529 } 2530 2531 static bool bad_vncr_trap(struct kvm_vcpu *vcpu, 2532 struct sys_reg_params *p, 2533 const struct sys_reg_desc *r) 2534 { 2535 /* 2536 * We really shouldn't be here, and this is likely the result 2537 * of a misconfigured trap, as this register should target the 2538 * VNCR page, and nothing else. 2539 */ 2540 return bad_trap(vcpu, p, r, 2541 "trap of VNCR-backed register"); 2542 } 2543 2544 static bool bad_redir_trap(struct kvm_vcpu *vcpu, 2545 struct sys_reg_params *p, 2546 const struct sys_reg_desc *r) 2547 { 2548 /* 2549 * We really shouldn't be here, and this is likely the result 2550 * of a misconfigured trap, as this register should target the 2551 * corresponding EL1, and nothing else. 
2552 */ 2553 return bad_trap(vcpu, p, r, 2554 "trap of EL2 register redirected to EL1"); 2555 } 2556 2557 #define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \ 2558 SYS_DESC(SYS_##name), \ 2559 .access = acc, \ 2560 .reset = rst, \ 2561 .reg = name, \ 2562 .get_user = gu, \ 2563 .set_user = su, \ 2564 .visibility = filter, \ 2565 .val = v, \ 2566 } 2567 2568 #define EL2_REG_FILTERED(name, acc, rst, v, filter) \ 2569 SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter) 2570 2571 #define EL2_REG(name, acc, rst, v) \ 2572 EL2_REG_FILTERED(name, acc, rst, v, el2_visibility) 2573 2574 #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v) 2575 #define EL2_REG_VNCR_FILT(name, vis) \ 2576 EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis) 2577 #define EL2_REG_VNCR_GICv3(name) \ 2578 EL2_REG_VNCR_FILT(name, hidden_visibility) 2579 #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) 2580 2581 #define TIMER_REG(name, vis) \ 2582 SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \ 2583 arch_timer_get_user, arch_timer_set_user, vis) 2584 2585 /* 2586 * Since the reset() callback and the val field are not otherwise used for 2587 * idregs, they are repurposed for them as follows: 2588 * reset() returns the KVM sanitised register value, which is the same as the 2589 * host kernel sanitised value if KVM applies no additional sanitisation. 2590 * val is used as a mask of the idreg fields that are writable from userspace: 2591 * only bits set to 1 may be changed. This mask might no longer be 2592 * necessary in the future once all ID registers are enabled as writable 2593 * from userspace. 2594 */ 2595 2596 #define ID_DESC_DEFAULT_CALLBACKS \ 2597 .access = access_id_reg, \ 2598 .get_user = get_id_reg, \ 2599 .set_user = set_id_reg, \ 2600 .visibility = id_visibility, \ 2601 .reset = kvm_read_sanitised_id_reg 2602 2603 #define ID_DESC(name) \ 2604 SYS_DESC(SYS_##name), \ 2605 ID_DESC_DEFAULT_CALLBACKS 2606 2607 /* sys_reg_desc initialiser for known cpufeature ID registers */ 2608 #define ID_SANITISED(name) { \ 2609 ID_DESC(name), \ 2610 .val = 0, \ 2611 } 2612 2613 /* sys_reg_desc initialiser for writable ID registers */ 2614 #define ID_WRITABLE(name, mask) { \ 2615 ID_DESC(name), \ 2616 .val = mask, \ 2617 } 2618 2619 /* 2620 * 32bit ID regs are fully writable when the guest is 32bit 2621 * capable. Nothing in the KVM code should rely on 32bit features 2622 * anyway, only 64bit, so let the VMM do its worst. 2623 */ 2624 #define AA32_ID_WRITABLE(name) { \ 2625 ID_DESC(name), \ 2626 .visibility = aa32_id_visibility, \ 2627 .val = GENMASK(31, 0), \ 2628 } 2629 2630 /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */ 2631 #define ID_FILTERED(sysreg, name, mask) { \ 2632 ID_DESC(sysreg), \ 2633 .set_user = set_##name, \ 2634 .val = (mask), \ 2635 } 2636 2637 /* 2638 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID 2639 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 2640 * (1 <= crm < 8, 0 <= Op2 < 8). 2641 */ 2642 #define ID_UNALLOCATED(crm, op2) { \ 2643 .name = "S3_0_0_" #crm "_" #op2, \ 2644 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ 2645 ID_DESC_DEFAULT_CALLBACKS, \ 2646 .visibility = raz_visibility, \ 2647 .val = 0, \ 2648 } 2649 2650 /* 2651 * sys_reg_desc initialiser for known ID registers that we hide from guests. 2652 * For now, these are exposed just like unallocated ID regs: they appear 2653 * RAZ for the guest.
2654 */ 2655 #define ID_HIDDEN(name) { \ 2656 ID_DESC(name), \ 2657 .visibility = raz_visibility, \ 2658 .val = 0, \ 2659 } 2660 2661 static bool access_sp_el1(struct kvm_vcpu *vcpu, 2662 struct sys_reg_params *p, 2663 const struct sys_reg_desc *r) 2664 { 2665 if (p->is_write) 2666 __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval); 2667 else 2668 p->regval = __vcpu_sys_reg(vcpu, SP_EL1); 2669 2670 return true; 2671 } 2672 2673 static bool access_elr(struct kvm_vcpu *vcpu, 2674 struct sys_reg_params *p, 2675 const struct sys_reg_desc *r) 2676 { 2677 if (p->is_write) 2678 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); 2679 else 2680 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); 2681 2682 return true; 2683 } 2684 2685 static bool access_spsr(struct kvm_vcpu *vcpu, 2686 struct sys_reg_params *p, 2687 const struct sys_reg_desc *r) 2688 { 2689 if (p->is_write) 2690 __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval); 2691 else 2692 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); 2693 2694 return true; 2695 } 2696 2697 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu, 2698 struct sys_reg_params *p, 2699 const struct sys_reg_desc *r) 2700 { 2701 if (p->is_write) 2702 __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval); 2703 else 2704 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); 2705 2706 return true; 2707 } 2708 2709 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 2710 { 2711 u64 val = r->val; 2712 2713 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) 2714 val |= HCR_E2H; 2715 2716 __vcpu_assign_sys_reg(vcpu, r->reg, val); 2717 2718 return __vcpu_sys_reg(vcpu, r->reg); 2719 } 2720 2721 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu, 2722 const struct sys_reg_desc *rd, 2723 unsigned int (*fn)(const struct kvm_vcpu *, 2724 const struct sys_reg_desc *)) 2725 { 2726 return el2_visibility(vcpu, rd) ?: fn(vcpu, rd); 2727 } 2728 2729 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu, 2730 const struct sys_reg_desc *rd) 2731 { 2732 return __el2_visibility(vcpu, rd, sve_visibility); 2733 } 2734 2735 static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu, 2736 const struct sys_reg_desc *rd) 2737 { 2738 if (el2_visibility(vcpu, rd) == 0 && 2739 kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) 2740 return 0; 2741 2742 return REG_HIDDEN; 2743 } 2744 2745 static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu, 2746 const struct sys_reg_desc *rd) 2747 { 2748 if (kvm_has_sctlr2(vcpu->kvm)) 2749 return 0; 2750 2751 return REG_HIDDEN; 2752 } 2753 2754 static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu, 2755 const struct sys_reg_desc *rd) 2756 { 2757 return __el2_visibility(vcpu, rd, sctlr2_visibility); 2758 } 2759 2760 static bool access_zcr_el2(struct kvm_vcpu *vcpu, 2761 struct sys_reg_params *p, 2762 const struct sys_reg_desc *r) 2763 { 2764 unsigned int vq; 2765 2766 if (guest_hyp_sve_traps_enabled(vcpu)) { 2767 kvm_inject_nested_sve_trap(vcpu); 2768 return false; 2769 } 2770 2771 if (!p->is_write) { 2772 p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2); 2773 return true; 2774 } 2775 2776 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; 2777 vq = min(vq, vcpu_sve_max_vq(vcpu)); 2778 __vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1); 2779 return true; 2780 } 2781 2782 static bool access_gic_vtr(struct kvm_vcpu *vcpu, 2783 struct sys_reg_params *p, 2784 const struct sys_reg_desc *r) 2785 { 2786 if (p->is_write) 2787 return write_to_read_only(vcpu, p, r); 2788 2789 p->regval = kvm_get_guest_vtr_el2(); 2790 2791 return 
true; 2792 } 2793 2794 static bool access_gic_misr(struct kvm_vcpu *vcpu, 2795 struct sys_reg_params *p, 2796 const struct sys_reg_desc *r) 2797 { 2798 if (p->is_write) 2799 return write_to_read_only(vcpu, p, r); 2800 2801 p->regval = vgic_v3_get_misr(vcpu); 2802 2803 return true; 2804 } 2805 2806 static bool access_gic_eisr(struct kvm_vcpu *vcpu, 2807 struct sys_reg_params *p, 2808 const struct sys_reg_desc *r) 2809 { 2810 if (p->is_write) 2811 return write_to_read_only(vcpu, p, r); 2812 2813 p->regval = vgic_v3_get_eisr(vcpu); 2814 2815 return true; 2816 } 2817 2818 static bool access_gic_elrsr(struct kvm_vcpu *vcpu, 2819 struct sys_reg_params *p, 2820 const struct sys_reg_desc *r) 2821 { 2822 if (p->is_write) 2823 return write_to_read_only(vcpu, p, r); 2824 2825 p->regval = vgic_v3_get_elrsr(vcpu); 2826 2827 return true; 2828 } 2829 2830 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu, 2831 const struct sys_reg_desc *rd) 2832 { 2833 if (kvm_has_s1poe(vcpu->kvm)) 2834 return 0; 2835 2836 return REG_HIDDEN; 2837 } 2838 2839 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu, 2840 const struct sys_reg_desc *rd) 2841 { 2842 return __el2_visibility(vcpu, rd, s1poe_visibility); 2843 } 2844 2845 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu, 2846 const struct sys_reg_desc *rd) 2847 { 2848 if (kvm_has_tcr2(vcpu->kvm)) 2849 return 0; 2850 2851 return REG_HIDDEN; 2852 } 2853 2854 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu, 2855 const struct sys_reg_desc *rd) 2856 { 2857 return __el2_visibility(vcpu, rd, tcr2_visibility); 2858 } 2859 2860 static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu, 2861 const struct sys_reg_desc *rd) 2862 { 2863 if (el2_visibility(vcpu, rd) == 0 && 2864 kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2)) 2865 return 0; 2866 2867 return REG_HIDDEN; 2868 } 2869 2870 static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu, 2871 const struct sys_reg_desc *rd) 2872 { 2873 if (el2_visibility(vcpu, rd) == 0 && 2874 kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP)) 2875 return 0; 2876 2877 return REG_HIDDEN; 2878 } 2879 2880 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu, 2881 const struct sys_reg_desc *rd) 2882 { 2883 if (kvm_has_s1pie(vcpu->kvm)) 2884 return 0; 2885 2886 return REG_HIDDEN; 2887 } 2888 2889 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu, 2890 const struct sys_reg_desc *rd) 2891 { 2892 return __el2_visibility(vcpu, rd, s1pie_visibility); 2893 } 2894 2895 static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu, 2896 const struct sys_reg_desc *rd) 2897 { 2898 if (vcpu_has_nv(vcpu) && 2899 !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0)) 2900 return 0; 2901 2902 return REG_HIDDEN; 2903 } 2904 2905 static bool access_mdcr(struct kvm_vcpu *vcpu, 2906 struct sys_reg_params *p, 2907 const struct sys_reg_desc *r) 2908 { 2909 u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2); 2910 2911 if (!p->is_write) { 2912 p->regval = old; 2913 return true; 2914 } 2915 2916 val = p->regval; 2917 hpmn = FIELD_GET(MDCR_EL2_HPMN, val); 2918 2919 /* 2920 * If HPMN is out of bounds, limit it to what we actually 2921 * support. This matches the UNKNOWN definition of the field 2922 * in that case, and keeps the emulation simple. Sort of. 
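 * As an illustration (with made-up numbers): on a VM configured with
 * 6 counters, a guest write of HPMN == 10 is stored back as HPMN == 6.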
2923 */ 2924 if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { 2925 hpmn = vcpu->kvm->arch.nr_pmu_counters; 2926 u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN); 2927 } 2928 2929 __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); 2930 2931 /* 2932 * Request a reload of the PMU to enable/disable the counters 2933 * affected by HPME. 2934 */ 2935 if ((old ^ val) & MDCR_EL2_HPME) 2936 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 2937 2938 return true; 2939 } 2940 2941 static bool access_ras(struct kvm_vcpu *vcpu, 2942 struct sys_reg_params *p, 2943 const struct sys_reg_desc *r) 2944 { 2945 struct kvm *kvm = vcpu->kvm; 2946 2947 switch(reg_to_encoding(r)) { 2948 case SYS_ERXPFGCDN_EL1: 2949 case SYS_ERXPFGCTL_EL1: 2950 case SYS_ERXPFGF_EL1: 2951 case SYS_ERXMISC2_EL1: 2952 case SYS_ERXMISC3_EL1: 2953 if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) || 2954 (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) && 2955 kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) { 2956 kvm_inject_undefined(vcpu); 2957 return false; 2958 } 2959 break; 2960 default: 2961 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) { 2962 kvm_inject_undefined(vcpu); 2963 return false; 2964 } 2965 } 2966 2967 return trap_raz_wi(vcpu, p, r); 2968 } 2969 2970 /* 2971 * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and 2972 * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them. 2973 * The values made visible to userspace were the register values of the boot 2974 * CPU. 2975 * 2976 * At the same time, reads from these registers at EL1 previously were not 2977 * trapped, allowing the guest to read the actual hardware value. On big-little 2978 * machines, this means the VM can see different values depending on where a 2979 * given vCPU got scheduled. 2980 * 2981 * These registers are now trapped as collateral damage from SME, and what 2982 * follows attempts to give a user / guest view consistent with the existing 2983 * ABI. 2984 */ 2985 static bool access_imp_id_reg(struct kvm_vcpu *vcpu, 2986 struct sys_reg_params *p, 2987 const struct sys_reg_desc *r) 2988 { 2989 if (p->is_write) 2990 return write_to_read_only(vcpu, p, r); 2991 2992 /* 2993 * Return the VM-scoped implementation ID register values if userspace 2994 * has made them writable. 2995 */ 2996 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags)) 2997 return access_id_reg(vcpu, p, r); 2998 2999 /* 3000 * Otherwise, fall back to the old behavior of returning the value of 3001 * the current CPU. 
3002 */ 3003 switch (reg_to_encoding(r)) { 3004 case SYS_REVIDR_EL1: 3005 p->regval = read_sysreg(revidr_el1); 3006 break; 3007 case SYS_AIDR_EL1: 3008 p->regval = read_sysreg(aidr_el1); 3009 break; 3010 default: 3011 WARN_ON_ONCE(1); 3012 } 3013 3014 return true; 3015 } 3016 3017 static u64 __ro_after_init boot_cpu_midr_val; 3018 static u64 __ro_after_init boot_cpu_revidr_val; 3019 static u64 __ro_after_init boot_cpu_aidr_val; 3020 3021 static void init_imp_id_regs(void) 3022 { 3023 boot_cpu_midr_val = read_sysreg(midr_el1); 3024 boot_cpu_revidr_val = read_sysreg(revidr_el1); 3025 boot_cpu_aidr_val = read_sysreg(aidr_el1); 3026 } 3027 3028 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 3029 { 3030 switch (reg_to_encoding(r)) { 3031 case SYS_MIDR_EL1: 3032 return boot_cpu_midr_val; 3033 case SYS_REVIDR_EL1: 3034 return boot_cpu_revidr_val; 3035 case SYS_AIDR_EL1: 3036 return boot_cpu_aidr_val; 3037 default: 3038 KVM_BUG_ON(1, vcpu->kvm); 3039 return 0; 3040 } 3041 } 3042 3043 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 3044 u64 val) 3045 { 3046 struct kvm *kvm = vcpu->kvm; 3047 u64 expected; 3048 3049 guard(mutex)(&kvm->arch.config_lock); 3050 3051 expected = read_id_reg(vcpu, r); 3052 if (expected == val) 3053 return 0; 3054 3055 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)) 3056 return -EINVAL; 3057 3058 /* 3059 * Once the VM has started the ID registers are immutable. Reject the 3060 * write if userspace tries to change it. 3061 */ 3062 if (kvm_vm_has_ran_once(kvm)) 3063 return -EBUSY; 3064 3065 /* 3066 * Any value is allowed for the implementation ID registers so long as 3067 * it is within the writable mask. 3068 */ 3069 if ((val & r->val) != val) 3070 return -EINVAL; 3071 3072 kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val); 3073 return 0; 3074 } 3075 3076 #define IMPLEMENTATION_ID(reg, mask) { \ 3077 SYS_DESC(SYS_##reg), \ 3078 .access = access_imp_id_reg, \ 3079 .get_user = get_id_reg, \ 3080 .set_user = set_imp_id_reg, \ 3081 .reset = reset_imp_id_reg, \ 3082 .val = mask, \ 3083 } 3084 3085 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 3086 { 3087 __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters); 3088 return vcpu->kvm->arch.nr_pmu_counters; 3089 } 3090 3091 /* 3092 * Architected system registers. 3093 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 3094 * 3095 * Debug handling: We do trap most, if not all debug related system 3096 * registers. The implementation is good enough to ensure that a guest 3097 * can use these with minimal performance degradation. The drawback is 3098 * that we don't implement any of the external debug architecture. 3099 * This should be revisited if we ever encounter a more demanding 3100 * guest... 
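 * (The sort order required above matters because the table is binary
 * searched by encoding when a trap is handled.)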
3101 */ 3102 static const struct sys_reg_desc sys_reg_descs[] = { 3103 DBG_BCR_BVR_WCR_WVR_EL1(0), 3104 DBG_BCR_BVR_WCR_WVR_EL1(1), 3105 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, 3106 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, 3107 DBG_BCR_BVR_WCR_WVR_EL1(2), 3108 DBG_BCR_BVR_WCR_WVR_EL1(3), 3109 DBG_BCR_BVR_WCR_WVR_EL1(4), 3110 DBG_BCR_BVR_WCR_WVR_EL1(5), 3111 DBG_BCR_BVR_WCR_WVR_EL1(6), 3112 DBG_BCR_BVR_WCR_WVR_EL1(7), 3113 DBG_BCR_BVR_WCR_WVR_EL1(8), 3114 DBG_BCR_BVR_WCR_WVR_EL1(9), 3115 DBG_BCR_BVR_WCR_WVR_EL1(10), 3116 DBG_BCR_BVR_WCR_WVR_EL1(11), 3117 DBG_BCR_BVR_WCR_WVR_EL1(12), 3118 DBG_BCR_BVR_WCR_WVR_EL1(13), 3119 DBG_BCR_BVR_WCR_WVR_EL1(14), 3120 DBG_BCR_BVR_WCR_WVR_EL1(15), 3121 3122 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, 3123 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 }, 3124 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, 3125 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, 3126 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, 3127 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, 3128 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, 3129 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi }, 3130 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 }, 3131 3132 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi }, 3133 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi }, 3134 // DBGDTR[TR]X_EL0 share the same encoding 3135 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, 3136 3137 { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 }, 3138 3139 IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)), 3140 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, 3141 IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)), 3142 3143 /* 3144 * ID regs: all ID_SANITISED() entries here must have corresponding 3145 * entries in arm64_ftr_regs[]. 
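 * (arm64_check_features() uses the per-field ftr_bits description from
 * that table to decide which userspace writes are acceptable.)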
3146 */ 3147 3148 /* AArch64 mappings of the AArch32 ID registers */ 3149 /* CRm=1 */ 3150 AA32_ID_WRITABLE(ID_PFR0_EL1), 3151 AA32_ID_WRITABLE(ID_PFR1_EL1), 3152 { SYS_DESC(SYS_ID_DFR0_EL1), 3153 .access = access_id_reg, 3154 .get_user = get_id_reg, 3155 .set_user = set_id_dfr0_el1, 3156 .visibility = aa32_id_visibility, 3157 .reset = read_sanitised_id_dfr0_el1, 3158 .val = GENMASK(31, 0) }, 3159 ID_HIDDEN(ID_AFR0_EL1), 3160 AA32_ID_WRITABLE(ID_MMFR0_EL1), 3161 AA32_ID_WRITABLE(ID_MMFR1_EL1), 3162 AA32_ID_WRITABLE(ID_MMFR2_EL1), 3163 AA32_ID_WRITABLE(ID_MMFR3_EL1), 3164 3165 /* CRm=2 */ 3166 AA32_ID_WRITABLE(ID_ISAR0_EL1), 3167 AA32_ID_WRITABLE(ID_ISAR1_EL1), 3168 AA32_ID_WRITABLE(ID_ISAR2_EL1), 3169 AA32_ID_WRITABLE(ID_ISAR3_EL1), 3170 AA32_ID_WRITABLE(ID_ISAR4_EL1), 3171 AA32_ID_WRITABLE(ID_ISAR5_EL1), 3172 AA32_ID_WRITABLE(ID_MMFR4_EL1), 3173 AA32_ID_WRITABLE(ID_ISAR6_EL1), 3174 3175 /* CRm=3 */ 3176 AA32_ID_WRITABLE(MVFR0_EL1), 3177 AA32_ID_WRITABLE(MVFR1_EL1), 3178 AA32_ID_WRITABLE(MVFR2_EL1), 3179 ID_UNALLOCATED(3,3), 3180 AA32_ID_WRITABLE(ID_PFR2_EL1), 3181 ID_HIDDEN(ID_DFR1_EL1), 3182 AA32_ID_WRITABLE(ID_MMFR5_EL1), 3183 ID_UNALLOCATED(3,7), 3184 3185 /* AArch64 ID registers */ 3186 /* CRm=4 */ 3187 ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1, 3188 ~(ID_AA64PFR0_EL1_AMU | 3189 ID_AA64PFR0_EL1_MPAM | 3190 ID_AA64PFR0_EL1_SVE | 3191 ID_AA64PFR0_EL1_AdvSIMD | 3192 ID_AA64PFR0_EL1_FP)), 3193 ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1, 3194 ~(ID_AA64PFR1_EL1_PFAR | 3195 ID_AA64PFR1_EL1_MTEX | 3196 ID_AA64PFR1_EL1_THE | 3197 ID_AA64PFR1_EL1_GCS | 3198 ID_AA64PFR1_EL1_MTE_frac | 3199 ID_AA64PFR1_EL1_NMI | 3200 ID_AA64PFR1_EL1_RNDR_trap | 3201 ID_AA64PFR1_EL1_SME | 3202 ID_AA64PFR1_EL1_RES0 | 3203 ID_AA64PFR1_EL1_MPAM_frac | 3204 ID_AA64PFR1_EL1_MTE)), 3205 ID_WRITABLE(ID_AA64PFR2_EL1, 3206 ID_AA64PFR2_EL1_FPMR | 3207 ID_AA64PFR2_EL1_MTEFAR | 3208 ID_AA64PFR2_EL1_MTESTOREONLY), 3209 ID_UNALLOCATED(4,3), 3210 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), 3211 ID_HIDDEN(ID_AA64SMFR0_EL1), 3212 ID_UNALLOCATED(4,6), 3213 ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0), 3214 3215 /* CRm=5 */ 3216 /* 3217 * Prior to FEAT_Debugv8.9, the architecture defines context-aware 3218 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs). 3219 * KVM does not trap + emulate the breakpoint registers, and as such 3220 * cannot support a layout that misaligns with the underlying hardware. 3221 * While it may be possible to describe a subset that aligns with 3222 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for 3223 * simplicity. 3224 * 3225 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking 3226 * of breakpoints for more details. 
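 * (Hence the writable mask below includes WRPs but deliberately leaves
 * out BRPs and CTX_CMPs.)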
3227 */ 3228 ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1, 3229 ID_AA64DFR0_EL1_DoubleLock_MASK | 3230 ID_AA64DFR0_EL1_WRPs_MASK | 3231 ID_AA64DFR0_EL1_PMUVer_MASK | 3232 ID_AA64DFR0_EL1_DebugVer_MASK), 3233 ID_SANITISED(ID_AA64DFR1_EL1), 3234 ID_UNALLOCATED(5,2), 3235 ID_UNALLOCATED(5,3), 3236 ID_HIDDEN(ID_AA64AFR0_EL1), 3237 ID_HIDDEN(ID_AA64AFR1_EL1), 3238 ID_UNALLOCATED(5,6), 3239 ID_UNALLOCATED(5,7), 3240 3241 /* CRm=6 */ 3242 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0), 3243 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI | 3244 ID_AA64ISAR1_EL1_GPA | 3245 ID_AA64ISAR1_EL1_API | 3246 ID_AA64ISAR1_EL1_APA)), 3247 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 | 3248 ID_AA64ISAR2_EL1_APA3 | 3249 ID_AA64ISAR2_EL1_GPA3)), 3250 ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT | 3251 ID_AA64ISAR3_EL1_LSFE | 3252 ID_AA64ISAR3_EL1_FAMINMAX)), 3253 ID_UNALLOCATED(6,4), 3254 ID_UNALLOCATED(6,5), 3255 ID_UNALLOCATED(6,6), 3256 ID_UNALLOCATED(6,7), 3257 3258 /* CRm=7 */ 3259 ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1, 3260 ~(ID_AA64MMFR0_EL1_RES0 | 3261 ID_AA64MMFR0_EL1_ASIDBITS)), 3262 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | 3263 ID_AA64MMFR1_EL1_XNX | 3264 ID_AA64MMFR1_EL1_VH | 3265 ID_AA64MMFR1_EL1_VMIDBits)), 3266 ID_FILTERED(ID_AA64MMFR2_EL1, 3267 id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 | 3268 ID_AA64MMFR2_EL1_EVT | 3269 ID_AA64MMFR2_EL1_FWB | 3270 ID_AA64MMFR2_EL1_IDS | 3271 ID_AA64MMFR2_EL1_NV | 3272 ID_AA64MMFR2_EL1_CCIDX)), 3273 ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | 3274 ID_AA64MMFR3_EL1_SCTLRX | 3275 ID_AA64MMFR3_EL1_S1PIE | 3276 ID_AA64MMFR3_EL1_S1POE)), 3277 ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac), 3278 ID_UNALLOCATED(7,5), 3279 ID_UNALLOCATED(7,6), 3280 ID_UNALLOCATED(7,7), 3281 3282 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, 3283 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, 3284 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, 3285 { SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0, 3286 .visibility = sctlr2_visibility }, 3287 3288 MTE_REG(RGSR_EL1), 3289 MTE_REG(GCR_EL1), 3290 3291 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, 3292 { SYS_DESC(SYS_TRFCR_EL1), undef_access }, 3293 { SYS_DESC(SYS_SMPRI_EL1), undef_access }, 3294 { SYS_DESC(SYS_SMCR_EL1), undef_access }, 3295 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, 3296 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, 3297 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, 3298 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0, 3299 .visibility = tcr2_visibility }, 3300 3301 PTRAUTH_KEY(APIA), 3302 PTRAUTH_KEY(APIB), 3303 PTRAUTH_KEY(APDA), 3304 PTRAUTH_KEY(APDB), 3305 PTRAUTH_KEY(APGA), 3306 3307 { SYS_DESC(SYS_SPSR_EL1), access_spsr}, 3308 { SYS_DESC(SYS_ELR_EL1), access_elr}, 3309 3310 { SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, 3311 3312 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, 3313 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, 3314 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, 3315 3316 { SYS_DESC(SYS_ERRIDR_EL1), access_ras }, 3317 { SYS_DESC(SYS_ERRSELR_EL1), access_ras }, 3318 { SYS_DESC(SYS_ERXFR_EL1), access_ras }, 3319 { SYS_DESC(SYS_ERXCTLR_EL1), access_ras }, 3320 { SYS_DESC(SYS_ERXSTATUS_EL1), access_ras }, 3321 { SYS_DESC(SYS_ERXADDR_EL1), access_ras }, 3322 { 
SYS_DESC(SYS_ERXPFGF_EL1), access_ras }, 3323 { SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras }, 3324 { SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras }, 3325 { SYS_DESC(SYS_ERXMISC0_EL1), access_ras }, 3326 { SYS_DESC(SYS_ERXMISC1_EL1), access_ras }, 3327 { SYS_DESC(SYS_ERXMISC2_EL1), access_ras }, 3328 { SYS_DESC(SYS_ERXMISC3_EL1), access_ras }, 3329 3330 MTE_REG(TFSR_EL1), 3331 MTE_REG(TFSRE0_EL1), 3332 3333 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, 3334 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, 3335 3336 { SYS_DESC(SYS_PMSCR_EL1), undef_access }, 3337 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access }, 3338 { SYS_DESC(SYS_PMSICR_EL1), undef_access }, 3339 { SYS_DESC(SYS_PMSIRR_EL1), undef_access }, 3340 { SYS_DESC(SYS_PMSFCR_EL1), undef_access }, 3341 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access }, 3342 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access }, 3343 { SYS_DESC(SYS_PMSIDR_EL1), undef_access }, 3344 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access }, 3345 { SYS_DESC(SYS_PMBPTR_EL1), undef_access }, 3346 { SYS_DESC(SYS_PMBSR_EL1), undef_access }, 3347 { SYS_DESC(SYS_PMSDSFR_EL1), undef_access }, 3348 /* PMBIDR_EL1 is not trapped */ 3349 3350 { PMU_SYS_REG(PMINTENSET_EL1), 3351 .access = access_pminten, .reg = PMINTENSET_EL1, 3352 .get_user = get_pmreg, .set_user = set_pmreg }, 3353 { PMU_SYS_REG(PMINTENCLR_EL1), 3354 .access = access_pminten, .reg = PMINTENSET_EL1, 3355 .get_user = get_pmreg, .set_user = set_pmreg }, 3356 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, 3357 3358 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 3359 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1, 3360 .visibility = s1pie_visibility }, 3361 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1, 3362 .visibility = s1pie_visibility }, 3363 { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1, 3364 .visibility = s1poe_visibility }, 3365 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 3366 3367 { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, 3368 { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, 3369 { SYS_DESC(SYS_LORN_EL1), trap_loregion }, 3370 { SYS_DESC(SYS_LORC_EL1), trap_loregion }, 3371 { SYS_DESC(SYS_MPAMIDR_EL1), undef_access }, 3372 { SYS_DESC(SYS_LORID_EL1), trap_loregion }, 3373 3374 { SYS_DESC(SYS_MPAM1_EL1), undef_access }, 3375 { SYS_DESC(SYS_MPAM0_EL1), undef_access }, 3376 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, 3377 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 3378 3379 { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, 3380 { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, 3381 { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, 3382 { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, 3383 { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, 3384 { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, 3385 { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, 3386 { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, 3387 { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, 3388 { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, 3389 { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, 3390 { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, 3391 { SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir }, 3392 { SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, 3393 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, 3394 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, 3395 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, 3396 { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, 3397 { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, 3398 { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, 3399 { 
SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, 3400 { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, 3401 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, 3402 { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, 3403 { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, 3404 3405 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 3406 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, 3407 3408 { SYS_DESC(SYS_ACCDATA_EL1), undef_access }, 3409 3410 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access }, 3411 3412 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, 3413 3414 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, 3415 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1, 3416 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 }, 3417 IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)), 3418 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, 3419 ID_FILTERED(CTR_EL0, ctr_el0, 3420 CTR_EL0_DIC_MASK | 3421 CTR_EL0_IDC_MASK | 3422 CTR_EL0_DminLine_MASK | 3423 CTR_EL0_L1Ip_MASK | 3424 CTR_EL0_IminLine_MASK), 3425 { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility }, 3426 { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility }, 3427 3428 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, 3429 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, 3430 { PMU_SYS_REG(PMCNTENSET_EL0), 3431 .access = access_pmcnten, .reg = PMCNTENSET_EL0, 3432 .get_user = get_pmreg, .set_user = set_pmreg }, 3433 { PMU_SYS_REG(PMCNTENCLR_EL0), 3434 .access = access_pmcnten, .reg = PMCNTENSET_EL0, 3435 .get_user = get_pmreg, .set_user = set_pmreg }, 3436 { PMU_SYS_REG(PMOVSCLR_EL0), 3437 .access = access_pmovs, .reg = PMOVSSET_EL0, 3438 .get_user = get_pmreg, .set_user = set_pmreg }, 3439 /* 3440 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was 3441 * previously (and pointlessly) advertised in the past... 3442 */ 3443 { PMU_SYS_REG(PMSWINC_EL0), 3444 .get_user = get_raz_reg, .set_user = set_wi_reg, 3445 .access = access_pmswinc, .reset = NULL }, 3446 { PMU_SYS_REG(PMSELR_EL0), 3447 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 }, 3448 { PMU_SYS_REG(PMCEID0_EL0), 3449 .access = access_pmceid, .reset = NULL }, 3450 { PMU_SYS_REG(PMCEID1_EL0), 3451 .access = access_pmceid, .reset = NULL }, 3452 { PMU_SYS_REG(PMCCNTR_EL0), 3453 .access = access_pmu_evcntr, .reset = reset_unknown, 3454 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr, 3455 .set_user = set_pmu_evcntr }, 3456 { PMU_SYS_REG(PMXEVTYPER_EL0), 3457 .access = access_pmu_evtyper, .reset = NULL }, 3458 { PMU_SYS_REG(PMXEVCNTR_EL0), 3459 .access = access_pmu_evcntr, .reset = NULL }, 3460 /* 3461 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero 3462 * in 32bit mode. Here we choose to reset it as zero for consistency. 
3463 */ 3464 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr, 3465 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 }, 3466 { PMU_SYS_REG(PMOVSSET_EL0), 3467 .access = access_pmovs, .reg = PMOVSSET_EL0, 3468 .get_user = get_pmreg, .set_user = set_pmreg }, 3469 3470 { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0, 3471 .visibility = s1poe_visibility }, 3472 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, 3473 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, 3474 { SYS_DESC(SYS_TPIDR2_EL0), undef_access }, 3475 3476 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access }, 3477 3478 { SYS_DESC(SYS_AMCR_EL0), undef_access }, 3479 { SYS_DESC(SYS_AMCFGR_EL0), undef_access }, 3480 { SYS_DESC(SYS_AMCGCR_EL0), undef_access }, 3481 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access }, 3482 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access }, 3483 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access }, 3484 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access }, 3485 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access }, 3486 AMU_AMEVCNTR0_EL0(0), 3487 AMU_AMEVCNTR0_EL0(1), 3488 AMU_AMEVCNTR0_EL0(2), 3489 AMU_AMEVCNTR0_EL0(3), 3490 AMU_AMEVCNTR0_EL0(4), 3491 AMU_AMEVCNTR0_EL0(5), 3492 AMU_AMEVCNTR0_EL0(6), 3493 AMU_AMEVCNTR0_EL0(7), 3494 AMU_AMEVCNTR0_EL0(8), 3495 AMU_AMEVCNTR0_EL0(9), 3496 AMU_AMEVCNTR0_EL0(10), 3497 AMU_AMEVCNTR0_EL0(11), 3498 AMU_AMEVCNTR0_EL0(12), 3499 AMU_AMEVCNTR0_EL0(13), 3500 AMU_AMEVCNTR0_EL0(14), 3501 AMU_AMEVCNTR0_EL0(15), 3502 AMU_AMEVTYPER0_EL0(0), 3503 AMU_AMEVTYPER0_EL0(1), 3504 AMU_AMEVTYPER0_EL0(2), 3505 AMU_AMEVTYPER0_EL0(3), 3506 AMU_AMEVTYPER0_EL0(4), 3507 AMU_AMEVTYPER0_EL0(5), 3508 AMU_AMEVTYPER0_EL0(6), 3509 AMU_AMEVTYPER0_EL0(7), 3510 AMU_AMEVTYPER0_EL0(8), 3511 AMU_AMEVTYPER0_EL0(9), 3512 AMU_AMEVTYPER0_EL0(10), 3513 AMU_AMEVTYPER0_EL0(11), 3514 AMU_AMEVTYPER0_EL0(12), 3515 AMU_AMEVTYPER0_EL0(13), 3516 AMU_AMEVTYPER0_EL0(14), 3517 AMU_AMEVTYPER0_EL0(15), 3518 AMU_AMEVCNTR1_EL0(0), 3519 AMU_AMEVCNTR1_EL0(1), 3520 AMU_AMEVCNTR1_EL0(2), 3521 AMU_AMEVCNTR1_EL0(3), 3522 AMU_AMEVCNTR1_EL0(4), 3523 AMU_AMEVCNTR1_EL0(5), 3524 AMU_AMEVCNTR1_EL0(6), 3525 AMU_AMEVCNTR1_EL0(7), 3526 AMU_AMEVCNTR1_EL0(8), 3527 AMU_AMEVCNTR1_EL0(9), 3528 AMU_AMEVCNTR1_EL0(10), 3529 AMU_AMEVCNTR1_EL0(11), 3530 AMU_AMEVCNTR1_EL0(12), 3531 AMU_AMEVCNTR1_EL0(13), 3532 AMU_AMEVCNTR1_EL0(14), 3533 AMU_AMEVCNTR1_EL0(15), 3534 AMU_AMEVTYPER1_EL0(0), 3535 AMU_AMEVTYPER1_EL0(1), 3536 AMU_AMEVTYPER1_EL0(2), 3537 AMU_AMEVTYPER1_EL0(3), 3538 AMU_AMEVTYPER1_EL0(4), 3539 AMU_AMEVTYPER1_EL0(5), 3540 AMU_AMEVTYPER1_EL0(6), 3541 AMU_AMEVTYPER1_EL0(7), 3542 AMU_AMEVTYPER1_EL0(8), 3543 AMU_AMEVTYPER1_EL0(9), 3544 AMU_AMEVTYPER1_EL0(10), 3545 AMU_AMEVTYPER1_EL0(11), 3546 AMU_AMEVTYPER1_EL0(12), 3547 AMU_AMEVTYPER1_EL0(13), 3548 AMU_AMEVTYPER1_EL0(14), 3549 AMU_AMEVTYPER1_EL0(15), 3550 3551 { SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer, 3552 .get_user = arch_timer_get_user, .set_user = arch_timer_set_user }, 3553 { SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer, 3554 .get_user = arch_timer_get_user, .set_user = arch_timer_set_user }, 3555 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, 3556 { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer }, 3557 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, 3558 TIMER_REG(CNTP_CTL_EL0, NULL), 3559 TIMER_REG(CNTP_CVAL_EL0, NULL), 3560 3561 { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer }, 3562 TIMER_REG(CNTV_CTL_EL0, NULL), 3563 TIMER_REG(CNTV_CVAL_EL0, NULL), 3564 3565 /* PMEVCNTRn_EL0 */ 3566 PMU_PMEVCNTR_EL0(0), 3567 PMU_PMEVCNTR_EL0(1), 3568 
PMU_PMEVCNTR_EL0(2), 3569 PMU_PMEVCNTR_EL0(3), 3570 PMU_PMEVCNTR_EL0(4), 3571 PMU_PMEVCNTR_EL0(5), 3572 PMU_PMEVCNTR_EL0(6), 3573 PMU_PMEVCNTR_EL0(7), 3574 PMU_PMEVCNTR_EL0(8), 3575 PMU_PMEVCNTR_EL0(9), 3576 PMU_PMEVCNTR_EL0(10), 3577 PMU_PMEVCNTR_EL0(11), 3578 PMU_PMEVCNTR_EL0(12), 3579 PMU_PMEVCNTR_EL0(13), 3580 PMU_PMEVCNTR_EL0(14), 3581 PMU_PMEVCNTR_EL0(15), 3582 PMU_PMEVCNTR_EL0(16), 3583 PMU_PMEVCNTR_EL0(17), 3584 PMU_PMEVCNTR_EL0(18), 3585 PMU_PMEVCNTR_EL0(19), 3586 PMU_PMEVCNTR_EL0(20), 3587 PMU_PMEVCNTR_EL0(21), 3588 PMU_PMEVCNTR_EL0(22), 3589 PMU_PMEVCNTR_EL0(23), 3590 PMU_PMEVCNTR_EL0(24), 3591 PMU_PMEVCNTR_EL0(25), 3592 PMU_PMEVCNTR_EL0(26), 3593 PMU_PMEVCNTR_EL0(27), 3594 PMU_PMEVCNTR_EL0(28), 3595 PMU_PMEVCNTR_EL0(29), 3596 PMU_PMEVCNTR_EL0(30), 3597 /* PMEVTYPERn_EL0 */ 3598 PMU_PMEVTYPER_EL0(0), 3599 PMU_PMEVTYPER_EL0(1), 3600 PMU_PMEVTYPER_EL0(2), 3601 PMU_PMEVTYPER_EL0(3), 3602 PMU_PMEVTYPER_EL0(4), 3603 PMU_PMEVTYPER_EL0(5), 3604 PMU_PMEVTYPER_EL0(6), 3605 PMU_PMEVTYPER_EL0(7), 3606 PMU_PMEVTYPER_EL0(8), 3607 PMU_PMEVTYPER_EL0(9), 3608 PMU_PMEVTYPER_EL0(10), 3609 PMU_PMEVTYPER_EL0(11), 3610 PMU_PMEVTYPER_EL0(12), 3611 PMU_PMEVTYPER_EL0(13), 3612 PMU_PMEVTYPER_EL0(14), 3613 PMU_PMEVTYPER_EL0(15), 3614 PMU_PMEVTYPER_EL0(16), 3615 PMU_PMEVTYPER_EL0(17), 3616 PMU_PMEVTYPER_EL0(18), 3617 PMU_PMEVTYPER_EL0(19), 3618 PMU_PMEVTYPER_EL0(20), 3619 PMU_PMEVTYPER_EL0(21), 3620 PMU_PMEVTYPER_EL0(22), 3621 PMU_PMEVTYPER_EL0(23), 3622 PMU_PMEVTYPER_EL0(24), 3623 PMU_PMEVTYPER_EL0(25), 3624 PMU_PMEVTYPER_EL0(26), 3625 PMU_PMEVTYPER_EL0(27), 3626 PMU_PMEVTYPER_EL0(28), 3627 PMU_PMEVTYPER_EL0(29), 3628 PMU_PMEVTYPER_EL0(30), 3629 /* 3630 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero 3631 * in 32bit mode. Here we choose to reset it as zero for consistency. 
3632 */ 3633 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper, 3634 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 }, 3635 3636 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0), 3637 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0), 3638 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1), 3639 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), 3640 EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0, 3641 sctlr2_el2_visibility), 3642 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0), 3643 EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0), 3644 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), 3645 EL2_REG_VNCR(HSTR_EL2, reset_val, 0), 3646 EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility), 3647 EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility), 3648 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0), 3649 EL2_REG_VNCR(HACR_EL2, reset_val, 0), 3650 3651 EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0, 3652 sve_el2_visibility), 3653 3654 EL2_REG_VNCR(HCRX_EL2, reset_val, 0), 3655 3656 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), 3657 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0), 3658 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1), 3659 EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1, 3660 tcr2_el2_visibility), 3661 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0), 3662 EL2_REG_VNCR(VTCR_EL2, reset_val, 0), 3663 EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0, 3664 vncr_el2_visibility), 3665 3666 { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 }, 3667 EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility), 3668 EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility), 3669 EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility), 3670 EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility), 3671 EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility), 3672 EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility), 3673 EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility), 3674 EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility), 3675 EL2_REG_REDIR(SPSR_EL2, reset_val, 0), 3676 EL2_REG_REDIR(ELR_EL2, reset_val, 0), 3677 { SYS_DESC(SYS_SP_EL1), access_sp_el1}, 3678 3679 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */ 3680 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi }, 3681 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi }, 3682 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi }, 3683 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi }, 3684 3685 { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 }, 3686 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), 3687 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), 3688 EL2_REG_REDIR(ESR_EL2, reset_val, 0), 3689 EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0), 3690 { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 }, 3691 3692 EL2_REG_REDIR(FAR_EL2, reset_val, 0), 3693 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0), 3694 3695 EL2_REG(MAIR_EL2, access_rw, reset_val, 0), 3696 EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0, 3697 s1pie_el2_visibility), 3698 EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0, 3699 s1pie_el2_visibility), 3700 EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0, 3701 s1poe_el2_visibility), 3702 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0), 3703 { SYS_DESC(SYS_MPAMHCR_EL2), undef_access }, 3704 { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access }, 3705 { SYS_DESC(SYS_MPAM2_EL2), undef_access }, 3706 { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access }, 3707 { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access }, 3708 { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access }, 3709 { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access }, 3710 { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access 
}, 3711 { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access }, 3712 { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access }, 3713 { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access }, 3714 3715 EL2_REG(VBAR_EL2, access_rw, reset_val, 0), 3716 { SYS_DESC(SYS_RVBAR_EL2), undef_access }, 3717 { SYS_DESC(SYS_RMR_EL2), undef_access }, 3718 EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0), 3719 3720 EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2), 3721 EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2), 3722 EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2), 3723 EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2), 3724 EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2), 3725 EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2), 3726 EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2), 3727 EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2), 3728 3729 { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre }, 3730 3731 EL2_REG_VNCR_GICv3(ICH_HCR_EL2), 3732 { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr }, 3733 { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr }, 3734 { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr }, 3735 { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr }, 3736 EL2_REG_VNCR_GICv3(ICH_VMCR_EL2), 3737 3738 EL2_REG_VNCR_GICv3(ICH_LR0_EL2), 3739 EL2_REG_VNCR_GICv3(ICH_LR1_EL2), 3740 EL2_REG_VNCR_GICv3(ICH_LR2_EL2), 3741 EL2_REG_VNCR_GICv3(ICH_LR3_EL2), 3742 EL2_REG_VNCR_GICv3(ICH_LR4_EL2), 3743 EL2_REG_VNCR_GICv3(ICH_LR5_EL2), 3744 EL2_REG_VNCR_GICv3(ICH_LR6_EL2), 3745 EL2_REG_VNCR_GICv3(ICH_LR7_EL2), 3746 EL2_REG_VNCR_GICv3(ICH_LR8_EL2), 3747 EL2_REG_VNCR_GICv3(ICH_LR9_EL2), 3748 EL2_REG_VNCR_GICv3(ICH_LR10_EL2), 3749 EL2_REG_VNCR_GICv3(ICH_LR11_EL2), 3750 EL2_REG_VNCR_GICv3(ICH_LR12_EL2), 3751 EL2_REG_VNCR_GICv3(ICH_LR13_EL2), 3752 EL2_REG_VNCR_GICv3(ICH_LR14_EL2), 3753 EL2_REG_VNCR_GICv3(ICH_LR15_EL2), 3754 3755 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0), 3756 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0), 3757 3758 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), 3759 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), 3760 { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, 3761 TIMER_REG(CNTHP_CTL_EL2, el2_visibility), 3762 TIMER_REG(CNTHP_CVAL_EL2, el2_visibility), 3763 3764 { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility }, 3765 TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility), 3766 TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility), 3767 3768 { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, 3769 3770 { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer }, 3771 { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer }, 3772 { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer }, 3773 3774 { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer }, 3775 { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer }, 3776 { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer }, 3777 3778 EL2_REG(SP_EL2, NULL, reset_unknown, 0), 3779 }; 3780 3781 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3782 const struct sys_reg_desc *r) 3783 { 3784 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3785 3786 if (__kvm_at_s1e01(vcpu, op, p->regval)) 3787 return false; 3788 3789 return true; 3790 } 3791 3792 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3793 const struct sys_reg_desc *r) 3794 { 3795 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3796 3797 /* There is no FGT associated with AT S1E2A :-( */ 3798 if (op == OP_AT_S1E2A && 3799 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { 3800 kvm_inject_undefined(vcpu); 3801 return false; 3802 } 3803 3804 if (__kvm_at_s1e2(vcpu, op, p->regval)) 3805 return false; 3806 3807 return true; 3808 } 3809 3810 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct 
sys_reg_params *p, 3811 const struct sys_reg_desc *r) 3812 { 3813 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3814 3815 if (__kvm_at_s12(vcpu, op, p->regval)) 3816 return false; 3817 3818 return true; 3819 } 3820 3821 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr) 3822 { 3823 struct kvm *kvm = vpcu->kvm; 3824 u8 CRm = sys_reg_CRm(instr); 3825 3826 if (sys_reg_CRn(instr) == TLBI_CRn_nXS && 3827 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) 3828 return false; 3829 3830 if (CRm == TLBI_CRm_nROS && 3831 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 3832 return false; 3833 3834 return true; 3835 } 3836 3837 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3838 const struct sys_reg_desc *r) 3839 { 3840 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3841 3842 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) 3843 return undef_access(vcpu, p, r); 3844 3845 write_lock(&vcpu->kvm->mmu_lock); 3846 3847 /* 3848 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the 3849 * corresponding VMIDs. 3850 */ 3851 kvm_nested_s2_unmap(vcpu->kvm, true); 3852 3853 write_unlock(&vcpu->kvm->mmu_lock); 3854 3855 return true; 3856 } 3857 3858 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vpcu, u32 instr) 3859 { 3860 struct kvm *kvm = vpcu->kvm; 3861 u8 CRm = sys_reg_CRm(instr); 3862 u8 Op2 = sys_reg_Op2(instr); 3863 3864 if (sys_reg_CRn(instr) == TLBI_CRn_nXS && 3865 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) 3866 return false; 3867 3868 if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) && 3869 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) 3870 return false; 3871 3872 if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) && 3873 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 3874 return false; 3875 3876 if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) && 3877 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) 3878 return false; 3879 3880 return true; 3881 } 3882 3883 /* Only defined here as this is an internal "abstraction" */ 3884 union tlbi_info { 3885 struct { 3886 u64 start; 3887 u64 size; 3888 } range; 3889 3890 struct { 3891 u64 addr; 3892 } ipa; 3893 3894 struct { 3895 u64 addr; 3896 u32 encoding; 3897 } va; 3898 }; 3899 3900 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu, 3901 const union tlbi_info *info) 3902 { 3903 /* 3904 * The unmap operation is allowed to drop the MMU lock and block, which 3905 * means that @mmu could be used for a different context than the one 3906 * currently being invalidated. 3907 * 3908 * This behavior is still safe, as: 3909 * 3910 * 1) The vCPU(s) that recycled the MMU are responsible for invalidating 3911 * the entire MMU before reusing it, which still honors the intent 3912 * of a TLBI. 3913 * 3914 * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC 3915 * and ERET to the guest), other vCPUs are allowed to use stale 3916 * translations. 3917 * 3918 * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and 3919 * at worst may cause more aborts for shadow stage-2 fills. 3920 * 3921 * Dropping the MMU lock also implies that shadow stage-2 fills could 3922 * happen behind the back of the TLBI. This is still safe, though, as 3923 * the L1 needs to put its stage-2 in a consistent state before doing 3924 * the TLBI. 
3925 */ 3926 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); 3927 } 3928 3929 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3930 const struct sys_reg_desc *r) 3931 { 3932 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3933 u64 limit, vttbr; 3934 3935 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) 3936 return undef_access(vcpu, p, r); 3937 3938 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3939 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); 3940 3941 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3942 &(union tlbi_info) { 3943 .range = { 3944 .start = 0, 3945 .size = limit, 3946 }, 3947 }, 3948 s2_mmu_unmap_range); 3949 3950 return true; 3951 } 3952 3953 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3954 const struct sys_reg_desc *r) 3955 { 3956 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3957 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3958 u64 base, range; 3959 3960 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) 3961 return undef_access(vcpu, p, r); 3962 3963 /* 3964 * Because the shadow S2 structure doesn't necessarily reflect that 3965 * of the guest's S2 (different base granule size, for example), we 3966 * decide to ignore TTL and only use the described range. 3967 */ 3968 base = decode_range_tlbi(p->regval, &range, NULL); 3969 3970 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3971 &(union tlbi_info) { 3972 .range = { 3973 .start = base, 3974 .size = range, 3975 }, 3976 }, 3977 s2_mmu_unmap_range); 3978 3979 return true; 3980 } 3981 3982 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu, 3983 const union tlbi_info *info) 3984 { 3985 unsigned long max_size; 3986 u64 base_addr; 3987 3988 /* 3989 * We drop a number of things from the supplied value: 3990 * 3991 * - NS bit: we're non-secure only. 3992 * 3993 * - IPA[51:48]: We don't support 52bit IPA just yet... 3994 * 3995 * And of course, adjust the IPA to be on an actual address. 3996 */ 3997 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12; 3998 max_size = compute_tlb_inval_range(mmu, info->ipa.addr); 3999 base_addr &= ~(max_size - 1); 4000 4001 /* 4002 * See comment in s2_mmu_unmap_range() for why this is allowed to 4003 * reschedule. 
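 *
 * As a worked example of the decoding above (illustrative only): a guest
 * TLBI IPAS2E1IS with Xt[35:0] == 0x80000 describes IPA 0x80000000;
 * masking with GENMASK_ULL(35, 0) and shifting left by 12 yields exactly
 * that byte address, which is then aligned down to the invalidation
 * granule returned by compute_tlb_inval_range().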
4004 */ 4005 kvm_stage2_unmap_range(mmu, base_addr, max_size, true); 4006 } 4007 4008 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 4009 const struct sys_reg_desc *r) 4010 { 4011 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 4012 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 4013 4014 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) 4015 return undef_access(vcpu, p, r); 4016 4017 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 4018 &(union tlbi_info) { 4019 .ipa = { 4020 .addr = p->regval, 4021 }, 4022 }, 4023 s2_mmu_unmap_ipa); 4024 4025 return true; 4026 } 4027 4028 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu, 4029 const union tlbi_info *info) 4030 { 4031 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); 4032 } 4033 4034 static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 4035 const struct sys_reg_desc *r) 4036 { 4037 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 4038 4039 if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding)) 4040 return undef_access(vcpu, p, r); 4041 4042 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); 4043 return true; 4044 } 4045 4046 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 4047 const struct sys_reg_desc *r) 4048 { 4049 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 4050 4051 /* 4052 * If we're here, this is because we've trapped on a EL1 TLBI 4053 * instruction that affects the EL1 translation regime while 4054 * we're running in a context that doesn't allow us to let the 4055 * HW do its thing (aka vEL2): 4056 * 4057 * - HCR_EL2.E2H == 0 : a non-VHE guest 4058 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode 4059 * 4060 * Another possibility is that we are invalidating the EL2 context 4061 * using EL1 instructions, but that we landed here because we need 4062 * additional invalidation for structures that are not held in the 4063 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In 4064 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 } 4065 * as we don't allow an NV-capable L1 in a nVHE configuration. 4066 * 4067 * We don't expect these helpers to ever be called when running 4068 * in a vEL1 context. 
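 *
 * That last case is also why, when HCR_EL2.{E2H,TGE} == {1,1}, the
 * instruction is simply forwarded to kvm_handle_s1e2_tlbi() below: it
 * targets the EL2&0 translation regime, so there is no shadow stage-2
 * to iterate over for it.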
4069 */ 4070 4071 WARN_ON(!vcpu_is_el2(vcpu)); 4072 4073 if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) 4074 return undef_access(vcpu, p, r); 4075 4076 if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) { 4077 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); 4078 return true; 4079 } 4080 4081 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, 4082 get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)), 4083 &(union tlbi_info) { 4084 .va = { 4085 .addr = p->regval, 4086 .encoding = sys_encoding, 4087 }, 4088 }, 4089 s2_mmu_tlbi_s1e1); 4090 4091 return true; 4092 } 4093 4094 #define SYS_INSN(insn, access_fn) \ 4095 { \ 4096 SYS_DESC(OP_##insn), \ 4097 .access = (access_fn), \ 4098 } 4099 4100 static struct sys_reg_desc sys_insn_descs[] = { 4101 { SYS_DESC(SYS_DC_ISW), access_dcsw }, 4102 { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, 4103 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, 4104 4105 SYS_INSN(AT_S1E1R, handle_at_s1e01), 4106 SYS_INSN(AT_S1E1W, handle_at_s1e01), 4107 SYS_INSN(AT_S1E0R, handle_at_s1e01), 4108 SYS_INSN(AT_S1E0W, handle_at_s1e01), 4109 SYS_INSN(AT_S1E1RP, handle_at_s1e01), 4110 SYS_INSN(AT_S1E1WP, handle_at_s1e01), 4111 4112 { SYS_DESC(SYS_DC_CSW), access_dcsw }, 4113 { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, 4114 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, 4115 { SYS_DESC(SYS_DC_CISW), access_dcsw }, 4116 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, 4117 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, 4118 4119 SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1), 4120 SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1), 4121 SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1), 4122 SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1), 4123 SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1), 4124 SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1), 4125 4126 SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1), 4127 SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1), 4128 SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1), 4129 SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1), 4130 4131 SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1), 4132 SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1), 4133 SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1), 4134 SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1), 4135 SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1), 4136 SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1), 4137 4138 SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1), 4139 SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1), 4140 SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1), 4141 SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1), 4142 4143 SYS_INSN(TLBI_RVAE1, handle_tlbi_el1), 4144 SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1), 4145 SYS_INSN(TLBI_RVALE1, handle_tlbi_el1), 4146 SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1), 4147 4148 SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1), 4149 SYS_INSN(TLBI_VAE1, handle_tlbi_el1), 4150 SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1), 4151 SYS_INSN(TLBI_VAAE1, handle_tlbi_el1), 4152 SYS_INSN(TLBI_VALE1, handle_tlbi_el1), 4153 SYS_INSN(TLBI_VAALE1, handle_tlbi_el1), 4154 4155 SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1), 4156 SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1), 4157 SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1), 4158 SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1), 4159 SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1), 4160 SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1), 4161 4162 SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1), 4163 SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1), 4164 SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1), 4165 SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1), 4166 4167 SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1), 4168 SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1), 4169 SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1), 4170 SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1), 4171 
SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1), 4172 SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1), 4173 4174 SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1), 4175 SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1), 4176 SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1), 4177 SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1), 4178 4179 SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1), 4180 SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1), 4181 SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1), 4182 SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1), 4183 4184 SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1), 4185 SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1), 4186 SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1), 4187 SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1), 4188 SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1), 4189 SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1), 4190 4191 SYS_INSN(AT_S1E2R, handle_at_s1e2), 4192 SYS_INSN(AT_S1E2W, handle_at_s1e2), 4193 SYS_INSN(AT_S12E1R, handle_at_s12), 4194 SYS_INSN(AT_S12E1W, handle_at_s12), 4195 SYS_INSN(AT_S12E0R, handle_at_s12), 4196 SYS_INSN(AT_S12E0W, handle_at_s12), 4197 SYS_INSN(AT_S1E2A, handle_at_s1e2), 4198 4199 SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is), 4200 SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is), 4201 SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is), 4202 SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is), 4203 4204 SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2), 4205 SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2), 4206 SYS_INSN(TLBI_ALLE1OS, handle_alle1is), 4207 SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2), 4208 SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is), 4209 4210 SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2), 4211 SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2), 4212 SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2), 4213 SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2), 4214 4215 SYS_INSN(TLBI_ALLE1IS, handle_alle1is), 4216 4217 SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2), 4218 4219 SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is), 4220 SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is), 4221 SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is), 4222 SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is), 4223 SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is), 4224 SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is), 4225 SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is), 4226 SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is), 4227 SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is), 4228 SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2), 4229 SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2), 4230 SYS_INSN(TLBI_RVAE2, handle_tlbi_el2), 4231 SYS_INSN(TLBI_RVALE2, handle_tlbi_el2), 4232 SYS_INSN(TLBI_ALLE2, handle_tlbi_el2), 4233 SYS_INSN(TLBI_VAE2, handle_tlbi_el2), 4234 4235 SYS_INSN(TLBI_ALLE1, handle_alle1is), 4236 4237 SYS_INSN(TLBI_VALE2, handle_tlbi_el2), 4238 4239 SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is), 4240 4241 SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is), 4242 SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is), 4243 SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is), 4244 SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is), 4245 4246 SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2), 4247 SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2), 4248 SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is), 4249 SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2), 4250 SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is), 4251 4252 SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2), 4253 SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2), 4254 SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2), 4255 SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2), 4256 4257 SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is), 4258 SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2), 4259 SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is), 4260 
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is), 4261 SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is), 4262 SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is), 4263 SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is), 4264 SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is), 4265 SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is), 4266 SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is), 4267 SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is), 4268 SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2), 4269 SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2), 4270 SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2), 4271 SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2), 4272 SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2), 4273 SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2), 4274 SYS_INSN(TLBI_ALLE1NXS, handle_alle1is), 4275 SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2), 4276 SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is), 4277 }; 4278 4279 static bool trap_dbgdidr(struct kvm_vcpu *vcpu, 4280 struct sys_reg_params *p, 4281 const struct sys_reg_desc *r) 4282 { 4283 if (p->is_write) { 4284 return ignore_write(vcpu, p); 4285 } else { 4286 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1); 4287 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); 4288 4289 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | 4290 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) | 4291 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) | 4292 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) | 4293 (1 << 15) | (el3 << 14) | (el3 << 12)); 4294 return true; 4295 } 4296 } 4297 4298 /* 4299 * AArch32 debug register mappings 4300 * 4301 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] 4302 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] 4303 * 4304 * None of the other registers share their location, so treat them as 4305 * if they were 64bit. 4306 */ 4307 #define DBG_BCR_BVR_WCR_WVR(n) \ 4308 /* DBGBVRn */ \ 4309 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \ 4310 trap_dbg_wb_reg, NULL, n }, \ 4311 /* DBGBCRn */ \ 4312 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \ 4313 /* DBGWVRn */ \ 4314 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \ 4315 /* DBGWCRn */ \ 4316 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n } 4317 4318 #define DBGBXVR(n) \ 4319 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \ 4320 trap_dbg_wb_reg, NULL, n } 4321 4322 /* 4323 * Trapped cp14 registers. We generally ignore most of the external 4324 * debug, on the principle that they don't really make sense to a 4325 * guest. Revisit this one day, would this principle change. 
4326 */ 4327 static const struct sys_reg_desc cp14_regs[] = { 4328 /* DBGDIDR */ 4329 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr }, 4330 /* DBGDTRRXext */ 4331 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, 4332 4333 DBG_BCR_BVR_WCR_WVR(0), 4334 /* DBGDSCRint */ 4335 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, 4336 DBG_BCR_BVR_WCR_WVR(1), 4337 /* DBGDCCINT */ 4338 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 }, 4339 /* DBGDSCRext */ 4340 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 }, 4341 DBG_BCR_BVR_WCR_WVR(2), 4342 /* DBGDTR[RT]Xint */ 4343 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, 4344 /* DBGDTR[RT]Xext */ 4345 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, 4346 DBG_BCR_BVR_WCR_WVR(3), 4347 DBG_BCR_BVR_WCR_WVR(4), 4348 DBG_BCR_BVR_WCR_WVR(5), 4349 /* DBGWFAR */ 4350 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, 4351 /* DBGOSECCR */ 4352 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, 4353 DBG_BCR_BVR_WCR_WVR(6), 4354 /* DBGVCR */ 4355 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 }, 4356 DBG_BCR_BVR_WCR_WVR(7), 4357 DBG_BCR_BVR_WCR_WVR(8), 4358 DBG_BCR_BVR_WCR_WVR(9), 4359 DBG_BCR_BVR_WCR_WVR(10), 4360 DBG_BCR_BVR_WCR_WVR(11), 4361 DBG_BCR_BVR_WCR_WVR(12), 4362 DBG_BCR_BVR_WCR_WVR(13), 4363 DBG_BCR_BVR_WCR_WVR(14), 4364 DBG_BCR_BVR_WCR_WVR(15), 4365 4366 /* DBGDRAR (32bit) */ 4367 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, 4368 4369 DBGBXVR(0), 4370 /* DBGOSLAR */ 4371 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 }, 4372 DBGBXVR(1), 4373 /* DBGOSLSR */ 4374 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 }, 4375 DBGBXVR(2), 4376 DBGBXVR(3), 4377 /* DBGOSDLR */ 4378 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, 4379 DBGBXVR(4), 4380 /* DBGPRCR */ 4381 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, 4382 DBGBXVR(5), 4383 DBGBXVR(6), 4384 DBGBXVR(7), 4385 DBGBXVR(8), 4386 DBGBXVR(9), 4387 DBGBXVR(10), 4388 DBGBXVR(11), 4389 DBGBXVR(12), 4390 DBGBXVR(13), 4391 DBGBXVR(14), 4392 DBGBXVR(15), 4393 4394 /* DBGDSAR (32bit) */ 4395 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, 4396 4397 /* DBGDEVID2 */ 4398 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, 4399 /* DBGDEVID1 */ 4400 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, 4401 /* DBGDEVID */ 4402 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, 4403 /* DBGCLAIMSET */ 4404 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, 4405 /* DBGCLAIMCLR */ 4406 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, 4407 /* DBGAUTHSTATUS */ 4408 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, 4409 }; 4410 4411 /* Trapped cp14 64bit registers */ 4412 static const struct sys_reg_desc cp14_64_regs[] = { 4413 /* DBGDRAR (64bit) */ 4414 { Op1( 0), CRm( 1), .access = trap_raz_wi }, 4415 4416 /* DBGDSAR (64bit) */ 4417 { Op1( 0), CRm( 2), .access = trap_raz_wi }, 4418 }; 4419 4420 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \ 4421 AA32(_map), \ 4422 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \ 4423 .visibility = pmu_visibility 4424 4425 /* Macro to expand the PMEVCNTRn register */ 4426 #define PMU_PMEVCNTR(n) \ 4427 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ 4428 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ 4429 .access = access_pmu_evcntr } 4430 4431 /* Macro to expand the PMEVTYPERn register */ 4432 #define PMU_PMEVTYPER(n) \ 4433 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ 4434 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ 4435 
.access = access_pmu_evtyper } 4436 /* 4437 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 4438 * depending on the way they are accessed (as a 32bit or a 64bit 4439 * register). 4440 */ 4441 static const struct sys_reg_desc cp15_regs[] = { 4442 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr }, 4443 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 }, 4444 /* ACTLR */ 4445 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 }, 4446 /* ACTLR2 */ 4447 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 }, 4448 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, 4449 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 }, 4450 /* TTBCR */ 4451 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 }, 4452 /* TTBCR2 */ 4453 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 }, 4454 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 }, 4455 { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, 4456 /* DFSR */ 4457 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 }, 4458 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 }, 4459 /* ADFSR */ 4460 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 }, 4461 /* AIFSR */ 4462 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 }, 4463 /* DFAR */ 4464 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 }, 4465 /* IFAR */ 4466 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 }, 4467 4468 /* 4469 * DC{C,I,CI}SW operations: 4470 */ 4471 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, 4472 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 4473 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 4474 4475 /* PMU */ 4476 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr }, 4477 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten }, 4478 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten }, 4479 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs }, 4480 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc }, 4481 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr }, 4482 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid }, 4483 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid }, 4484 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr }, 4485 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper }, 4486 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr }, 4487 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr }, 4488 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten }, 4489 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten }, 4490 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs }, 4491 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid }, 4492 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid }, 4493 /* PMMIR */ 4494 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi }, 4495 4496 /* PRRR/MAIR0 */ 4497 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, 4498 /* NMRR/MAIR1 */ 4499 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 }, 4500 /* AMAIR0 */ 4501 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 }, 4502 
/* AMAIR1 */ 4503 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 }, 4504 4505 { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, 4506 { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, 4507 { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, 4508 { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, 4509 { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, 4510 { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, 4511 { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, 4512 { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, 4513 { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, 4514 { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, 4515 { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, 4516 { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, 4517 { CP15_SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir }, 4518 { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, 4519 { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, 4520 { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, 4521 { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, 4522 { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, 4523 { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, 4524 { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, 4525 { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, 4526 { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, 4527 4528 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 }, 4529 4530 /* Arch Tmers */ 4531 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer }, 4532 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer }, 4533 4534 /* PMEVCNTRn */ 4535 PMU_PMEVCNTR(0), 4536 PMU_PMEVCNTR(1), 4537 PMU_PMEVCNTR(2), 4538 PMU_PMEVCNTR(3), 4539 PMU_PMEVCNTR(4), 4540 PMU_PMEVCNTR(5), 4541 PMU_PMEVCNTR(6), 4542 PMU_PMEVCNTR(7), 4543 PMU_PMEVCNTR(8), 4544 PMU_PMEVCNTR(9), 4545 PMU_PMEVCNTR(10), 4546 PMU_PMEVCNTR(11), 4547 PMU_PMEVCNTR(12), 4548 PMU_PMEVCNTR(13), 4549 PMU_PMEVCNTR(14), 4550 PMU_PMEVCNTR(15), 4551 PMU_PMEVCNTR(16), 4552 PMU_PMEVCNTR(17), 4553 PMU_PMEVCNTR(18), 4554 PMU_PMEVCNTR(19), 4555 PMU_PMEVCNTR(20), 4556 PMU_PMEVCNTR(21), 4557 PMU_PMEVCNTR(22), 4558 PMU_PMEVCNTR(23), 4559 PMU_PMEVCNTR(24), 4560 PMU_PMEVCNTR(25), 4561 PMU_PMEVCNTR(26), 4562 PMU_PMEVCNTR(27), 4563 PMU_PMEVCNTR(28), 4564 PMU_PMEVCNTR(29), 4565 PMU_PMEVCNTR(30), 4566 /* PMEVTYPERn */ 4567 PMU_PMEVTYPER(0), 4568 PMU_PMEVTYPER(1), 4569 PMU_PMEVTYPER(2), 4570 PMU_PMEVTYPER(3), 4571 PMU_PMEVTYPER(4), 4572 PMU_PMEVTYPER(5), 4573 PMU_PMEVTYPER(6), 4574 PMU_PMEVTYPER(7), 4575 PMU_PMEVTYPER(8), 4576 PMU_PMEVTYPER(9), 4577 PMU_PMEVTYPER(10), 4578 PMU_PMEVTYPER(11), 4579 PMU_PMEVTYPER(12), 4580 PMU_PMEVTYPER(13), 4581 PMU_PMEVTYPER(14), 4582 PMU_PMEVTYPER(15), 4583 PMU_PMEVTYPER(16), 4584 PMU_PMEVTYPER(17), 4585 PMU_PMEVTYPER(18), 4586 PMU_PMEVTYPER(19), 4587 PMU_PMEVTYPER(20), 4588 PMU_PMEVTYPER(21), 4589 PMU_PMEVTYPER(22), 4590 PMU_PMEVTYPER(23), 4591 PMU_PMEVTYPER(24), 4592 PMU_PMEVTYPER(25), 4593 PMU_PMEVTYPER(26), 4594 PMU_PMEVTYPER(27), 4595 PMU_PMEVTYPER(28), 4596 PMU_PMEVTYPER(29), 4597 PMU_PMEVTYPER(30), 4598 /* PMCCFILTR */ 4599 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper }, 4600 4601 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr }, 4602 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr }, 4603 4604 /* CCSIDR2 */ 4605 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access }, 4606 4607 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 }, 4608 }; 4609 4610 static const struct sys_reg_desc cp15_64_regs[] = { 4611 { Op1( 0), CRn( 0), 
CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, 4612 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr }, 4613 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ 4614 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer }, 4615 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 }, 4616 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ 4617 { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer }, 4618 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ 4619 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, 4620 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer }, 4621 { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer }, 4622 }; 4623 4624 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, 4625 bool reset_check) 4626 { 4627 unsigned int i; 4628 4629 for (i = 0; i < n; i++) { 4630 if (reset_check && table[i].reg && !table[i].reset) { 4631 kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n", 4632 &table[i], i, table[i].name); 4633 return false; 4634 } 4635 4636 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { 4637 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n", 4638 &table[i], i, table[i - 1].name, table[i].name); 4639 return false; 4640 } 4641 } 4642 4643 return true; 4644 } 4645 4646 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu) 4647 { 4648 kvm_inject_undefined(vcpu); 4649 return 1; 4650 } 4651 4652 static void perform_access(struct kvm_vcpu *vcpu, 4653 struct sys_reg_params *params, 4654 const struct sys_reg_desc *r) 4655 { 4656 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); 4657 4658 /* Check for regs disabled by runtime config */ 4659 if (sysreg_hidden(vcpu, r)) { 4660 kvm_inject_undefined(vcpu); 4661 return; 4662 } 4663 4664 /* 4665 * Not having an accessor means that we have configured a trap 4666 * that we don't know how to handle. This certainly qualifies 4667 * as a gross bug that should be fixed right away. 4668 */ 4669 if (!r->access) { 4670 bad_trap(vcpu, params, r, "register access"); 4671 return; 4672 } 4673 4674 /* Skip instruction if instructed so */ 4675 if (likely(r->access(vcpu, params, r))) 4676 kvm_incr_pc(vcpu); 4677 } 4678 4679 /* 4680 * emulate_cp -- tries to match a sys_reg access in a handling table, and 4681 * call the corresponding trap handler. 4682 * 4683 * @params: pointer to the descriptor of the access 4684 * @table: array of trap descriptors 4685 * @num: size of the trap descriptor array 4686 * 4687 * Return true if the access has been handled, false if not. 
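 *
 * The table is expected to be sorted by encoding; this is verified at
 * init time by check_sysreg_table().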
4688 */ 4689 static bool emulate_cp(struct kvm_vcpu *vcpu, 4690 struct sys_reg_params *params, 4691 const struct sys_reg_desc *table, 4692 size_t num) 4693 { 4694 const struct sys_reg_desc *r; 4695 4696 if (!table) 4697 return false; /* Not handled */ 4698 4699 r = find_reg(params, table, num); 4700 4701 if (r) { 4702 perform_access(vcpu, params, r); 4703 return true; 4704 } 4705 4706 /* Not handled */ 4707 return false; 4708 } 4709 4710 static void unhandled_cp_access(struct kvm_vcpu *vcpu, 4711 struct sys_reg_params *params) 4712 { 4713 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu); 4714 int cp = -1; 4715 4716 switch (esr_ec) { 4717 case ESR_ELx_EC_CP15_32: 4718 case ESR_ELx_EC_CP15_64: 4719 cp = 15; 4720 break; 4721 case ESR_ELx_EC_CP14_MR: 4722 case ESR_ELx_EC_CP14_64: 4723 cp = 14; 4724 break; 4725 default: 4726 WARN_ON(1); 4727 } 4728 4729 print_sys_reg_msg(params, 4730 "Unsupported guest CP%d access at: %08lx [%08lx]\n", 4731 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); 4732 kvm_inject_undefined(vcpu); 4733 } 4734 4735 /** 4736 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access 4737 * @vcpu: The VCPU pointer 4738 * @global: &struct sys_reg_desc 4739 * @nr_global: size of the @global array 4740 */ 4741 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, 4742 const struct sys_reg_desc *global, 4743 size_t nr_global) 4744 { 4745 struct sys_reg_params params; 4746 u64 esr = kvm_vcpu_get_esr(vcpu); 4747 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4748 int Rt2 = (esr >> 10) & 0x1f; 4749 4750 params.CRm = (esr >> 1) & 0xf; 4751 params.is_write = ((esr & 1) == 0); 4752 4753 params.Op0 = 0; 4754 params.Op1 = (esr >> 16) & 0xf; 4755 params.Op2 = 0; 4756 params.CRn = 0; 4757 4758 /* 4759 * Make a 64-bit value out of Rt and Rt2. As we use the same trap 4760 * backends between AArch32 and AArch64, we get away with it. 4761 */ 4762 if (params.is_write) { 4763 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; 4764 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; 4765 } 4766 4767 /* 4768 * If the table contains a handler, handle the 4769 * potential register operation in the case of a read and return 4770 * with success. 4771 */ 4772 if (emulate_cp(vcpu, ¶ms, global, nr_global)) { 4773 /* Split up the value between registers for the read side */ 4774 if (!params.is_write) { 4775 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); 4776 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); 4777 } 4778 4779 return 1; 4780 } 4781 4782 unhandled_cp_access(vcpu, ¶ms); 4783 return 1; 4784 } 4785 4786 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params); 4787 4788 /* 4789 * The CP10 ID registers are architecturally mapped to AArch64 feature 4790 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses 4791 * from AArch32. 
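 *
 * For example, a guest "VMRS <Rt>, MVFR1" traps with reg_id == 0b0110 and
 * is rerouted below to MVFR1_EL1 (Op0=3, Op1=0, CRn=0, CRm=3, Op2=1).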
4792 */ 4793 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params) 4794 { 4795 u8 reg_id = (esr >> 10) & 0xf; 4796 bool valid; 4797 4798 params->is_write = ((esr & 1) == 0); 4799 params->Op0 = 3; 4800 params->Op1 = 0; 4801 params->CRn = 0; 4802 params->CRm = 3; 4803 4804 /* CP10 ID registers are read-only */ 4805 valid = !params->is_write; 4806 4807 switch (reg_id) { 4808 /* MVFR0 */ 4809 case 0b0111: 4810 params->Op2 = 0; 4811 break; 4812 /* MVFR1 */ 4813 case 0b0110: 4814 params->Op2 = 1; 4815 break; 4816 /* MVFR2 */ 4817 case 0b0101: 4818 params->Op2 = 2; 4819 break; 4820 default: 4821 valid = false; 4822 } 4823 4824 if (valid) 4825 return true; 4826 4827 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n", 4828 str_write_read(params->is_write), reg_id); 4829 return false; 4830 } 4831 4832 /** 4833 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and 4834 * VFP Register' from AArch32. 4835 * @vcpu: The vCPU pointer 4836 * 4837 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers. 4838 * Work out the correct AArch64 system register encoding and reroute to the 4839 * AArch64 system register emulation. 4840 */ 4841 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu) 4842 { 4843 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4844 u64 esr = kvm_vcpu_get_esr(vcpu); 4845 struct sys_reg_params params; 4846 4847 /* UNDEF on any unhandled register access */ 4848 if (!kvm_esr_cp10_id_to_sys64(esr, ¶ms)) { 4849 kvm_inject_undefined(vcpu); 4850 return 1; 4851 } 4852 4853 if (emulate_sys_reg(vcpu, ¶ms)) 4854 vcpu_set_reg(vcpu, Rt, params.regval); 4855 4856 return 1; 4857 } 4858 4859 /** 4860 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where 4861 * CRn=0, which corresponds to the AArch32 feature 4862 * registers. 4863 * @vcpu: the vCPU pointer 4864 * @params: the system register access parameters. 4865 * 4866 * Our cp15 system register tables do not enumerate the AArch32 feature 4867 * registers. Conveniently, our AArch64 table does, and the AArch32 system 4868 * register encoding can be trivially remapped into the AArch64 for the feature 4869 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same. 4870 * 4871 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit 4872 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this 4873 * range are either UNKNOWN or RES0. Rerouting remains architectural as we 4874 * treat undefined registers in this range as RAZ. 4875 */ 4876 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu, 4877 struct sys_reg_params *params) 4878 { 4879 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4880 4881 /* Treat impossible writes to RO registers as UNDEFINED */ 4882 if (params->is_write) { 4883 unhandled_cp_access(vcpu, params); 4884 return 1; 4885 } 4886 4887 params->Op0 = 3; 4888 4889 /* 4890 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32. 4891 * Avoid conflicting with future expansion of AArch64 feature registers 4892 * and simply treat them as RAZ here. 
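 *
 * For example, (Op1=0, CRn=c0, CRm=c4, Op2=0) would alias ID_AA64PFR0_EL1
 * once Op0==3 is appended, so such reads simply return 0 instead of being
 * rerouted.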
4893 */ 4894 if (params->CRm > 3) 4895 params->regval = 0; 4896 else if (!emulate_sys_reg(vcpu, params)) 4897 return 1; 4898 4899 vcpu_set_reg(vcpu, Rt, params->regval); 4900 return 1; 4901 } 4902 4903 /** 4904 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access 4905 * @vcpu: The VCPU pointer 4906 * @params: &struct sys_reg_params 4907 * @global: &struct sys_reg_desc 4908 * @nr_global: size of the @global array 4909 */ 4910 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, 4911 struct sys_reg_params *params, 4912 const struct sys_reg_desc *global, 4913 size_t nr_global) 4914 { 4915 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4916 4917 params->regval = vcpu_get_reg(vcpu, Rt); 4918 4919 if (emulate_cp(vcpu, params, global, nr_global)) { 4920 if (!params->is_write) 4921 vcpu_set_reg(vcpu, Rt, params->regval); 4922 return 1; 4923 } 4924 4925 unhandled_cp_access(vcpu, params); 4926 return 1; 4927 } 4928 4929 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu) 4930 { 4931 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs)); 4932 } 4933 4934 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu) 4935 { 4936 struct sys_reg_params params; 4937 4938 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); 4939 4940 /* 4941 * Certain AArch32 ID registers are handled by rerouting to the AArch64 4942 * system register table. Registers in the ID range where CRm=0 are 4943 * excluded from this scheme as they do not trivially map into AArch64 4944 * system register encodings, except for AIDR/REVIDR. 4945 */ 4946 if (params.Op1 == 0 && params.CRn == 0 && 4947 (params.CRm || params.Op2 == 6 /* REVIDR */)) 4948 return kvm_emulate_cp15_id_reg(vcpu, ¶ms); 4949 if (params.Op1 == 1 && params.CRn == 0 && 4950 params.CRm == 0 && params.Op2 == 7 /* AIDR */) 4951 return kvm_emulate_cp15_id_reg(vcpu, ¶ms); 4952 4953 return kvm_handle_cp_32(vcpu, ¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); 4954 } 4955 4956 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu) 4957 { 4958 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs)); 4959 } 4960 4961 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu) 4962 { 4963 struct sys_reg_params params; 4964 4965 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); 4966 4967 return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs)); 4968 } 4969 4970 /** 4971 * emulate_sys_reg - Emulate a guest access to an AArch64 system register 4972 * @vcpu: The VCPU pointer 4973 * @params: Decoded system register parameters 4974 * 4975 * Return: true if the system register access was successful, false otherwise. 
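 *
 * Accesses to encodings that have no entry in sys_reg_descs[] end up
 * injecting an UNDEF into the guest.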
4976 */ 4977 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, 4978 struct sys_reg_params *params) 4979 { 4980 const struct sys_reg_desc *r; 4981 4982 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 4983 if (likely(r)) { 4984 perform_access(vcpu, params, r); 4985 return true; 4986 } 4987 4988 print_sys_reg_msg(params, 4989 "Unsupported guest sys_reg access at: %lx [%08lx]\n", 4990 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); 4991 kvm_inject_undefined(vcpu); 4992 4993 return false; 4994 } 4995 4996 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, loff_t pos) 4997 { 4998 unsigned long i, idreg_idx = 0; 4999 5000 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 5001 const struct sys_reg_desc *r = &sys_reg_descs[i]; 5002 5003 if (!is_vm_ftr_id_reg(reg_to_encoding(r))) 5004 continue; 5005 5006 if (idreg_idx++ == pos) 5007 return r; 5008 } 5009 5010 return NULL; 5011 } 5012 5013 static void *idregs_debug_start(struct seq_file *s, loff_t *pos) 5014 { 5015 struct kvm *kvm = s->private; 5016 5017 if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) 5018 return NULL; 5019 5020 return (void *)idregs_debug_find(kvm, *pos); 5021 } 5022 5023 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos) 5024 { 5025 struct kvm *kvm = s->private; 5026 5027 (*pos)++; 5028 5029 return (void *)idregs_debug_find(kvm, *pos); 5030 } 5031 5032 static void idregs_debug_stop(struct seq_file *s, void *v) 5033 { 5034 } 5035 5036 static int idregs_debug_show(struct seq_file *s, void *v) 5037 { 5038 const struct sys_reg_desc *desc = v; 5039 struct kvm *kvm = s->private; 5040 5041 if (!desc) 5042 return 0; 5043 5044 seq_printf(s, "%20s:\t%016llx\n", 5045 desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc))); 5046 5047 return 0; 5048 } 5049 5050 static const struct seq_operations idregs_debug_sops = { 5051 .start = idregs_debug_start, 5052 .next = idregs_debug_next, 5053 .stop = idregs_debug_stop, 5054 .show = idregs_debug_show, 5055 }; 5056 5057 DEFINE_SEQ_ATTRIBUTE(idregs_debug); 5058 5059 static const struct sys_reg_desc *sr_resx_find(struct kvm *kvm, loff_t pos) 5060 { 5061 unsigned long i, sr_idx = 0; 5062 5063 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 5064 const struct sys_reg_desc *r = &sys_reg_descs[i]; 5065 5066 if (r->reg < __SANITISED_REG_START__) 5067 continue; 5068 5069 if (sr_idx++ == pos) 5070 return r; 5071 } 5072 5073 return NULL; 5074 } 5075 5076 static void *sr_resx_start(struct seq_file *s, loff_t *pos) 5077 { 5078 struct kvm *kvm = s->private; 5079 5080 if (!kvm->arch.sysreg_masks) 5081 return NULL; 5082 5083 return (void *)sr_resx_find(kvm, *pos); 5084 } 5085 5086 static void *sr_resx_next(struct seq_file *s, void *v, loff_t *pos) 5087 { 5088 struct kvm *kvm = s->private; 5089 5090 (*pos)++; 5091 5092 return (void *)sr_resx_find(kvm, *pos); 5093 } 5094 5095 static void sr_resx_stop(struct seq_file *s, void *v) 5096 { 5097 } 5098 5099 static int sr_resx_show(struct seq_file *s, void *v) 5100 { 5101 const struct sys_reg_desc *desc = v; 5102 struct kvm *kvm = s->private; 5103 struct resx resx; 5104 5105 if (!desc) 5106 return 0; 5107 5108 resx = kvm_get_sysreg_resx(kvm, desc->reg); 5109 5110 seq_printf(s, "%20s:\tRES0:%016llx\tRES1:%016llx\n", 5111 desc->name, resx.res0, resx.res1); 5112 5113 return 0; 5114 } 5115 5116 static const struct seq_operations sr_resx_sops = { 5117 .start = sr_resx_start, 5118 .next = sr_resx_next, 5119 .stop = sr_resx_stop, 5120 .show = sr_resx_show, 5121 }; 5122 5123 DEFINE_SEQ_ATTRIBUTE(sr_resx); 5124 5125 
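/*
 * Both seq_file implementations above are exposed via per-VM debugfs files
 * created below, under kvm->debugfs_dentry (typically a directory named
 * after the creating task's pid and the VM fd under the kvm debugfs root).
 * Purely as an illustration (register names and values here are made up),
 * the output follows the seq_printf() formats above:
 *
 *	# cat idregs
 *	     ID_AA64PFR0_EL1:	0000000000001011
 *	# cat resx
 *	            TCR2_EL2:	RES0:fffffffffffc0000	RES1:0000000000000000
 */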
void kvm_sys_regs_create_debugfs(struct kvm *kvm) 5126 { 5127 debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, 5128 &idregs_debug_fops); 5129 debugfs_create_file("resx", 0444, kvm->debugfs_dentry, kvm, 5130 &sr_resx_fops); 5131 } 5132 5133 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg) 5134 { 5135 u32 id = reg_to_encoding(reg); 5136 struct kvm *kvm = vcpu->kvm; 5137 5138 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) 5139 return; 5140 5141 kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg)); 5142 } 5143 5144 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu, 5145 const struct sys_reg_desc *reg) 5146 { 5147 if (kvm_vcpu_initialized(vcpu)) 5148 return; 5149 5150 reg->reset(vcpu, reg); 5151 } 5152 5153 /** 5154 * kvm_reset_sys_regs - sets system registers to reset value 5155 * @vcpu: The VCPU pointer 5156 * 5157 * This function finds the right table above and sets the registers on the 5158 * virtual CPU struct to their architecturally defined reset values. 5159 */ 5160 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) 5161 { 5162 struct kvm *kvm = vcpu->kvm; 5163 unsigned long i; 5164 5165 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 5166 const struct sys_reg_desc *r = &sys_reg_descs[i]; 5167 5168 if (!r->reset) 5169 continue; 5170 5171 if (is_vm_ftr_id_reg(reg_to_encoding(r))) 5172 reset_vm_ftr_id_reg(vcpu, r); 5173 else if (is_vcpu_ftr_id_reg(reg_to_encoding(r))) 5174 reset_vcpu_ftr_id_reg(vcpu, r); 5175 else 5176 r->reset(vcpu, r); 5177 5178 if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) 5179 __vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0); 5180 } 5181 5182 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); 5183 5184 if (kvm_vcpu_has_pmu(vcpu)) 5185 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 5186 } 5187 5188 /** 5189 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction 5190 * trap on a guest execution 5191 * @vcpu: The VCPU pointer 5192 */ 5193 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) 5194 { 5195 const struct sys_reg_desc *desc = NULL; 5196 struct sys_reg_params params; 5197 unsigned long esr = kvm_vcpu_get_esr(vcpu); 5198 int Rt = kvm_vcpu_sys_get_rt(vcpu); 5199 int sr_idx; 5200 5201 trace_kvm_handle_sys_reg(esr); 5202 5203 if (triage_sysreg_trap(vcpu, &sr_idx)) 5204 return 1; 5205 5206 params = esr_sys64_to_params(esr); 5207 params.regval = vcpu_get_reg(vcpu, Rt); 5208 5209 /* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */ 5210 if (params.Op0 == 2 || params.Op0 == 3) 5211 desc = &sys_reg_descs[sr_idx]; 5212 else 5213 desc = &sys_insn_descs[sr_idx]; 5214 5215 perform_access(vcpu, ¶ms, desc); 5216 5217 /* Read from system register? */ 5218 if (!params.is_write && 5219 (params.Op0 == 2 || params.Op0 == 3)) 5220 vcpu_set_reg(vcpu, Rt, params.regval); 5221 5222 return 1; 5223 } 5224 5225 /****************************************************************************** 5226 * Userspace API 5227 *****************************************************************************/ 5228 5229 static bool index_to_params(u64 id, struct sys_reg_params *params) 5230 { 5231 switch (id & KVM_REG_SIZE_MASK) { 5232 case KVM_REG_SIZE_U64: 5233 /* Any unused index bits means it's not valid. 
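 * A well-formed index is the value built by sys_reg_to_index() below:
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG, plus the
 * Op0/Op1/CRn/CRm/Op2 fields.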
*/ 5234 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 5235 | KVM_REG_ARM_COPROC_MASK 5236 | KVM_REG_ARM64_SYSREG_OP0_MASK 5237 | KVM_REG_ARM64_SYSREG_OP1_MASK 5238 | KVM_REG_ARM64_SYSREG_CRN_MASK 5239 | KVM_REG_ARM64_SYSREG_CRM_MASK 5240 | KVM_REG_ARM64_SYSREG_OP2_MASK)) 5241 return false; 5242 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) 5243 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); 5244 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) 5245 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); 5246 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) 5247 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); 5248 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) 5249 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); 5250 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) 5251 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); 5252 return true; 5253 default: 5254 return false; 5255 } 5256 } 5257 5258 const struct sys_reg_desc *get_reg_by_id(u64 id, 5259 const struct sys_reg_desc table[], 5260 unsigned int num) 5261 { 5262 struct sys_reg_params params; 5263 5264 if (!index_to_params(id, ¶ms)) 5265 return NULL; 5266 5267 return find_reg(¶ms, table, num); 5268 } 5269 5270 /* Decode an index value, and find the sys_reg_desc entry. */ 5271 static const struct sys_reg_desc * 5272 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id, 5273 const struct sys_reg_desc table[], unsigned int num) 5274 5275 { 5276 const struct sys_reg_desc *r; 5277 5278 /* We only do sys_reg for now. */ 5279 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) 5280 return NULL; 5281 5282 r = get_reg_by_id(id, table, num); 5283 5284 /* Not saved in the sys_reg array and not otherwise accessible? */ 5285 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) 5286 r = NULL; 5287 5288 return r; 5289 } 5290 5291 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) 5292 { 5293 u32 val; 5294 u32 __user *uval = uaddr; 5295 5296 /* Fail if we have unknown bits set. */ 5297 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 5298 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 5299 return -ENOENT; 5300 5301 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 5302 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 5303 if (KVM_REG_SIZE(id) != 4) 5304 return -ENOENT; 5305 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 5306 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 5307 if (val >= CSSELR_MAX) 5308 return -ENOENT; 5309 5310 return put_user(get_ccsidr(vcpu, val), uval); 5311 default: 5312 return -ENOENT; 5313 } 5314 } 5315 5316 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) 5317 { 5318 u32 val, newval; 5319 u32 __user *uval = uaddr; 5320 5321 /* Fail if we have unknown bits set. 
*/ 5322 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 5323 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 5324 return -ENOENT; 5325 5326 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 5327 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 5328 if (KVM_REG_SIZE(id) != 4) 5329 return -ENOENT; 5330 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 5331 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 5332 if (val >= CSSELR_MAX) 5333 return -ENOENT; 5334 5335 if (get_user(newval, uval)) 5336 return -EFAULT; 5337 5338 return set_ccsidr(vcpu, val, newval); 5339 default: 5340 return -ENOENT; 5341 } 5342 } 5343 5344 static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg) 5345 { 5346 switch(reg->id) { 5347 case KVM_REG_ARM_TIMER_CVAL: 5348 return TO_ARM64_SYS_REG(CNTV_CVAL_EL0); 5349 case KVM_REG_ARM_TIMER_CNT: 5350 return TO_ARM64_SYS_REG(CNTVCT_EL0); 5351 default: 5352 return reg->id; 5353 } 5354 } 5355 5356 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, 5357 const struct sys_reg_desc table[], unsigned int num) 5358 { 5359 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; 5360 const struct sys_reg_desc *r; 5361 u64 id = kvm_one_reg_to_id(reg); 5362 u64 val; 5363 int ret; 5364 5365 r = id_to_sys_reg_desc(vcpu, id, table, num); 5366 if (!r || sysreg_hidden(vcpu, r)) 5367 return -ENOENT; 5368 5369 if (r->get_user) { 5370 ret = (r->get_user)(vcpu, r, &val); 5371 } else { 5372 val = __vcpu_sys_reg(vcpu, r->reg); 5373 ret = 0; 5374 } 5375 5376 if (!ret) 5377 ret = put_user(val, uaddr); 5378 5379 return ret; 5380 } 5381 5382 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 5383 { 5384 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 5385 5386 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 5387 return demux_c15_get(vcpu, reg->id, uaddr); 5388 5389 return kvm_sys_reg_get_user(vcpu, reg, 5390 sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 5391 } 5392 5393 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, 5394 const struct sys_reg_desc table[], unsigned int num) 5395 { 5396 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; 5397 const struct sys_reg_desc *r; 5398 u64 id = kvm_one_reg_to_id(reg); 5399 u64 val; 5400 int ret; 5401 5402 if (get_user(val, uaddr)) 5403 return -EFAULT; 5404 5405 r = id_to_sys_reg_desc(vcpu, id, table, num); 5406 if (!r || sysreg_hidden(vcpu, r)) 5407 return -ENOENT; 5408 5409 if (sysreg_user_write_ignore(vcpu, r)) 5410 return 0; 5411 5412 if (r->set_user) { 5413 ret = (r->set_user)(vcpu, r, val); 5414 } else { 5415 __vcpu_assign_sys_reg(vcpu, r->reg, val); 5416 ret = 0; 5417 } 5418 5419 return ret; 5420 } 5421 5422 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 5423 { 5424 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 5425 5426 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 5427 return demux_c15_set(vcpu, reg->id, uaddr); 5428 5429 return kvm_sys_reg_set_user(vcpu, reg, 5430 sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 5431 } 5432 5433 static unsigned int num_demux_regs(void) 5434 { 5435 return CSSELR_MAX; 5436 } 5437 5438 static int write_demux_regids(u64 __user *uindices) 5439 { 5440 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 5441 unsigned int i; 5442 5443 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 5444 for (i = 0; i < CSSELR_MAX; i++) { 5445 if (put_user(val | i, uindices)) 5446 return -EFAULT; 5447 uindices++; 5448 } 5449 return 0; 5450 } 5451 5452 static u64 sys_reg_to_index(const struct 
sys_reg_desc *reg) 5453 { 5454 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 5455 KVM_REG_ARM64_SYSREG | 5456 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | 5457 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | 5458 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | 5459 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | 5460 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); 5461 } 5462 5463 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) 5464 { 5465 u64 idx; 5466 5467 if (!*uind) 5468 return true; 5469 5470 switch (reg_to_encoding(reg)) { 5471 case SYS_CNTV_CVAL_EL0: 5472 idx = KVM_REG_ARM_TIMER_CVAL; 5473 break; 5474 case SYS_CNTVCT_EL0: 5475 idx = KVM_REG_ARM_TIMER_CNT; 5476 break; 5477 default: 5478 idx = sys_reg_to_index(reg); 5479 } 5480 5481 if (put_user(idx, *uind)) 5482 return false; 5483 5484 (*uind)++; 5485 return true; 5486 } 5487 5488 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, 5489 const struct sys_reg_desc *rd, 5490 u64 __user **uind, 5491 unsigned int *total) 5492 { 5493 /* 5494 * Ignore registers we trap but don't save, 5495 * and for which no custom user accessor is provided. 5496 */ 5497 if (!(rd->reg || rd->get_user)) 5498 return 0; 5499 5500 if (sysreg_hidden(vcpu, rd)) 5501 return 0; 5502 5503 if (!copy_reg_to_user(rd, uind)) 5504 return -EFAULT; 5505 5506 (*total)++; 5507 return 0; 5508 } 5509 5510 /* Assumed ordered tables, see kvm_sys_reg_table_init. */ 5511 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) 5512 { 5513 const struct sys_reg_desc *i2, *end2; 5514 unsigned int total = 0; 5515 int err; 5516 5517 i2 = sys_reg_descs; 5518 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); 5519 5520 while (i2 != end2) { 5521 err = walk_one_sys_reg(vcpu, i2++, &uind, &total); 5522 if (err) 5523 return err; 5524 } 5525 return total; 5526 } 5527 5528 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) 5529 { 5530 return num_demux_regs() 5531 + walk_sys_regs(vcpu, (u64 __user *)NULL); 5532 } 5533 5534 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 5535 { 5536 int err; 5537 5538 err = walk_sys_regs(vcpu, uindices); 5539 if (err < 0) 5540 return err; 5541 uindices += err; 5542 5543 return write_demux_regids(uindices); 5544 } 5545 5546 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \ 5547 KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \ 5548 sys_reg_Op1(r), \ 5549 sys_reg_CRn(r), \ 5550 sys_reg_CRm(r), \ 5551 sys_reg_Op2(r)) 5552 5553 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range) 5554 { 5555 const void *zero_page = page_to_virt(ZERO_PAGE(0)); 5556 u64 __user *masks = (u64 __user *)range->addr; 5557 5558 /* Only feature id range is supported, reserved[13] must be zero. 
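 *
 * Userspace invokes this through the KVM_ARM_GET_REG_WRITABLE_MASKS vm
 * ioctl, passing range == 0 and addr pointing at an array of
 * KVM_ARM_FEATURE_ID_RANGE_SIZE u64 masks; anything not filled in below
 * reads back as zero thanks to the clear_user().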
*/ 5559 if (range->range || 5560 memcmp(range->reserved, zero_page, sizeof(range->reserved))) 5561 return -EINVAL; 5562 5563 /* Wipe the whole thing first */ 5564 if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64))) 5565 return -EFAULT; 5566 5567 for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 5568 const struct sys_reg_desc *reg = &sys_reg_descs[i]; 5569 u32 encoding = reg_to_encoding(reg); 5570 u64 val; 5571 5572 if (!is_feature_id_reg(encoding) || !reg->set_user) 5573 continue; 5574 5575 if (!reg->val || 5576 (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) { 5577 continue; 5578 } 5579 val = reg->val; 5580 5581 if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding)))) 5582 return -EFAULT; 5583 } 5584 5585 return 0; 5586 } 5587 5588 static void vcpu_set_hcr(struct kvm_vcpu *vcpu) 5589 { 5590 struct kvm *kvm = vcpu->kvm; 5591 5592 if (has_vhe() || has_hvhe()) 5593 vcpu->arch.hcr_el2 |= HCR_E2H; 5594 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) { 5595 /* route synchronous external abort exceptions to EL2 */ 5596 vcpu->arch.hcr_el2 |= HCR_TEA; 5597 /* trap error record accesses */ 5598 vcpu->arch.hcr_el2 |= HCR_TERR; 5599 } 5600 5601 if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 5602 vcpu->arch.hcr_el2 |= HCR_FWB; 5603 5604 if (cpus_have_final_cap(ARM64_HAS_EVT) && 5605 !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) && 5606 kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0)) 5607 vcpu->arch.hcr_el2 |= HCR_TID4; 5608 else 5609 vcpu->arch.hcr_el2 |= HCR_TID2; 5610 5611 if (vcpu_el1_is_32bit(vcpu)) 5612 vcpu->arch.hcr_el2 &= ~HCR_RW; 5613 5614 if (kvm_has_mte(vcpu->kvm)) 5615 vcpu->arch.hcr_el2 |= HCR_ATA; 5616 else 5617 vcpu->arch.hcr_el2 |= HCR_TID5; 5618 5619 /* 5620 * In the absence of FGT, we cannot independently trap TLBI 5621 * Range instructions. This isn't great, but trapping all 5622 * TLBIs would be far worse. Live with it... 5623 */ 5624 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 5625 vcpu->arch.hcr_el2 |= HCR_TTLBOS; 5626 } 5627 5628 void kvm_calculate_traps(struct kvm_vcpu *vcpu) 5629 { 5630 struct kvm *kvm = vcpu->kvm; 5631 5632 mutex_lock(&kvm->arch.config_lock); 5633 vcpu_set_hcr(vcpu); 5634 vcpu_set_ich_hcr(vcpu); 5635 vcpu_set_hcrx(vcpu); 5636 5637 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) 5638 goto out; 5639 5640 compute_fgu(kvm, HFGRTR_GROUP); 5641 compute_fgu(kvm, HFGITR_GROUP); 5642 compute_fgu(kvm, HDFGRTR_GROUP); 5643 compute_fgu(kvm, HAFGRTR_GROUP); 5644 compute_fgu(kvm, HFGRTR2_GROUP); 5645 compute_fgu(kvm, HFGITR2_GROUP); 5646 compute_fgu(kvm, HDFGRTR2_GROUP); 5647 5648 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); 5649 out: 5650 mutex_unlock(&kvm->arch.config_lock); 5651 } 5652 5653 /* 5654 * Perform last adjustments to the ID registers that are implied by the 5655 * configuration outside of the ID regs themselves, as well as any 5656 * initialisation that directly depend on these ID registers (such as 5657 * RES0/RES1 behaviours). This is not the place to configure traps though. 5658 * 5659 * Because this can be called once per CPU, changes must be idempotent. 5660 */ 5661 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) 5662 { 5663 struct kvm *kvm = vcpu->kvm; 5664 5665 guard(mutex)(&kvm->arch.config_lock); 5666 5667 /* 5668 * This hacks into the ID registers, so only perform it when the 5669 * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream. 
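 *
 * (Concretely: when no in-kernel irqchip is present, the GIC field is
 * stripped from ID_AA64PFR0_EL1 and ID_PFR1_EL1 below.)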
5670 */ 5671 if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) { 5672 u64 val; 5673 5674 val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC; 5675 kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val); 5676 val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC; 5677 kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val); 5678 } 5679 5680 if (vcpu_has_nv(vcpu)) { 5681 int ret = kvm_init_nv_sysregs(vcpu); 5682 if (ret) 5683 return ret; 5684 } 5685 5686 return 0; 5687 } 5688 5689 int __init kvm_sys_reg_table_init(void) 5690 { 5691 const struct sys_reg_desc *gicv3_regs; 5692 bool valid = true; 5693 unsigned int i, sz; 5694 int ret = 0; 5695 5696 /* Make sure tables are unique and in order. */ 5697 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true); 5698 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false); 5699 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false); 5700 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false); 5701 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false); 5702 valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false); 5703 5704 gicv3_regs = vgic_v3_get_sysreg_table(&sz); 5705 valid &= check_sysreg_table(gicv3_regs, sz, false); 5706 5707 if (!valid) 5708 return -EINVAL; 5709 5710 init_imp_id_regs(); 5711 5712 ret = populate_nv_trap_config(); 5713 5714 check_feature_map(); 5715 5716 for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++) 5717 ret = populate_sysreg_config(sys_reg_descs + i, i); 5718 5719 for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++) 5720 ret = populate_sysreg_config(sys_insn_descs + i, i); 5721 5722 return ret; 5723 } 5724
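/*
 * For reference, a minimal and purely illustrative userspace sketch of how
 * the tables above are consumed through the ONE_REG API (error handling
 * omitted, not part of this file): enumerate the indices with
 * KVM_GET_REG_LIST, then read one of them with KVM_GET_ONE_REG.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = idx,	// an index returned by KVM_GET_REG_LIST,
 *				// i.e. something built by sys_reg_to_index()
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * Indices that reach kvm_arm_sys_reg_get_reg() without matching an entry
 * (or that match a register hidden for this VM) fail with -ENOENT, as
 * implemented above.
 */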