1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2012,2013 - ARM Ltd 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 * 6 * Derived from arch/arm/kvm/coproc.c: 7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University 8 * Authors: Rusty Russell <rusty@rustcorp.com.au> 9 * Christoffer Dall <c.dall@virtualopensystems.com> 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/bsearch.h> 14 #include <linux/cacheinfo.h> 15 #include <linux/debugfs.h> 16 #include <linux/kvm_host.h> 17 #include <linux/mm.h> 18 #include <linux/printk.h> 19 #include <linux/uaccess.h> 20 #include <linux/irqchip/arm-gic-v3.h> 21 22 #include <asm/arm_pmuv3.h> 23 #include <asm/cacheflush.h> 24 #include <asm/cputype.h> 25 #include <asm/debug-monitors.h> 26 #include <asm/esr.h> 27 #include <asm/kvm_arm.h> 28 #include <asm/kvm_emulate.h> 29 #include <asm/kvm_hyp.h> 30 #include <asm/kvm_mmu.h> 31 #include <asm/kvm_nested.h> 32 #include <asm/perf_event.h> 33 #include <asm/sysreg.h> 34 35 #include <trace/events/kvm.h> 36 37 #include "sys_regs.h" 38 #include "vgic/vgic.h" 39 40 #include "trace.h" 41 42 /* 43 * For AArch32, we only take care of what is being trapped. Anything 44 * that has to do with init and userspace access has to go via the 45 * 64bit interface. 46 */ 47 48 static u64 sys_reg_to_index(const struct sys_reg_desc *reg); 49 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 50 u64 val); 51 undef_access(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)52 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 53 const struct sys_reg_desc *r) 54 { 55 kvm_inject_undefined(vcpu); 56 return false; 57 } 58 bad_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r,const char * msg)59 static bool bad_trap(struct kvm_vcpu *vcpu, 60 struct sys_reg_params *params, 61 const struct sys_reg_desc *r, 62 const char *msg) 63 { 64 WARN_ONCE(1, "Unexpected %s\n", msg); 65 print_sys_reg_instr(params); 66 return undef_access(vcpu, params, r); 67 } 68 read_from_write_only(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)69 static bool read_from_write_only(struct kvm_vcpu *vcpu, 70 struct sys_reg_params *params, 71 const struct sys_reg_desc *r) 72 { 73 return bad_trap(vcpu, params, r, 74 "sys_reg read to write-only register"); 75 } 76 write_to_read_only(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)77 static bool write_to_read_only(struct kvm_vcpu *vcpu, 78 struct sys_reg_params *params, 79 const struct sys_reg_desc *r) 80 { 81 return bad_trap(vcpu, params, r, 82 "sys_reg write to read-only register"); 83 } 84 85 #define PURE_EL2_SYSREG(el2) \ 86 case el2: { \ 87 *el1r = el2; \ 88 return true; \ 89 } 90 91 #define MAPPED_EL2_SYSREG(el2, el1, fn) \ 92 case el2: { \ 93 *xlate = fn; \ 94 *el1r = el1; \ 95 return true; \ 96 } 97 get_el2_to_el1_mapping(unsigned int reg,unsigned int * el1r,u64 (** xlate)(u64))98 static bool get_el2_to_el1_mapping(unsigned int reg, 99 unsigned int *el1r, u64 (**xlate)(u64)) 100 { 101 switch (reg) { 102 PURE_EL2_SYSREG( VPIDR_EL2 ); 103 PURE_EL2_SYSREG( VMPIDR_EL2 ); 104 PURE_EL2_SYSREG( ACTLR_EL2 ); 105 PURE_EL2_SYSREG( HCR_EL2 ); 106 PURE_EL2_SYSREG( MDCR_EL2 ); 107 PURE_EL2_SYSREG( HSTR_EL2 ); 108 PURE_EL2_SYSREG( HACR_EL2 ); 109 PURE_EL2_SYSREG( VTTBR_EL2 ); 110 PURE_EL2_SYSREG( VTCR_EL2 ); 111 PURE_EL2_SYSREG( RVBAR_EL2 ); 112 PURE_EL2_SYSREG( TPIDR_EL2 ); 113 PURE_EL2_SYSREG( HPFAR_EL2 ); 114 PURE_EL2_SYSREG( HCRX_EL2 ); 115 PURE_EL2_SYSREG( HFGRTR_EL2 ); 116 PURE_EL2_SYSREG( HFGWTR_EL2 ); 117 PURE_EL2_SYSREG( HFGITR_EL2 ); 118 PURE_EL2_SYSREG( HDFGRTR_EL2 ); 119 PURE_EL2_SYSREG( HDFGWTR_EL2 ); 120 PURE_EL2_SYSREG( HAFGRTR_EL2 ); 121 PURE_EL2_SYSREG( CNTVOFF_EL2 ); 122 PURE_EL2_SYSREG( CNTHCTL_EL2 ); 123 MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1, 124 translate_sctlr_el2_to_sctlr_el1 ); 125 MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1, 126 translate_cptr_el2_to_cpacr_el1 ); 127 MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1, 128 translate_ttbr0_el2_to_ttbr0_el1 ); 129 MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL ); 130 MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1, 131 translate_tcr_el2_to_tcr_el1 ); 132 MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL ); 133 MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL ); 134 MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL ); 135 MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL ); 136 MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL ); 137 MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL ); 138 MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL ); 139 MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL ); 140 MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL ); 141 MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL ); 142 MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL ); 143 MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL ); 144 MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL ); 145 MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL ); 146 MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL ); 147 default: 148 return false; 149 } 150 } 151 vcpu_read_sys_reg(const struct kvm_vcpu * vcpu,int reg)152 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg) 153 { 154 u64 val = 0x8badf00d8badf00d; 155 u64 (*xlate)(u64) = NULL; 156 unsigned int el1r; 157 158 if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) 159 goto memory_read; 160 161 if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) { 162 if (!is_hyp_ctxt(vcpu)) 163 goto memory_read; 164 165 /* 166 * CNTHCTL_EL2 requires some special treatment to 167 * account for the bits that can be set via CNTKCTL_EL1. 168 */ 169 switch (reg) { 170 case CNTHCTL_EL2: 171 if (vcpu_el2_e2h_is_set(vcpu)) { 172 val = read_sysreg_el1(SYS_CNTKCTL); 173 val &= CNTKCTL_VALID_BITS; 174 val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS; 175 return val; 176 } 177 break; 178 } 179 180 /* 181 * If this register does not have an EL1 counterpart, 182 * then read the stored EL2 version. 183 */ 184 if (reg == el1r) 185 goto memory_read; 186 187 /* 188 * If we have a non-VHE guest and that the sysreg 189 * requires translation to be used at EL1, use the 190 * in-memory copy instead. 191 */ 192 if (!vcpu_el2_e2h_is_set(vcpu) && xlate) 193 goto memory_read; 194 195 /* Get the current version of the EL1 counterpart. */ 196 WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val)); 197 if (reg >= __SANITISED_REG_START__) 198 val = kvm_vcpu_apply_reg_masks(vcpu, reg, val); 199 200 return val; 201 } 202 203 /* EL1 register can't be on the CPU if the guest is in vEL2. */ 204 if (unlikely(is_hyp_ctxt(vcpu))) 205 goto memory_read; 206 207 if (__vcpu_read_sys_reg_from_cpu(reg, &val)) 208 return val; 209 210 memory_read: 211 return __vcpu_sys_reg(vcpu, reg); 212 } 213 vcpu_write_sys_reg(struct kvm_vcpu * vcpu,u64 val,int reg)214 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) 215 { 216 u64 (*xlate)(u64) = NULL; 217 unsigned int el1r; 218 219 if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) 220 goto memory_write; 221 222 if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) { 223 if (!is_hyp_ctxt(vcpu)) 224 goto memory_write; 225 226 /* 227 * Always store a copy of the write to memory to avoid having 228 * to reverse-translate virtual EL2 system registers for a 229 * non-VHE guest hypervisor. 230 */ 231 __vcpu_sys_reg(vcpu, reg) = val; 232 233 switch (reg) { 234 case CNTHCTL_EL2: 235 /* 236 * If E2H=0, CNHTCTL_EL2 is a pure shadow register. 237 * Otherwise, some of the bits are backed by 238 * CNTKCTL_EL1, while the rest is kept in memory. 239 * Yes, this is fun stuff. 240 */ 241 if (vcpu_el2_e2h_is_set(vcpu)) 242 write_sysreg_el1(val, SYS_CNTKCTL); 243 return; 244 } 245 246 /* No EL1 counterpart? We're done here.? */ 247 if (reg == el1r) 248 return; 249 250 if (!vcpu_el2_e2h_is_set(vcpu) && xlate) 251 val = xlate(val); 252 253 /* Redirect this to the EL1 version of the register. */ 254 WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r)); 255 return; 256 } 257 258 /* EL1 register can't be on the CPU if the guest is in vEL2. */ 259 if (unlikely(is_hyp_ctxt(vcpu))) 260 goto memory_write; 261 262 if (__vcpu_write_sys_reg_to_cpu(val, reg)) 263 return; 264 265 memory_write: 266 __vcpu_sys_reg(vcpu, reg) = val; 267 } 268 269 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ 270 #define CSSELR_MAX 14 271 272 /* 273 * Returns the minimum line size for the selected cache, expressed as 274 * Log2(bytes). 275 */ get_min_cache_line_size(bool icache)276 static u8 get_min_cache_line_size(bool icache) 277 { 278 u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0); 279 u8 field; 280 281 if (icache) 282 field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr); 283 else 284 field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr); 285 286 /* 287 * Cache line size is represented as Log2(words) in CTR_EL0. 288 * Log2(bytes) can be derived with the following: 289 * 290 * Log2(words) + 2 = Log2(bytes / 4) + 2 291 * = Log2(bytes) - 2 + 2 292 * = Log2(bytes) 293 */ 294 return field + 2; 295 } 296 297 /* Which cache CCSIDR represents depends on CSSELR value. */ get_ccsidr(struct kvm_vcpu * vcpu,u32 csselr)298 static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr) 299 { 300 u8 line_size; 301 302 if (vcpu->arch.ccsidr) 303 return vcpu->arch.ccsidr[csselr]; 304 305 line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD); 306 307 /* 308 * Fabricate a CCSIDR value as the overriding value does not exist. 309 * The real CCSIDR value will not be used as it can vary by the 310 * physical CPU which the vcpu currently resides in. 311 * 312 * The line size is determined with get_min_cache_line_size(), which 313 * should be valid for all CPUs even if they have different cache 314 * configuration. 315 * 316 * The associativity bits are cleared, meaning the geometry of all data 317 * and unified caches (which are guaranteed to be PIPT and thus 318 * non-aliasing) are 1 set and 1 way. 319 * Guests should not be doing cache operations by set/way at all, and 320 * for this reason, we trap them and attempt to infer the intent, so 321 * that we can flush the entire guest's address space at the appropriate 322 * time. The exposed geometry minimizes the number of the traps. 323 * [If guests should attempt to infer aliasing properties from the 324 * geometry (which is not permitted by the architecture), they would 325 * only do so for virtually indexed caches.] 326 * 327 * We don't check if the cache level exists as it is allowed to return 328 * an UNKNOWN value if not. 329 */ 330 return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4); 331 } 332 set_ccsidr(struct kvm_vcpu * vcpu,u32 csselr,u32 val)333 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val) 334 { 335 u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4; 336 u32 *ccsidr = vcpu->arch.ccsidr; 337 u32 i; 338 339 if ((val & CCSIDR_EL1_RES0) || 340 line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD)) 341 return -EINVAL; 342 343 if (!ccsidr) { 344 if (val == get_ccsidr(vcpu, csselr)) 345 return 0; 346 347 ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT); 348 if (!ccsidr) 349 return -ENOMEM; 350 351 for (i = 0; i < CSSELR_MAX; i++) 352 ccsidr[i] = get_ccsidr(vcpu, i); 353 354 vcpu->arch.ccsidr = ccsidr; 355 } 356 357 ccsidr[csselr] = val; 358 359 return 0; 360 } 361 access_rw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)362 static bool access_rw(struct kvm_vcpu *vcpu, 363 struct sys_reg_params *p, 364 const struct sys_reg_desc *r) 365 { 366 if (p->is_write) 367 vcpu_write_sys_reg(vcpu, p->regval, r->reg); 368 else 369 p->regval = vcpu_read_sys_reg(vcpu, r->reg); 370 371 return true; 372 } 373 374 /* 375 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 376 */ access_dcsw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)377 static bool access_dcsw(struct kvm_vcpu *vcpu, 378 struct sys_reg_params *p, 379 const struct sys_reg_desc *r) 380 { 381 if (!p->is_write) 382 return read_from_write_only(vcpu, p, r); 383 384 /* 385 * Only track S/W ops if we don't have FWB. It still indicates 386 * that the guest is a bit broken (S/W operations should only 387 * be done by firmware, knowing that there is only a single 388 * CPU left in the system, and certainly not from non-secure 389 * software). 390 */ 391 if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 392 kvm_set_way_flush(vcpu); 393 394 return true; 395 } 396 access_dcgsw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)397 static bool access_dcgsw(struct kvm_vcpu *vcpu, 398 struct sys_reg_params *p, 399 const struct sys_reg_desc *r) 400 { 401 if (!kvm_has_mte(vcpu->kvm)) 402 return undef_access(vcpu, p, r); 403 404 /* Treat MTE S/W ops as we treat the classic ones: with contempt */ 405 return access_dcsw(vcpu, p, r); 406 } 407 get_access_mask(const struct sys_reg_desc * r,u64 * mask,u64 * shift)408 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift) 409 { 410 switch (r->aarch32_map) { 411 case AA32_LO: 412 *mask = GENMASK_ULL(31, 0); 413 *shift = 0; 414 break; 415 case AA32_HI: 416 *mask = GENMASK_ULL(63, 32); 417 *shift = 32; 418 break; 419 default: 420 *mask = GENMASK_ULL(63, 0); 421 *shift = 0; 422 break; 423 } 424 } 425 426 /* 427 * Generic accessor for VM registers. Only called as long as HCR_TVM 428 * is set. If the guest enables the MMU, we stop trapping the VM 429 * sys_regs and leave it in complete control of the caches. 430 */ access_vm_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)431 static bool access_vm_reg(struct kvm_vcpu *vcpu, 432 struct sys_reg_params *p, 433 const struct sys_reg_desc *r) 434 { 435 bool was_enabled = vcpu_has_cache_enabled(vcpu); 436 u64 val, mask, shift; 437 438 BUG_ON(!p->is_write); 439 440 get_access_mask(r, &mask, &shift); 441 442 if (~mask) { 443 val = vcpu_read_sys_reg(vcpu, r->reg); 444 val &= ~mask; 445 } else { 446 val = 0; 447 } 448 449 val |= (p->regval & (mask >> shift)) << shift; 450 vcpu_write_sys_reg(vcpu, val, r->reg); 451 452 kvm_toggle_cache(vcpu, was_enabled); 453 return true; 454 } 455 access_actlr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)456 static bool access_actlr(struct kvm_vcpu *vcpu, 457 struct sys_reg_params *p, 458 const struct sys_reg_desc *r) 459 { 460 u64 mask, shift; 461 462 if (p->is_write) 463 return ignore_write(vcpu, p); 464 465 get_access_mask(r, &mask, &shift); 466 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift; 467 468 return true; 469 } 470 471 /* 472 * Trap handler for the GICv3 SGI generation system register. 473 * Forward the request to the VGIC emulation. 474 * The cp15_64 code makes sure this automatically works 475 * for both AArch64 and AArch32 accesses. 476 */ access_gic_sgi(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)477 static bool access_gic_sgi(struct kvm_vcpu *vcpu, 478 struct sys_reg_params *p, 479 const struct sys_reg_desc *r) 480 { 481 bool g1; 482 483 if (!kvm_has_gicv3(vcpu->kvm)) 484 return undef_access(vcpu, p, r); 485 486 if (!p->is_write) 487 return read_from_write_only(vcpu, p, r); 488 489 /* 490 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates 491 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group, 492 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively 493 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure 494 * group. 495 */ 496 if (p->Op0 == 0) { /* AArch32 */ 497 switch (p->Op1) { 498 default: /* Keep GCC quiet */ 499 case 0: /* ICC_SGI1R */ 500 g1 = true; 501 break; 502 case 1: /* ICC_ASGI1R */ 503 case 2: /* ICC_SGI0R */ 504 g1 = false; 505 break; 506 } 507 } else { /* AArch64 */ 508 switch (p->Op2) { 509 default: /* Keep GCC quiet */ 510 case 5: /* ICC_SGI1R_EL1 */ 511 g1 = true; 512 break; 513 case 6: /* ICC_ASGI1R_EL1 */ 514 case 7: /* ICC_SGI0R_EL1 */ 515 g1 = false; 516 break; 517 } 518 } 519 520 vgic_v3_dispatch_sgi(vcpu, p->regval, g1); 521 522 return true; 523 } 524 access_gic_sre(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)525 static bool access_gic_sre(struct kvm_vcpu *vcpu, 526 struct sys_reg_params *p, 527 const struct sys_reg_desc *r) 528 { 529 if (!kvm_has_gicv3(vcpu->kvm)) 530 return undef_access(vcpu, p, r); 531 532 if (p->is_write) 533 return ignore_write(vcpu, p); 534 535 if (p->Op1 == 4) { /* ICC_SRE_EL2 */ 536 p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | 537 ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB); 538 } else { /* ICC_SRE_EL1 */ 539 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; 540 } 541 542 return true; 543 } 544 trap_raz_wi(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)545 static bool trap_raz_wi(struct kvm_vcpu *vcpu, 546 struct sys_reg_params *p, 547 const struct sys_reg_desc *r) 548 { 549 if (p->is_write) 550 return ignore_write(vcpu, p); 551 else 552 return read_zero(vcpu, p); 553 } 554 555 /* 556 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the 557 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 558 * system, these registers should UNDEF. LORID_EL1 being a RO register, we 559 * treat it separately. 560 */ trap_loregion(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)561 static bool trap_loregion(struct kvm_vcpu *vcpu, 562 struct sys_reg_params *p, 563 const struct sys_reg_desc *r) 564 { 565 u32 sr = reg_to_encoding(r); 566 567 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) 568 return undef_access(vcpu, p, r); 569 570 if (p->is_write && sr == SYS_LORID_EL1) 571 return write_to_read_only(vcpu, p, r); 572 573 return trap_raz_wi(vcpu, p, r); 574 } 575 trap_oslar_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)576 static bool trap_oslar_el1(struct kvm_vcpu *vcpu, 577 struct sys_reg_params *p, 578 const struct sys_reg_desc *r) 579 { 580 if (!p->is_write) 581 return read_from_write_only(vcpu, p, r); 582 583 kvm_debug_handle_oslar(vcpu, p->regval); 584 return true; 585 } 586 trap_oslsr_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)587 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 588 struct sys_reg_params *p, 589 const struct sys_reg_desc *r) 590 { 591 if (p->is_write) 592 return write_to_read_only(vcpu, p, r); 593 594 p->regval = __vcpu_sys_reg(vcpu, r->reg); 595 return true; 596 } 597 set_oslsr_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)598 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 599 u64 val) 600 { 601 /* 602 * The only modifiable bit is the OSLK bit. Refuse the write if 603 * userspace attempts to change any other bit in the register. 604 */ 605 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) 606 return -EINVAL; 607 608 __vcpu_sys_reg(vcpu, rd->reg) = val; 609 return 0; 610 } 611 trap_dbgauthstatus_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)612 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, 613 struct sys_reg_params *p, 614 const struct sys_reg_desc *r) 615 { 616 if (p->is_write) { 617 return ignore_write(vcpu, p); 618 } else { 619 p->regval = read_sysreg(dbgauthstatus_el1); 620 return true; 621 } 622 } 623 trap_debug_regs(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)624 static bool trap_debug_regs(struct kvm_vcpu *vcpu, 625 struct sys_reg_params *p, 626 const struct sys_reg_desc *r) 627 { 628 access_rw(vcpu, p, r); 629 630 kvm_debug_set_guest_ownership(vcpu); 631 return true; 632 } 633 634 /* 635 * reg_to_dbg/dbg_to_reg 636 * 637 * A 32 bit write to a debug register leave top bits alone 638 * A 32 bit read from a debug register only returns the bottom bits 639 */ reg_to_dbg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd,u64 * dbg_reg)640 static void reg_to_dbg(struct kvm_vcpu *vcpu, 641 struct sys_reg_params *p, 642 const struct sys_reg_desc *rd, 643 u64 *dbg_reg) 644 { 645 u64 mask, shift, val; 646 647 get_access_mask(rd, &mask, &shift); 648 649 val = *dbg_reg; 650 val &= ~mask; 651 val |= (p->regval & (mask >> shift)) << shift; 652 *dbg_reg = val; 653 } 654 dbg_to_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd,u64 * dbg_reg)655 static void dbg_to_reg(struct kvm_vcpu *vcpu, 656 struct sys_reg_params *p, 657 const struct sys_reg_desc *rd, 658 u64 *dbg_reg) 659 { 660 u64 mask, shift; 661 662 get_access_mask(rd, &mask, &shift); 663 p->regval = (*dbg_reg & mask) >> shift; 664 } 665 demux_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)666 static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) 667 { 668 struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state; 669 670 switch (rd->Op2) { 671 case 0b100: 672 return &dbg->dbg_bvr[rd->CRm]; 673 case 0b101: 674 return &dbg->dbg_bcr[rd->CRm]; 675 case 0b110: 676 return &dbg->dbg_wvr[rd->CRm]; 677 case 0b111: 678 return &dbg->dbg_wcr[rd->CRm]; 679 default: 680 KVM_BUG_ON(1, vcpu->kvm); 681 return NULL; 682 } 683 } 684 trap_dbg_wb_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd)685 static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 686 const struct sys_reg_desc *rd) 687 { 688 u64 *reg = demux_wb_reg(vcpu, rd); 689 690 if (!reg) 691 return false; 692 693 if (p->is_write) 694 reg_to_dbg(vcpu, p, rd, reg); 695 else 696 dbg_to_reg(vcpu, p, rd, reg); 697 698 kvm_debug_set_guest_ownership(vcpu); 699 return true; 700 } 701 set_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)702 static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 703 u64 val) 704 { 705 u64 *reg = demux_wb_reg(vcpu, rd); 706 707 if (!reg) 708 return -EINVAL; 709 710 *reg = val; 711 return 0; 712 } 713 get_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)714 static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 715 u64 *val) 716 { 717 u64 *reg = demux_wb_reg(vcpu, rd); 718 719 if (!reg) 720 return -EINVAL; 721 722 *val = *reg; 723 return 0; 724 } 725 reset_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)726 static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) 727 { 728 u64 *reg = demux_wb_reg(vcpu, rd); 729 730 /* 731 * Bail early if we couldn't find storage for the register, the 732 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever 733 * being run. 734 */ 735 if (!reg) 736 return 0; 737 738 *reg = rd->val; 739 return rd->val; 740 } 741 reset_amair_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)742 static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 743 { 744 u64 amair = read_sysreg(amair_el1); 745 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); 746 return amair; 747 } 748 reset_actlr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)749 static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 750 { 751 u64 actlr = read_sysreg(actlr_el1); 752 vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1); 753 return actlr; 754 } 755 reset_mpidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)756 static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 757 { 758 u64 mpidr; 759 760 /* 761 * Map the vcpu_id into the first three affinity level fields of 762 * the MPIDR. We limit the number of VCPUs in level 0 due to a 763 * limitation to 16 CPUs in that level in the ICC_SGIxR registers 764 * of the GICv3 to be able to address each CPU directly when 765 * sending IPIs. 766 */ 767 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); 768 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); 769 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); 770 mpidr |= (1ULL << 31); 771 vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); 772 773 return mpidr; 774 } 775 pmu_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)776 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, 777 const struct sys_reg_desc *r) 778 { 779 if (kvm_vcpu_has_pmu(vcpu)) 780 return 0; 781 782 return REG_HIDDEN; 783 } 784 reset_pmu_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)785 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 786 { 787 u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); 788 u8 n = vcpu->kvm->arch.pmcr_n; 789 790 if (n) 791 mask |= GENMASK(n - 1, 0); 792 793 reset_unknown(vcpu, r); 794 __vcpu_sys_reg(vcpu, r->reg) &= mask; 795 796 return __vcpu_sys_reg(vcpu, r->reg); 797 } 798 reset_pmevcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)799 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 800 { 801 reset_unknown(vcpu, r); 802 __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0); 803 804 return __vcpu_sys_reg(vcpu, r->reg); 805 } 806 reset_pmevtyper(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)807 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 808 { 809 /* This thing will UNDEF, who cares about the reset value? */ 810 if (!kvm_vcpu_has_pmu(vcpu)) 811 return 0; 812 813 reset_unknown(vcpu, r); 814 __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); 815 816 return __vcpu_sys_reg(vcpu, r->reg); 817 } 818 reset_pmselr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)819 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 820 { 821 reset_unknown(vcpu, r); 822 __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK; 823 824 return __vcpu_sys_reg(vcpu, r->reg); 825 } 826 reset_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)827 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 828 { 829 u64 pmcr = 0; 830 831 if (!kvm_supports_32bit_el0()) 832 pmcr |= ARMV8_PMU_PMCR_LC; 833 834 /* 835 * The value of PMCR.N field is included when the 836 * vCPU register is read via kvm_vcpu_read_pmcr(). 837 */ 838 __vcpu_sys_reg(vcpu, r->reg) = pmcr; 839 840 return __vcpu_sys_reg(vcpu, r->reg); 841 } 842 check_pmu_access_disabled(struct kvm_vcpu * vcpu,u64 flags)843 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) 844 { 845 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0); 846 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu); 847 848 if (!enabled) 849 kvm_inject_undefined(vcpu); 850 851 return !enabled; 852 } 853 pmu_access_el0_disabled(struct kvm_vcpu * vcpu)854 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) 855 { 856 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN); 857 } 858 pmu_write_swinc_el0_disabled(struct kvm_vcpu * vcpu)859 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) 860 { 861 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN); 862 } 863 pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu * vcpu)864 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) 865 { 866 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN); 867 } 868 pmu_access_event_counter_el0_disabled(struct kvm_vcpu * vcpu)869 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) 870 { 871 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN); 872 } 873 access_pmcr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)874 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 875 const struct sys_reg_desc *r) 876 { 877 u64 val; 878 879 if (pmu_access_el0_disabled(vcpu)) 880 return false; 881 882 if (p->is_write) { 883 /* 884 * Only update writeable bits of PMCR (continuing into 885 * kvm_pmu_handle_pmcr() as well) 886 */ 887 val = kvm_vcpu_read_pmcr(vcpu); 888 val &= ~ARMV8_PMU_PMCR_MASK; 889 val |= p->regval & ARMV8_PMU_PMCR_MASK; 890 if (!kvm_supports_32bit_el0()) 891 val |= ARMV8_PMU_PMCR_LC; 892 kvm_pmu_handle_pmcr(vcpu, val); 893 } else { 894 /* PMCR.P & PMCR.C are RAZ */ 895 val = kvm_vcpu_read_pmcr(vcpu) 896 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); 897 p->regval = val; 898 } 899 900 return true; 901 } 902 access_pmselr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)903 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 904 const struct sys_reg_desc *r) 905 { 906 if (pmu_access_event_counter_el0_disabled(vcpu)) 907 return false; 908 909 if (p->is_write) 910 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; 911 else 912 /* return PMSELR.SEL field */ 913 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) 914 & PMSELR_EL0_SEL_MASK; 915 916 return true; 917 } 918 access_pmceid(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)919 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 920 const struct sys_reg_desc *r) 921 { 922 u64 pmceid, mask, shift; 923 924 BUG_ON(p->is_write); 925 926 if (pmu_access_el0_disabled(vcpu)) 927 return false; 928 929 get_access_mask(r, &mask, &shift); 930 931 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); 932 pmceid &= mask; 933 pmceid >>= shift; 934 935 p->regval = pmceid; 936 937 return true; 938 } 939 pmu_counter_idx_valid(struct kvm_vcpu * vcpu,u64 idx)940 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) 941 { 942 u64 pmcr, val; 943 944 pmcr = kvm_vcpu_read_pmcr(vcpu); 945 val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); 946 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { 947 kvm_inject_undefined(vcpu); 948 return false; 949 } 950 951 return true; 952 } 953 get_pmu_evcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)954 static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 955 u64 *val) 956 { 957 u64 idx; 958 959 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) 960 /* PMCCNTR_EL0 */ 961 idx = ARMV8_PMU_CYCLE_IDX; 962 else 963 /* PMEVCNTRn_EL0 */ 964 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 965 966 *val = kvm_pmu_get_counter_value(vcpu, idx); 967 return 0; 968 } 969 set_pmu_evcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)970 static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 971 u64 val) 972 { 973 u64 idx; 974 975 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) 976 /* PMCCNTR_EL0 */ 977 idx = ARMV8_PMU_CYCLE_IDX; 978 else 979 /* PMEVCNTRn_EL0 */ 980 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 981 982 kvm_pmu_set_counter_value_user(vcpu, idx, val); 983 return 0; 984 } 985 access_pmu_evcntr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)986 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, 987 struct sys_reg_params *p, 988 const struct sys_reg_desc *r) 989 { 990 u64 idx = ~0UL; 991 992 if (r->CRn == 9 && r->CRm == 13) { 993 if (r->Op2 == 2) { 994 /* PMXEVCNTR_EL0 */ 995 if (pmu_access_event_counter_el0_disabled(vcpu)) 996 return false; 997 998 idx = SYS_FIELD_GET(PMSELR_EL0, SEL, 999 __vcpu_sys_reg(vcpu, PMSELR_EL0)); 1000 } else if (r->Op2 == 0) { 1001 /* PMCCNTR_EL0 */ 1002 if (pmu_access_cycle_counter_el0_disabled(vcpu)) 1003 return false; 1004 1005 idx = ARMV8_PMU_CYCLE_IDX; 1006 } 1007 } else if (r->CRn == 0 && r->CRm == 9) { 1008 /* PMCCNTR */ 1009 if (pmu_access_event_counter_el0_disabled(vcpu)) 1010 return false; 1011 1012 idx = ARMV8_PMU_CYCLE_IDX; 1013 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 1014 /* PMEVCNTRn_EL0 */ 1015 if (pmu_access_event_counter_el0_disabled(vcpu)) 1016 return false; 1017 1018 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 1019 } 1020 1021 /* Catch any decoding mistake */ 1022 WARN_ON(idx == ~0UL); 1023 1024 if (!pmu_counter_idx_valid(vcpu, idx)) 1025 return false; 1026 1027 if (p->is_write) { 1028 if (pmu_access_el0_disabled(vcpu)) 1029 return false; 1030 1031 kvm_pmu_set_counter_value(vcpu, idx, p->regval); 1032 } else { 1033 p->regval = kvm_pmu_get_counter_value(vcpu, idx); 1034 } 1035 1036 return true; 1037 } 1038 access_pmu_evtyper(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1039 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1040 const struct sys_reg_desc *r) 1041 { 1042 u64 idx, reg; 1043 1044 if (pmu_access_el0_disabled(vcpu)) 1045 return false; 1046 1047 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { 1048 /* PMXEVTYPER_EL0 */ 1049 idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0)); 1050 reg = PMEVTYPER0_EL0 + idx; 1051 } else if (r->CRn == 14 && (r->CRm & 12) == 12) { 1052 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 1053 if (idx == ARMV8_PMU_CYCLE_IDX) 1054 reg = PMCCFILTR_EL0; 1055 else 1056 /* PMEVTYPERn_EL0 */ 1057 reg = PMEVTYPER0_EL0 + idx; 1058 } else { 1059 BUG(); 1060 } 1061 1062 if (!pmu_counter_idx_valid(vcpu, idx)) 1063 return false; 1064 1065 if (p->is_write) { 1066 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); 1067 kvm_vcpu_pmu_restore_guest(vcpu); 1068 } else { 1069 p->regval = __vcpu_sys_reg(vcpu, reg); 1070 } 1071 1072 return true; 1073 } 1074 set_pmreg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)1075 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val) 1076 { 1077 u64 mask = kvm_pmu_accessible_counter_mask(vcpu); 1078 1079 __vcpu_sys_reg(vcpu, r->reg) = val & mask; 1080 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 1081 1082 return 0; 1083 } 1084 get_pmreg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)1085 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val) 1086 { 1087 u64 mask = kvm_pmu_accessible_counter_mask(vcpu); 1088 1089 *val = __vcpu_sys_reg(vcpu, r->reg) & mask; 1090 return 0; 1091 } 1092 access_pmcnten(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1093 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1094 const struct sys_reg_desc *r) 1095 { 1096 u64 val, mask; 1097 1098 if (pmu_access_el0_disabled(vcpu)) 1099 return false; 1100 1101 mask = kvm_pmu_accessible_counter_mask(vcpu); 1102 if (p->is_write) { 1103 val = p->regval & mask; 1104 if (r->Op2 & 0x1) 1105 /* accessing PMCNTENSET_EL0 */ 1106 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; 1107 else 1108 /* accessing PMCNTENCLR_EL0 */ 1109 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; 1110 1111 kvm_pmu_reprogram_counter_mask(vcpu, val); 1112 } else { 1113 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); 1114 } 1115 1116 return true; 1117 } 1118 access_pminten(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1119 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1120 const struct sys_reg_desc *r) 1121 { 1122 u64 mask = kvm_pmu_accessible_counter_mask(vcpu); 1123 1124 if (check_pmu_access_disabled(vcpu, 0)) 1125 return false; 1126 1127 if (p->is_write) { 1128 u64 val = p->regval & mask; 1129 1130 if (r->Op2 & 0x1) 1131 /* accessing PMINTENSET_EL1 */ 1132 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; 1133 else 1134 /* accessing PMINTENCLR_EL1 */ 1135 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; 1136 } else { 1137 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); 1138 } 1139 1140 return true; 1141 } 1142 access_pmovs(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1143 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1144 const struct sys_reg_desc *r) 1145 { 1146 u64 mask = kvm_pmu_accessible_counter_mask(vcpu); 1147 1148 if (pmu_access_el0_disabled(vcpu)) 1149 return false; 1150 1151 if (p->is_write) { 1152 if (r->CRm & 0x2) 1153 /* accessing PMOVSSET_EL0 */ 1154 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); 1155 else 1156 /* accessing PMOVSCLR_EL0 */ 1157 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); 1158 } else { 1159 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); 1160 } 1161 1162 return true; 1163 } 1164 access_pmswinc(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1165 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1166 const struct sys_reg_desc *r) 1167 { 1168 u64 mask; 1169 1170 if (!p->is_write) 1171 return read_from_write_only(vcpu, p, r); 1172 1173 if (pmu_write_swinc_el0_disabled(vcpu)) 1174 return false; 1175 1176 mask = kvm_pmu_accessible_counter_mask(vcpu); 1177 kvm_pmu_software_increment(vcpu, p->regval & mask); 1178 return true; 1179 } 1180 access_pmuserenr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1181 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1182 const struct sys_reg_desc *r) 1183 { 1184 if (p->is_write) { 1185 if (!vcpu_mode_priv(vcpu)) 1186 return undef_access(vcpu, p, r); 1187 1188 __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = 1189 p->regval & ARMV8_PMU_USERENR_MASK; 1190 } else { 1191 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) 1192 & ARMV8_PMU_USERENR_MASK; 1193 } 1194 1195 return true; 1196 } 1197 get_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)1198 static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 1199 u64 *val) 1200 { 1201 *val = kvm_vcpu_read_pmcr(vcpu); 1202 return 0; 1203 } 1204 set_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)1205 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 1206 u64 val) 1207 { 1208 u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val); 1209 struct kvm *kvm = vcpu->kvm; 1210 1211 mutex_lock(&kvm->arch.config_lock); 1212 1213 /* 1214 * The vCPU can't have more counters than the PMU hardware 1215 * implements. Ignore this error to maintain compatibility 1216 * with the existing KVM behavior. 1217 */ 1218 if (!kvm_vm_has_ran_once(kvm) && 1219 new_n <= kvm_arm_pmu_get_max_counters(kvm)) 1220 kvm->arch.pmcr_n = new_n; 1221 1222 mutex_unlock(&kvm->arch.config_lock); 1223 1224 /* 1225 * Ignore writes to RES0 bits, read only bits that are cleared on 1226 * vCPU reset, and writable bits that KVM doesn't support yet. 1227 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace) 1228 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU. 1229 * But, we leave the bit as it is here, as the vCPU's PMUver might 1230 * be changed later (NOTE: the bit will be cleared on first vCPU run 1231 * if necessary). 1232 */ 1233 val &= ARMV8_PMU_PMCR_MASK; 1234 1235 /* The LC bit is RES1 when AArch32 is not supported */ 1236 if (!kvm_supports_32bit_el0()) 1237 val |= ARMV8_PMU_PMCR_LC; 1238 1239 __vcpu_sys_reg(vcpu, r->reg) = val; 1240 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 1241 1242 return 0; 1243 } 1244 1245 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ 1246 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ 1247 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ 1248 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ 1249 get_dbg_wb_reg, set_dbg_wb_reg }, \ 1250 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ 1251 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ 1252 get_dbg_wb_reg, set_dbg_wb_reg }, \ 1253 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ 1254 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ 1255 get_dbg_wb_reg, set_dbg_wb_reg }, \ 1256 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ 1257 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \ 1258 get_dbg_wb_reg, set_dbg_wb_reg } 1259 1260 #define PMU_SYS_REG(name) \ 1261 SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \ 1262 .visibility = pmu_visibility 1263 1264 /* Macro to expand the PMEVCNTRn_EL0 register */ 1265 #define PMU_PMEVCNTR_EL0(n) \ 1266 { PMU_SYS_REG(PMEVCNTRn_EL0(n)), \ 1267 .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \ 1268 .set_user = set_pmu_evcntr, \ 1269 .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), } 1270 1271 /* Macro to expand the PMEVTYPERn_EL0 register */ 1272 #define PMU_PMEVTYPER_EL0(n) \ 1273 { PMU_SYS_REG(PMEVTYPERn_EL0(n)), \ 1274 .reset = reset_pmevtyper, \ 1275 .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), } 1276 1277 /* Macro to expand the AMU counter and type registers*/ 1278 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access } 1279 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access } 1280 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access } 1281 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access } 1282 ptrauth_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1283 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu, 1284 const struct sys_reg_desc *rd) 1285 { 1286 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN; 1287 } 1288 1289 /* 1290 * If we land here on a PtrAuth access, that is because we didn't 1291 * fixup the access on exit by allowing the PtrAuth sysregs. The only 1292 * way this happens is when the guest does not have PtrAuth support 1293 * enabled. 1294 */ 1295 #define __PTRAUTH_KEY(k) \ 1296 { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \ 1297 .visibility = ptrauth_visibility} 1298 1299 #define PTRAUTH_KEY(k) \ 1300 __PTRAUTH_KEY(k ## KEYLO_EL1), \ 1301 __PTRAUTH_KEY(k ## KEYHI_EL1) 1302 access_arch_timer(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1303 static bool access_arch_timer(struct kvm_vcpu *vcpu, 1304 struct sys_reg_params *p, 1305 const struct sys_reg_desc *r) 1306 { 1307 enum kvm_arch_timers tmr; 1308 enum kvm_arch_timer_regs treg; 1309 u64 reg = reg_to_encoding(r); 1310 1311 switch (reg) { 1312 case SYS_CNTP_TVAL_EL0: 1313 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1314 tmr = TIMER_HPTIMER; 1315 else 1316 tmr = TIMER_PTIMER; 1317 treg = TIMER_REG_TVAL; 1318 break; 1319 1320 case SYS_CNTV_TVAL_EL0: 1321 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1322 tmr = TIMER_HVTIMER; 1323 else 1324 tmr = TIMER_VTIMER; 1325 treg = TIMER_REG_TVAL; 1326 break; 1327 1328 case SYS_AARCH32_CNTP_TVAL: 1329 case SYS_CNTP_TVAL_EL02: 1330 tmr = TIMER_PTIMER; 1331 treg = TIMER_REG_TVAL; 1332 break; 1333 1334 case SYS_CNTV_TVAL_EL02: 1335 tmr = TIMER_VTIMER; 1336 treg = TIMER_REG_TVAL; 1337 break; 1338 1339 case SYS_CNTHP_TVAL_EL2: 1340 tmr = TIMER_HPTIMER; 1341 treg = TIMER_REG_TVAL; 1342 break; 1343 1344 case SYS_CNTHV_TVAL_EL2: 1345 tmr = TIMER_HVTIMER; 1346 treg = TIMER_REG_TVAL; 1347 break; 1348 1349 case SYS_CNTP_CTL_EL0: 1350 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1351 tmr = TIMER_HPTIMER; 1352 else 1353 tmr = TIMER_PTIMER; 1354 treg = TIMER_REG_CTL; 1355 break; 1356 1357 case SYS_CNTV_CTL_EL0: 1358 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1359 tmr = TIMER_HVTIMER; 1360 else 1361 tmr = TIMER_VTIMER; 1362 treg = TIMER_REG_CTL; 1363 break; 1364 1365 case SYS_AARCH32_CNTP_CTL: 1366 case SYS_CNTP_CTL_EL02: 1367 tmr = TIMER_PTIMER; 1368 treg = TIMER_REG_CTL; 1369 break; 1370 1371 case SYS_CNTV_CTL_EL02: 1372 tmr = TIMER_VTIMER; 1373 treg = TIMER_REG_CTL; 1374 break; 1375 1376 case SYS_CNTHP_CTL_EL2: 1377 tmr = TIMER_HPTIMER; 1378 treg = TIMER_REG_CTL; 1379 break; 1380 1381 case SYS_CNTHV_CTL_EL2: 1382 tmr = TIMER_HVTIMER; 1383 treg = TIMER_REG_CTL; 1384 break; 1385 1386 case SYS_CNTP_CVAL_EL0: 1387 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1388 tmr = TIMER_HPTIMER; 1389 else 1390 tmr = TIMER_PTIMER; 1391 treg = TIMER_REG_CVAL; 1392 break; 1393 1394 case SYS_CNTV_CVAL_EL0: 1395 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) 1396 tmr = TIMER_HVTIMER; 1397 else 1398 tmr = TIMER_VTIMER; 1399 treg = TIMER_REG_CVAL; 1400 break; 1401 1402 case SYS_AARCH32_CNTP_CVAL: 1403 case SYS_CNTP_CVAL_EL02: 1404 tmr = TIMER_PTIMER; 1405 treg = TIMER_REG_CVAL; 1406 break; 1407 1408 case SYS_CNTV_CVAL_EL02: 1409 tmr = TIMER_VTIMER; 1410 treg = TIMER_REG_CVAL; 1411 break; 1412 1413 case SYS_CNTHP_CVAL_EL2: 1414 tmr = TIMER_HPTIMER; 1415 treg = TIMER_REG_CVAL; 1416 break; 1417 1418 case SYS_CNTHV_CVAL_EL2: 1419 tmr = TIMER_HVTIMER; 1420 treg = TIMER_REG_CVAL; 1421 break; 1422 1423 case SYS_CNTPCT_EL0: 1424 case SYS_CNTPCTSS_EL0: 1425 if (is_hyp_ctxt(vcpu)) 1426 tmr = TIMER_HPTIMER; 1427 else 1428 tmr = TIMER_PTIMER; 1429 treg = TIMER_REG_CNT; 1430 break; 1431 1432 case SYS_AARCH32_CNTPCT: 1433 case SYS_AARCH32_CNTPCTSS: 1434 tmr = TIMER_PTIMER; 1435 treg = TIMER_REG_CNT; 1436 break; 1437 1438 case SYS_CNTVCT_EL0: 1439 case SYS_CNTVCTSS_EL0: 1440 if (is_hyp_ctxt(vcpu)) 1441 tmr = TIMER_HVTIMER; 1442 else 1443 tmr = TIMER_VTIMER; 1444 treg = TIMER_REG_CNT; 1445 break; 1446 1447 case SYS_AARCH32_CNTVCT: 1448 case SYS_AARCH32_CNTVCTSS: 1449 tmr = TIMER_VTIMER; 1450 treg = TIMER_REG_CNT; 1451 break; 1452 1453 default: 1454 print_sys_reg_msg(p, "%s", "Unhandled trapped timer register"); 1455 return undef_access(vcpu, p, r); 1456 } 1457 1458 if (p->is_write) 1459 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); 1460 else 1461 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); 1462 1463 return true; 1464 } 1465 access_hv_timer(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1466 static bool access_hv_timer(struct kvm_vcpu *vcpu, 1467 struct sys_reg_params *p, 1468 const struct sys_reg_desc *r) 1469 { 1470 if (!vcpu_el2_e2h_is_set(vcpu)) 1471 return undef_access(vcpu, p, r); 1472 1473 return access_arch_timer(vcpu, p, r); 1474 } 1475 kvm_arm64_ftr_safe_value(u32 id,const struct arm64_ftr_bits * ftrp,s64 new,s64 cur)1476 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, 1477 s64 new, s64 cur) 1478 { 1479 struct arm64_ftr_bits kvm_ftr = *ftrp; 1480 1481 /* Some features have different safe value type in KVM than host features */ 1482 switch (id) { 1483 case SYS_ID_AA64DFR0_EL1: 1484 switch (kvm_ftr.shift) { 1485 case ID_AA64DFR0_EL1_PMUVer_SHIFT: 1486 kvm_ftr.type = FTR_LOWER_SAFE; 1487 break; 1488 case ID_AA64DFR0_EL1_DebugVer_SHIFT: 1489 kvm_ftr.type = FTR_LOWER_SAFE; 1490 break; 1491 } 1492 break; 1493 case SYS_ID_DFR0_EL1: 1494 if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT) 1495 kvm_ftr.type = FTR_LOWER_SAFE; 1496 break; 1497 } 1498 1499 return arm64_ftr_safe_value(&kvm_ftr, new, cur); 1500 } 1501 1502 /* 1503 * arm64_check_features() - Check if a feature register value constitutes 1504 * a subset of features indicated by the idreg's KVM sanitised limit. 1505 * 1506 * This function will check if each feature field of @val is the "safe" value 1507 * against idreg's KVM sanitised limit return from reset() callback. 1508 * If a field value in @val is the same as the one in limit, it is always 1509 * considered the safe value regardless For register fields that are not in 1510 * writable, only the value in limit is considered the safe value. 1511 * 1512 * Return: 0 if all the fields are safe. Otherwise, return negative errno. 1513 */ arm64_check_features(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1514 static int arm64_check_features(struct kvm_vcpu *vcpu, 1515 const struct sys_reg_desc *rd, 1516 u64 val) 1517 { 1518 const struct arm64_ftr_reg *ftr_reg; 1519 const struct arm64_ftr_bits *ftrp = NULL; 1520 u32 id = reg_to_encoding(rd); 1521 u64 writable_mask = rd->val; 1522 u64 limit = rd->reset(vcpu, rd); 1523 u64 mask = 0; 1524 1525 /* 1526 * Hidden and unallocated ID registers may not have a corresponding 1527 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the 1528 * only safe value is 0. 1529 */ 1530 if (sysreg_visible_as_raz(vcpu, rd)) 1531 return val ? -E2BIG : 0; 1532 1533 ftr_reg = get_arm64_ftr_reg(id); 1534 if (!ftr_reg) 1535 return -EINVAL; 1536 1537 ftrp = ftr_reg->ftr_bits; 1538 1539 for (; ftrp && ftrp->width; ftrp++) { 1540 s64 f_val, f_lim, safe_val; 1541 u64 ftr_mask; 1542 1543 ftr_mask = arm64_ftr_mask(ftrp); 1544 if ((ftr_mask & writable_mask) != ftr_mask) 1545 continue; 1546 1547 f_val = arm64_ftr_value(ftrp, val); 1548 f_lim = arm64_ftr_value(ftrp, limit); 1549 mask |= ftr_mask; 1550 1551 if (f_val == f_lim) 1552 safe_val = f_val; 1553 else 1554 safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim); 1555 1556 if (safe_val != f_val) 1557 return -E2BIG; 1558 } 1559 1560 /* For fields that are not writable, values in limit are the safe values. */ 1561 if ((val & ~mask) != (limit & ~mask)) 1562 return -E2BIG; 1563 1564 return 0; 1565 } 1566 pmuver_to_perfmon(u8 pmuver)1567 static u8 pmuver_to_perfmon(u8 pmuver) 1568 { 1569 switch (pmuver) { 1570 case ID_AA64DFR0_EL1_PMUVer_IMP: 1571 return ID_DFR0_EL1_PerfMon_PMUv3; 1572 case ID_AA64DFR0_EL1_PMUVer_IMP_DEF: 1573 return ID_DFR0_EL1_PerfMon_IMPDEF; 1574 default: 1575 /* Anything ARMv8.1+ and NI have the same value. For now. */ 1576 return pmuver; 1577 } 1578 } 1579 1580 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val); 1581 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val); 1582 1583 /* Read a sanitised cpufeature ID register by sys_reg_desc */ __kvm_read_sanitised_id_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1584 static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, 1585 const struct sys_reg_desc *r) 1586 { 1587 u32 id = reg_to_encoding(r); 1588 u64 val; 1589 1590 if (sysreg_visible_as_raz(vcpu, r)) 1591 return 0; 1592 1593 val = read_sanitised_ftr_reg(id); 1594 1595 switch (id) { 1596 case SYS_ID_AA64DFR0_EL1: 1597 val = sanitise_id_aa64dfr0_el1(vcpu, val); 1598 break; 1599 case SYS_ID_AA64PFR0_EL1: 1600 val = sanitise_id_aa64pfr0_el1(vcpu, val); 1601 break; 1602 case SYS_ID_AA64PFR1_EL1: 1603 if (!kvm_has_mte(vcpu->kvm)) 1604 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); 1605 1606 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); 1607 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap); 1608 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); 1609 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); 1610 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS); 1611 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE); 1612 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX); 1613 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2); 1614 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR); 1615 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); 1616 break; 1617 case SYS_ID_AA64PFR2_EL1: 1618 /* We only expose FPMR */ 1619 val &= ID_AA64PFR2_EL1_FPMR; 1620 break; 1621 case SYS_ID_AA64ISAR1_EL1: 1622 if (!vcpu_has_ptrauth(vcpu)) 1623 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | 1624 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | 1625 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | 1626 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); 1627 break; 1628 case SYS_ID_AA64ISAR2_EL1: 1629 if (!vcpu_has_ptrauth(vcpu)) 1630 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 1631 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 1632 if (!cpus_have_final_cap(ARM64_HAS_WFXT) || 1633 has_broken_cntvoff()) 1634 val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); 1635 break; 1636 case SYS_ID_AA64ISAR3_EL1: 1637 val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX; 1638 break; 1639 case SYS_ID_AA64MMFR2_EL1: 1640 val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; 1641 val &= ~ID_AA64MMFR2_EL1_NV; 1642 break; 1643 case SYS_ID_AA64MMFR3_EL1: 1644 val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE | 1645 ID_AA64MMFR3_EL1_S1PIE; 1646 break; 1647 case SYS_ID_MMFR4_EL1: 1648 val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); 1649 break; 1650 } 1651 1652 if (vcpu_has_nv(vcpu)) 1653 val = limit_nv_id_reg(vcpu->kvm, id, val); 1654 1655 return val; 1656 } 1657 kvm_read_sanitised_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1658 static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu, 1659 const struct sys_reg_desc *r) 1660 { 1661 return __kvm_read_sanitised_id_reg(vcpu, r); 1662 } 1663 read_id_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1664 static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 1665 { 1666 return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r)); 1667 } 1668 is_feature_id_reg(u32 encoding)1669 static bool is_feature_id_reg(u32 encoding) 1670 { 1671 return (sys_reg_Op0(encoding) == 3 && 1672 (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) && 1673 sys_reg_CRn(encoding) == 0 && 1674 sys_reg_CRm(encoding) <= 7); 1675 } 1676 1677 /* 1678 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is 1679 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID 1680 * registers KVM maintains on a per-VM basis. 1681 * 1682 * Additionally, the implementation ID registers and CTR_EL0 are handled as 1683 * per-VM registers. 1684 */ is_vm_ftr_id_reg(u32 id)1685 static inline bool is_vm_ftr_id_reg(u32 id) 1686 { 1687 switch (id) { 1688 case SYS_CTR_EL0: 1689 case SYS_MIDR_EL1: 1690 case SYS_REVIDR_EL1: 1691 case SYS_AIDR_EL1: 1692 return true; 1693 default: 1694 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 && 1695 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 && 1696 sys_reg_CRm(id) < 8); 1697 1698 } 1699 } 1700 is_vcpu_ftr_id_reg(u32 id)1701 static inline bool is_vcpu_ftr_id_reg(u32 id) 1702 { 1703 return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id); 1704 } 1705 is_aa32_id_reg(u32 id)1706 static inline bool is_aa32_id_reg(u32 id) 1707 { 1708 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 && 1709 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 && 1710 sys_reg_CRm(id) <= 3); 1711 } 1712 id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1713 static unsigned int id_visibility(const struct kvm_vcpu *vcpu, 1714 const struct sys_reg_desc *r) 1715 { 1716 u32 id = reg_to_encoding(r); 1717 1718 switch (id) { 1719 case SYS_ID_AA64ZFR0_EL1: 1720 if (!vcpu_has_sve(vcpu)) 1721 return REG_RAZ; 1722 break; 1723 } 1724 1725 return 0; 1726 } 1727 aa32_id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1728 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu, 1729 const struct sys_reg_desc *r) 1730 { 1731 /* 1732 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any 1733 * EL. Promote to RAZ/WI in order to guarantee consistency between 1734 * systems. 1735 */ 1736 if (!kvm_supports_32bit_el0()) 1737 return REG_RAZ | REG_USER_WI; 1738 1739 return id_visibility(vcpu, r); 1740 } 1741 raz_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1742 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, 1743 const struct sys_reg_desc *r) 1744 { 1745 return REG_RAZ; 1746 } 1747 1748 /* cpufeature ID register access trap handlers */ 1749 access_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1750 static bool access_id_reg(struct kvm_vcpu *vcpu, 1751 struct sys_reg_params *p, 1752 const struct sys_reg_desc *r) 1753 { 1754 if (p->is_write) 1755 return write_to_read_only(vcpu, p, r); 1756 1757 p->regval = read_id_reg(vcpu, r); 1758 1759 return true; 1760 } 1761 1762 /* Visibility overrides for SVE-specific control registers */ sve_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1763 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, 1764 const struct sys_reg_desc *rd) 1765 { 1766 if (vcpu_has_sve(vcpu)) 1767 return 0; 1768 1769 return REG_HIDDEN; 1770 } 1771 sme_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1772 static unsigned int sme_visibility(const struct kvm_vcpu *vcpu, 1773 const struct sys_reg_desc *rd) 1774 { 1775 if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP)) 1776 return 0; 1777 1778 return REG_HIDDEN; 1779 } 1780 fp8_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1781 static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu, 1782 const struct sys_reg_desc *rd) 1783 { 1784 if (kvm_has_fpmr(vcpu->kvm)) 1785 return 0; 1786 1787 return REG_HIDDEN; 1788 } 1789 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu * vcpu,u64 val)1790 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val) 1791 { 1792 if (!vcpu_has_sve(vcpu)) 1793 val &= ~ID_AA64PFR0_EL1_SVE_MASK; 1794 1795 /* 1796 * The default is to expose CSV2 == 1 if the HW isn't affected. 1797 * Although this is a per-CPU feature, we make it global because 1798 * asymmetric systems are just a nuisance. 1799 * 1800 * Userspace can override this as long as it doesn't promise 1801 * the impossible. 1802 */ 1803 if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) { 1804 val &= ~ID_AA64PFR0_EL1_CSV2_MASK; 1805 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP); 1806 } 1807 if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) { 1808 val &= ~ID_AA64PFR0_EL1_CSV3_MASK; 1809 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP); 1810 } 1811 1812 if (kvm_vgic_global_state.type == VGIC_V3) { 1813 val &= ~ID_AA64PFR0_EL1_GIC_MASK; 1814 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP); 1815 } 1816 1817 val &= ~ID_AA64PFR0_EL1_AMU_MASK; 1818 1819 /* 1820 * MPAM is disabled by default as KVM also needs a set of PARTID to 1821 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some 1822 * older kernels let the guest see the ID bit. 1823 */ 1824 val &= ~ID_AA64PFR0_EL1_MPAM_MASK; 1825 1826 return val; 1827 } 1828 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu * vcpu,u64 val)1829 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val) 1830 { 1831 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8); 1832 1833 /* 1834 * Only initialize the PMU version if the vCPU was configured with one. 1835 */ 1836 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK; 1837 if (kvm_vcpu_has_pmu(vcpu)) 1838 val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer, 1839 kvm_arm_pmu_get_pmuver_limit()); 1840 1841 /* Hide SPE from guests */ 1842 val &= ~ID_AA64DFR0_EL1_PMSVer_MASK; 1843 1844 /* Hide BRBE from guests */ 1845 val &= ~ID_AA64DFR0_EL1_BRBE_MASK; 1846 1847 return val; 1848 } 1849 set_id_aa64dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1850 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, 1851 const struct sys_reg_desc *rd, 1852 u64 val) 1853 { 1854 u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val); 1855 u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); 1856 1857 /* 1858 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the 1859 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously 1860 * exposed an IMP_DEF PMU to userspace and the guest on systems w/ 1861 * non-architectural PMUs. Of course, PMUv3 is the only game in town for 1862 * PMU virtualization, so the IMP_DEF value was rather user-hostile. 1863 * 1864 * At minimum, we're on the hook to allow values that were given to 1865 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value 1866 * with a more sensible NI. The value of an ID register changing under 1867 * the nose of the guest is unfortunate, but is certainly no more 1868 * surprising than an ill-guided PMU driver poking at impdef system 1869 * registers that end in an UNDEF... 1870 */ 1871 if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) 1872 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK; 1873 1874 /* 1875 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a 1876 * nonzero minimum safe value. 1877 */ 1878 if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP) 1879 return -EINVAL; 1880 1881 return set_id_reg(vcpu, rd, val); 1882 } 1883 read_sanitised_id_dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1884 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, 1885 const struct sys_reg_desc *rd) 1886 { 1887 u8 perfmon; 1888 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1); 1889 1890 val &= ~ID_DFR0_EL1_PerfMon_MASK; 1891 if (kvm_vcpu_has_pmu(vcpu)) { 1892 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); 1893 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon); 1894 } 1895 1896 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8); 1897 1898 return val; 1899 } 1900 set_id_dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1901 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, 1902 const struct sys_reg_desc *rd, 1903 u64 val) 1904 { 1905 u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val); 1906 u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val); 1907 1908 if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) { 1909 val &= ~ID_DFR0_EL1_PerfMon_MASK; 1910 perfmon = 0; 1911 } 1912 1913 /* 1914 * Allow DFR0_EL1.PerfMon to be set from userspace as long as 1915 * it doesn't promise more than what the HW gives us on the 1916 * AArch64 side (as everything is emulated with that), and 1917 * that this is a PMUv3. 1918 */ 1919 if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3) 1920 return -EINVAL; 1921 1922 if (copdbg < ID_DFR0_EL1_CopDbg_Armv8) 1923 return -EINVAL; 1924 1925 return set_id_reg(vcpu, rd, val); 1926 } 1927 set_id_aa64pfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)1928 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, 1929 const struct sys_reg_desc *rd, u64 user_val) 1930 { 1931 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 1932 u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK; 1933 1934 /* 1935 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits 1936 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to 1937 * guests, but didn't add trap handling. KVM doesn't support MPAM and 1938 * always returns an UNDEF for these registers. The guest must see 0 1939 * for this field. 1940 * 1941 * But KVM must also accept values from user-space that were provided 1942 * by KVM. On CPUs that support MPAM, permit user-space to write 1943 * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field. 1944 */ 1945 if ((hw_val & mpam_mask) == (user_val & mpam_mask)) 1946 user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK; 1947 1948 return set_id_reg(vcpu, rd, user_val); 1949 } 1950 set_id_aa64pfr1_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)1951 static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, 1952 const struct sys_reg_desc *rd, u64 user_val) 1953 { 1954 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); 1955 u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK; 1956 1957 /* See set_id_aa64pfr0_el1 for comment about MPAM */ 1958 if ((hw_val & mpam_mask) == (user_val & mpam_mask)) 1959 user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK; 1960 1961 return set_id_reg(vcpu, rd, user_val); 1962 } 1963 set_id_aa64mmfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)1964 static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu, 1965 const struct sys_reg_desc *rd, u64 user_val) 1966 { 1967 u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd); 1968 u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK | 1969 ID_AA64MMFR0_EL1_TGRAN16_2_MASK | 1970 ID_AA64MMFR0_EL1_TGRAN64_2_MASK; 1971 1972 if (vcpu_has_nv(vcpu) && 1973 ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask))) 1974 return -EINVAL; 1975 1976 return set_id_reg(vcpu, rd, user_val); 1977 } 1978 set_id_aa64mmfr2_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)1979 static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu, 1980 const struct sys_reg_desc *rd, u64 user_val) 1981 { 1982 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); 1983 u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK; 1984 1985 /* 1986 * We made the mistake to expose the now deprecated NV field, 1987 * so allow userspace to write it, but silently ignore it. 1988 */ 1989 if ((hw_val & nv_mask) == (user_val & nv_mask)) 1990 user_val &= ~nv_mask; 1991 1992 return set_id_reg(vcpu, rd, user_val); 1993 } 1994 set_ctr_el0(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)1995 static int set_ctr_el0(struct kvm_vcpu *vcpu, 1996 const struct sys_reg_desc *rd, u64 user_val) 1997 { 1998 u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val); 1999 2000 /* 2001 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved. 2002 * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based 2003 * on what hardware reports. 2004 * 2005 * Using a VIPT software model on PIPT will lead to over invalidation, 2006 * but still correct. Hence, we can allow downgrading PIPT to VIPT, 2007 * but not the other way around. This is handled via arm64_ftr_safe_value() 2008 * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value 2009 * set as VIPT. 2010 */ 2011 switch (user_L1Ip) { 2012 case CTR_EL0_L1Ip_RESERVED_VPIPT: 2013 case CTR_EL0_L1Ip_RESERVED_AIVIVT: 2014 return -EINVAL; 2015 case CTR_EL0_L1Ip_VIPT: 2016 case CTR_EL0_L1Ip_PIPT: 2017 return set_id_reg(vcpu, rd, user_val); 2018 default: 2019 return -ENOENT; 2020 } 2021 } 2022 2023 /* 2024 * cpufeature ID register user accessors 2025 * 2026 * For now, these registers are immutable for userspace, so no values 2027 * are stored, and for set_id_reg() we don't allow the effective value 2028 * to be changed. 2029 */ get_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)2030 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2031 u64 *val) 2032 { 2033 /* 2034 * Avoid locking if the VM has already started, as the ID registers are 2035 * guaranteed to be invariant at that point. 2036 */ 2037 if (kvm_vm_has_ran_once(vcpu->kvm)) { 2038 *val = read_id_reg(vcpu, rd); 2039 return 0; 2040 } 2041 2042 mutex_lock(&vcpu->kvm->arch.config_lock); 2043 *val = read_id_reg(vcpu, rd); 2044 mutex_unlock(&vcpu->kvm->arch.config_lock); 2045 2046 return 0; 2047 } 2048 set_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2049 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2050 u64 val) 2051 { 2052 u32 id = reg_to_encoding(rd); 2053 int ret; 2054 2055 mutex_lock(&vcpu->kvm->arch.config_lock); 2056 2057 /* 2058 * Once the VM has started the ID registers are immutable. Reject any 2059 * write that does not match the final register value. 2060 */ 2061 if (kvm_vm_has_ran_once(vcpu->kvm)) { 2062 if (val != read_id_reg(vcpu, rd)) 2063 ret = -EBUSY; 2064 else 2065 ret = 0; 2066 2067 mutex_unlock(&vcpu->kvm->arch.config_lock); 2068 return ret; 2069 } 2070 2071 ret = arm64_check_features(vcpu, rd, val); 2072 if (!ret) 2073 kvm_set_vm_id_reg(vcpu->kvm, id, val); 2074 2075 mutex_unlock(&vcpu->kvm->arch.config_lock); 2076 2077 /* 2078 * arm64_check_features() returns -E2BIG to indicate the register's 2079 * feature set is a superset of the maximally-allowed register value. 2080 * While it would be nice to precisely describe this to userspace, the 2081 * existing UAPI for KVM_SET_ONE_REG has it that invalid register 2082 * writes return -EINVAL. 2083 */ 2084 if (ret == -E2BIG) 2085 ret = -EINVAL; 2086 return ret; 2087 } 2088 kvm_set_vm_id_reg(struct kvm * kvm,u32 reg,u64 val)2089 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val) 2090 { 2091 u64 *p = __vm_id_reg(&kvm->arch, reg); 2092 2093 lockdep_assert_held(&kvm->arch.config_lock); 2094 2095 if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm)) 2096 return; 2097 2098 *p = val; 2099 } 2100 get_raz_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)2101 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2102 u64 *val) 2103 { 2104 *val = 0; 2105 return 0; 2106 } 2107 set_wi_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2108 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2109 u64 val) 2110 { 2111 return 0; 2112 } 2113 access_ctr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2114 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2115 const struct sys_reg_desc *r) 2116 { 2117 if (p->is_write) 2118 return write_to_read_only(vcpu, p, r); 2119 2120 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0); 2121 return true; 2122 } 2123 access_clidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2124 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2125 const struct sys_reg_desc *r) 2126 { 2127 if (p->is_write) 2128 return write_to_read_only(vcpu, p, r); 2129 2130 p->regval = __vcpu_sys_reg(vcpu, r->reg); 2131 return true; 2132 } 2133 2134 /* 2135 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary 2136 * by the physical CPU which the vcpu currently resides in. 2137 */ reset_clidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)2138 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 2139 { 2140 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); 2141 u64 clidr; 2142 u8 loc; 2143 2144 if ((ctr_el0 & CTR_EL0_IDC)) { 2145 /* 2146 * Data cache clean to the PoU is not required so LoUU and LoUIS 2147 * will not be set and a unified cache, which will be marked as 2148 * LoC, will be added. 2149 * 2150 * If not DIC, let the unified cache L2 so that an instruction 2151 * cache can be added as L1 later. 2152 */ 2153 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2; 2154 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc); 2155 } else { 2156 /* 2157 * Data cache clean to the PoU is required so let L1 have a data 2158 * cache and mark it as LoUU and LoUIS. As L1 has a data cache, 2159 * it can be marked as LoC too. 2160 */ 2161 loc = 1; 2162 clidr = 1 << CLIDR_LOUU_SHIFT; 2163 clidr |= 1 << CLIDR_LOUIS_SHIFT; 2164 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1); 2165 } 2166 2167 /* 2168 * Instruction cache invalidation to the PoU is required so let L1 have 2169 * an instruction cache. If L1 already has a data cache, it will be 2170 * CACHE_TYPE_SEPARATE. 2171 */ 2172 if (!(ctr_el0 & CTR_EL0_DIC)) 2173 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1); 2174 2175 clidr |= loc << CLIDR_LOC_SHIFT; 2176 2177 /* 2178 * Add tag cache unified to data cache. Allocation tags and data are 2179 * unified in a cache line so that it looks valid even if there is only 2180 * one cache line. 2181 */ 2182 if (kvm_has_mte(vcpu->kvm)) 2183 clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc); 2184 2185 __vcpu_sys_reg(vcpu, r->reg) = clidr; 2186 2187 return __vcpu_sys_reg(vcpu, r->reg); 2188 } 2189 set_clidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2190 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 2191 u64 val) 2192 { 2193 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); 2194 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val)); 2195 2196 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc)) 2197 return -EINVAL; 2198 2199 __vcpu_sys_reg(vcpu, rd->reg) = val; 2200 2201 return 0; 2202 } 2203 access_csselr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2204 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2205 const struct sys_reg_desc *r) 2206 { 2207 int reg = r->reg; 2208 2209 if (p->is_write) 2210 vcpu_write_sys_reg(vcpu, p->regval, reg); 2211 else 2212 p->regval = vcpu_read_sys_reg(vcpu, reg); 2213 return true; 2214 } 2215 access_ccsidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2216 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 2217 const struct sys_reg_desc *r) 2218 { 2219 u32 csselr; 2220 2221 if (p->is_write) 2222 return write_to_read_only(vcpu, p, r); 2223 2224 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); 2225 csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD; 2226 if (csselr < CSSELR_MAX) 2227 p->regval = get_ccsidr(vcpu, csselr); 2228 2229 return true; 2230 } 2231 mte_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2232 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, 2233 const struct sys_reg_desc *rd) 2234 { 2235 if (kvm_has_mte(vcpu->kvm)) 2236 return 0; 2237 2238 return REG_HIDDEN; 2239 } 2240 2241 #define MTE_REG(name) { \ 2242 SYS_DESC(SYS_##name), \ 2243 .access = undef_access, \ 2244 .reset = reset_unknown, \ 2245 .reg = name, \ 2246 .visibility = mte_visibility, \ 2247 } 2248 el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2249 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu, 2250 const struct sys_reg_desc *rd) 2251 { 2252 if (vcpu_has_nv(vcpu)) 2253 return 0; 2254 2255 return REG_HIDDEN; 2256 } 2257 bad_vncr_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2258 static bool bad_vncr_trap(struct kvm_vcpu *vcpu, 2259 struct sys_reg_params *p, 2260 const struct sys_reg_desc *r) 2261 { 2262 /* 2263 * We really shouldn't be here, and this is likely the result 2264 * of a misconfigured trap, as this register should target the 2265 * VNCR page, and nothing else. 2266 */ 2267 return bad_trap(vcpu, p, r, 2268 "trap of VNCR-backed register"); 2269 } 2270 bad_redir_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2271 static bool bad_redir_trap(struct kvm_vcpu *vcpu, 2272 struct sys_reg_params *p, 2273 const struct sys_reg_desc *r) 2274 { 2275 /* 2276 * We really shouldn't be here, and this is likely the result 2277 * of a misconfigured trap, as this register should target the 2278 * corresponding EL1, and nothing else. 2279 */ 2280 return bad_trap(vcpu, p, r, 2281 "trap of EL2 register redirected to EL1"); 2282 } 2283 2284 #define EL2_REG(name, acc, rst, v) { \ 2285 SYS_DESC(SYS_##name), \ 2286 .access = acc, \ 2287 .reset = rst, \ 2288 .reg = name, \ 2289 .visibility = el2_visibility, \ 2290 .val = v, \ 2291 } 2292 2293 #define EL2_REG_FILTERED(name, acc, rst, v, filter) { \ 2294 SYS_DESC(SYS_##name), \ 2295 .access = acc, \ 2296 .reset = rst, \ 2297 .reg = name, \ 2298 .visibility = filter, \ 2299 .val = v, \ 2300 } 2301 2302 #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v) 2303 #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) 2304 2305 /* 2306 * Since reset() callback and field val are not used for idregs, they will be 2307 * used for specific purposes for idregs. 2308 * The reset() would return KVM sanitised register value. The value would be the 2309 * same as the host kernel sanitised value if there is no KVM sanitisation. 2310 * The val would be used as a mask indicating writable fields for the idreg. 2311 * Only bits with 1 are writable from userspace. This mask might not be 2312 * necessary in the future whenever all ID registers are enabled as writable 2313 * from userspace. 2314 */ 2315 2316 #define ID_DESC_DEFAULT_CALLBACKS \ 2317 .access = access_id_reg, \ 2318 .get_user = get_id_reg, \ 2319 .set_user = set_id_reg, \ 2320 .visibility = id_visibility, \ 2321 .reset = kvm_read_sanitised_id_reg 2322 2323 #define ID_DESC(name) \ 2324 SYS_DESC(SYS_##name), \ 2325 ID_DESC_DEFAULT_CALLBACKS 2326 2327 /* sys_reg_desc initialiser for known cpufeature ID registers */ 2328 #define ID_SANITISED(name) { \ 2329 ID_DESC(name), \ 2330 .val = 0, \ 2331 } 2332 2333 /* sys_reg_desc initialiser for known cpufeature ID registers */ 2334 #define AA32_ID_SANITISED(name) { \ 2335 ID_DESC(name), \ 2336 .visibility = aa32_id_visibility, \ 2337 .val = 0, \ 2338 } 2339 2340 /* sys_reg_desc initialiser for writable ID registers */ 2341 #define ID_WRITABLE(name, mask) { \ 2342 ID_DESC(name), \ 2343 .val = mask, \ 2344 } 2345 2346 /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */ 2347 #define ID_FILTERED(sysreg, name, mask) { \ 2348 ID_DESC(sysreg), \ 2349 .set_user = set_##name, \ 2350 .val = (mask), \ 2351 } 2352 2353 /* 2354 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID 2355 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 2356 * (1 <= crm < 8, 0 <= Op2 < 8). 2357 */ 2358 #define ID_UNALLOCATED(crm, op2) { \ 2359 .name = "S3_0_0_" #crm "_" #op2, \ 2360 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ 2361 ID_DESC_DEFAULT_CALLBACKS, \ 2362 .visibility = raz_visibility, \ 2363 .val = 0, \ 2364 } 2365 2366 /* 2367 * sys_reg_desc initialiser for known ID registers that we hide from guests. 2368 * For now, these are exposed just like unallocated ID regs: they appear 2369 * RAZ for the guest. 2370 */ 2371 #define ID_HIDDEN(name) { \ 2372 ID_DESC(name), \ 2373 .visibility = raz_visibility, \ 2374 .val = 0, \ 2375 } 2376 access_sp_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2377 static bool access_sp_el1(struct kvm_vcpu *vcpu, 2378 struct sys_reg_params *p, 2379 const struct sys_reg_desc *r) 2380 { 2381 if (p->is_write) 2382 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval; 2383 else 2384 p->regval = __vcpu_sys_reg(vcpu, SP_EL1); 2385 2386 return true; 2387 } 2388 access_elr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2389 static bool access_elr(struct kvm_vcpu *vcpu, 2390 struct sys_reg_params *p, 2391 const struct sys_reg_desc *r) 2392 { 2393 if (p->is_write) 2394 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); 2395 else 2396 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); 2397 2398 return true; 2399 } 2400 access_spsr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2401 static bool access_spsr(struct kvm_vcpu *vcpu, 2402 struct sys_reg_params *p, 2403 const struct sys_reg_desc *r) 2404 { 2405 if (p->is_write) 2406 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval; 2407 else 2408 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); 2409 2410 return true; 2411 } 2412 access_cntkctl_el12(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2413 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu, 2414 struct sys_reg_params *p, 2415 const struct sys_reg_desc *r) 2416 { 2417 if (p->is_write) 2418 __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval; 2419 else 2420 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); 2421 2422 return true; 2423 } 2424 reset_hcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)2425 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 2426 { 2427 u64 val = r->val; 2428 2429 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) 2430 val |= HCR_E2H; 2431 2432 return __vcpu_sys_reg(vcpu, r->reg) = val; 2433 } 2434 __el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,unsigned int (* fn)(const struct kvm_vcpu *,const struct sys_reg_desc *))2435 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu, 2436 const struct sys_reg_desc *rd, 2437 unsigned int (*fn)(const struct kvm_vcpu *, 2438 const struct sys_reg_desc *)) 2439 { 2440 return el2_visibility(vcpu, rd) ?: fn(vcpu, rd); 2441 } 2442 sve_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2443 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu, 2444 const struct sys_reg_desc *rd) 2445 { 2446 return __el2_visibility(vcpu, rd, sve_visibility); 2447 } 2448 access_zcr_el2(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2449 static bool access_zcr_el2(struct kvm_vcpu *vcpu, 2450 struct sys_reg_params *p, 2451 const struct sys_reg_desc *r) 2452 { 2453 unsigned int vq; 2454 2455 if (guest_hyp_sve_traps_enabled(vcpu)) { 2456 kvm_inject_nested_sve_trap(vcpu); 2457 return true; 2458 } 2459 2460 if (!p->is_write) { 2461 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2); 2462 return true; 2463 } 2464 2465 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; 2466 vq = min(vq, vcpu_sve_max_vq(vcpu)); 2467 vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2); 2468 2469 return true; 2470 } 2471 access_gic_vtr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2472 static bool access_gic_vtr(struct kvm_vcpu *vcpu, 2473 struct sys_reg_params *p, 2474 const struct sys_reg_desc *r) 2475 { 2476 if (p->is_write) 2477 return write_to_read_only(vcpu, p, r); 2478 2479 p->regval = kvm_vgic_global_state.ich_vtr_el2; 2480 p->regval &= ~(ICH_VTR_EL2_DVIM | 2481 ICH_VTR_EL2_A3V | 2482 ICH_VTR_EL2_IDbits); 2483 p->regval |= ICH_VTR_EL2_nV4; 2484 2485 return true; 2486 } 2487 access_gic_misr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2488 static bool access_gic_misr(struct kvm_vcpu *vcpu, 2489 struct sys_reg_params *p, 2490 const struct sys_reg_desc *r) 2491 { 2492 if (p->is_write) 2493 return write_to_read_only(vcpu, p, r); 2494 2495 p->regval = vgic_v3_get_misr(vcpu); 2496 2497 return true; 2498 } 2499 access_gic_eisr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2500 static bool access_gic_eisr(struct kvm_vcpu *vcpu, 2501 struct sys_reg_params *p, 2502 const struct sys_reg_desc *r) 2503 { 2504 if (p->is_write) 2505 return write_to_read_only(vcpu, p, r); 2506 2507 p->regval = vgic_v3_get_eisr(vcpu); 2508 2509 return true; 2510 } 2511 access_gic_elrsr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2512 static bool access_gic_elrsr(struct kvm_vcpu *vcpu, 2513 struct sys_reg_params *p, 2514 const struct sys_reg_desc *r) 2515 { 2516 if (p->is_write) 2517 return write_to_read_only(vcpu, p, r); 2518 2519 p->regval = vgic_v3_get_elrsr(vcpu); 2520 2521 return true; 2522 } 2523 s1poe_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2524 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu, 2525 const struct sys_reg_desc *rd) 2526 { 2527 if (kvm_has_s1poe(vcpu->kvm)) 2528 return 0; 2529 2530 return REG_HIDDEN; 2531 } 2532 s1poe_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2533 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu, 2534 const struct sys_reg_desc *rd) 2535 { 2536 return __el2_visibility(vcpu, rd, s1poe_visibility); 2537 } 2538 tcr2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2539 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu, 2540 const struct sys_reg_desc *rd) 2541 { 2542 if (kvm_has_tcr2(vcpu->kvm)) 2543 return 0; 2544 2545 return REG_HIDDEN; 2546 } 2547 tcr2_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2548 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu, 2549 const struct sys_reg_desc *rd) 2550 { 2551 return __el2_visibility(vcpu, rd, tcr2_visibility); 2552 } 2553 s1pie_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2554 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu, 2555 const struct sys_reg_desc *rd) 2556 { 2557 if (kvm_has_s1pie(vcpu->kvm)) 2558 return 0; 2559 2560 return REG_HIDDEN; 2561 } 2562 s1pie_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2563 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu, 2564 const struct sys_reg_desc *rd) 2565 { 2566 return __el2_visibility(vcpu, rd, s1pie_visibility); 2567 } 2568 access_mdcr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2569 static bool access_mdcr(struct kvm_vcpu *vcpu, 2570 struct sys_reg_params *p, 2571 const struct sys_reg_desc *r) 2572 { 2573 u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2); 2574 2575 if (!access_rw(vcpu, p, r)) 2576 return false; 2577 2578 /* 2579 * Request a reload of the PMU to enable/disable the counters affected 2580 * by HPME. 2581 */ 2582 if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME) 2583 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 2584 2585 return true; 2586 } 2587 2588 /* 2589 * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and 2590 * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them. 2591 * The values made visible to userspace were the register values of the boot 2592 * CPU. 2593 * 2594 * At the same time, reads from these registers at EL1 previously were not 2595 * trapped, allowing the guest to read the actual hardware value. On big-little 2596 * machines, this means the VM can see different values depending on where a 2597 * given vCPU got scheduled. 2598 * 2599 * These registers are now trapped as collateral damage from SME, and what 2600 * follows attempts to give a user / guest view consistent with the existing 2601 * ABI. 2602 */ access_imp_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2603 static bool access_imp_id_reg(struct kvm_vcpu *vcpu, 2604 struct sys_reg_params *p, 2605 const struct sys_reg_desc *r) 2606 { 2607 if (p->is_write) 2608 return write_to_read_only(vcpu, p, r); 2609 2610 /* 2611 * Return the VM-scoped implementation ID register values if userspace 2612 * has made them writable. 2613 */ 2614 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags)) 2615 return access_id_reg(vcpu, p, r); 2616 2617 /* 2618 * Otherwise, fall back to the old behavior of returning the value of 2619 * the current CPU. 2620 */ 2621 switch (reg_to_encoding(r)) { 2622 case SYS_REVIDR_EL1: 2623 p->regval = read_sysreg(revidr_el1); 2624 break; 2625 case SYS_AIDR_EL1: 2626 p->regval = read_sysreg(aidr_el1); 2627 break; 2628 default: 2629 WARN_ON_ONCE(1); 2630 } 2631 2632 return true; 2633 } 2634 2635 static u64 __ro_after_init boot_cpu_midr_val; 2636 static u64 __ro_after_init boot_cpu_revidr_val; 2637 static u64 __ro_after_init boot_cpu_aidr_val; 2638 init_imp_id_regs(void)2639 static void init_imp_id_regs(void) 2640 { 2641 boot_cpu_midr_val = read_sysreg(midr_el1); 2642 boot_cpu_revidr_val = read_sysreg(revidr_el1); 2643 boot_cpu_aidr_val = read_sysreg(aidr_el1); 2644 } 2645 reset_imp_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)2646 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 2647 { 2648 switch (reg_to_encoding(r)) { 2649 case SYS_MIDR_EL1: 2650 return boot_cpu_midr_val; 2651 case SYS_REVIDR_EL1: 2652 return boot_cpu_revidr_val; 2653 case SYS_AIDR_EL1: 2654 return boot_cpu_aidr_val; 2655 default: 2656 KVM_BUG_ON(1, vcpu->kvm); 2657 return 0; 2658 } 2659 } 2660 set_imp_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)2661 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 2662 u64 val) 2663 { 2664 struct kvm *kvm = vcpu->kvm; 2665 u64 expected; 2666 2667 guard(mutex)(&kvm->arch.config_lock); 2668 2669 expected = read_id_reg(vcpu, r); 2670 if (expected == val) 2671 return 0; 2672 2673 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)) 2674 return -EINVAL; 2675 2676 /* 2677 * Once the VM has started the ID registers are immutable. Reject the 2678 * write if userspace tries to change it. 2679 */ 2680 if (kvm_vm_has_ran_once(kvm)) 2681 return -EBUSY; 2682 2683 /* 2684 * Any value is allowed for the implementation ID registers so long as 2685 * it is within the writable mask. 2686 */ 2687 if ((val & r->val) != val) 2688 return -EINVAL; 2689 2690 kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val); 2691 return 0; 2692 } 2693 2694 #define IMPLEMENTATION_ID(reg, mask) { \ 2695 SYS_DESC(SYS_##reg), \ 2696 .access = access_imp_id_reg, \ 2697 .get_user = get_id_reg, \ 2698 .set_user = set_imp_id_reg, \ 2699 .reset = reset_imp_id_reg, \ 2700 .val = mask, \ 2701 } 2702 2703 /* 2704 * Architected system registers. 2705 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 2706 * 2707 * Debug handling: We do trap most, if not all debug related system 2708 * registers. The implementation is good enough to ensure that a guest 2709 * can use these with minimal performance degradation. The drawback is 2710 * that we don't implement any of the external debug architecture. 2711 * This should be revisited if we ever encounter a more demanding 2712 * guest... 2713 */ 2714 static const struct sys_reg_desc sys_reg_descs[] = { 2715 DBG_BCR_BVR_WCR_WVR_EL1(0), 2716 DBG_BCR_BVR_WCR_WVR_EL1(1), 2717 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, 2718 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, 2719 DBG_BCR_BVR_WCR_WVR_EL1(2), 2720 DBG_BCR_BVR_WCR_WVR_EL1(3), 2721 DBG_BCR_BVR_WCR_WVR_EL1(4), 2722 DBG_BCR_BVR_WCR_WVR_EL1(5), 2723 DBG_BCR_BVR_WCR_WVR_EL1(6), 2724 DBG_BCR_BVR_WCR_WVR_EL1(7), 2725 DBG_BCR_BVR_WCR_WVR_EL1(8), 2726 DBG_BCR_BVR_WCR_WVR_EL1(9), 2727 DBG_BCR_BVR_WCR_WVR_EL1(10), 2728 DBG_BCR_BVR_WCR_WVR_EL1(11), 2729 DBG_BCR_BVR_WCR_WVR_EL1(12), 2730 DBG_BCR_BVR_WCR_WVR_EL1(13), 2731 DBG_BCR_BVR_WCR_WVR_EL1(14), 2732 DBG_BCR_BVR_WCR_WVR_EL1(15), 2733 2734 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, 2735 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 }, 2736 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, 2737 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, 2738 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, 2739 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, 2740 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, 2741 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi }, 2742 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 }, 2743 2744 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi }, 2745 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi }, 2746 // DBGDTR[TR]X_EL0 share the same encoding 2747 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, 2748 2749 { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 }, 2750 2751 IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)), 2752 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, 2753 IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)), 2754 2755 /* 2756 * ID regs: all ID_SANITISED() entries here must have corresponding 2757 * entries in arm64_ftr_regs[]. 2758 */ 2759 2760 /* AArch64 mappings of the AArch32 ID registers */ 2761 /* CRm=1 */ 2762 AA32_ID_SANITISED(ID_PFR0_EL1), 2763 AA32_ID_SANITISED(ID_PFR1_EL1), 2764 { SYS_DESC(SYS_ID_DFR0_EL1), 2765 .access = access_id_reg, 2766 .get_user = get_id_reg, 2767 .set_user = set_id_dfr0_el1, 2768 .visibility = aa32_id_visibility, 2769 .reset = read_sanitised_id_dfr0_el1, 2770 .val = ID_DFR0_EL1_PerfMon_MASK | 2771 ID_DFR0_EL1_CopDbg_MASK, }, 2772 ID_HIDDEN(ID_AFR0_EL1), 2773 AA32_ID_SANITISED(ID_MMFR0_EL1), 2774 AA32_ID_SANITISED(ID_MMFR1_EL1), 2775 AA32_ID_SANITISED(ID_MMFR2_EL1), 2776 AA32_ID_SANITISED(ID_MMFR3_EL1), 2777 2778 /* CRm=2 */ 2779 AA32_ID_SANITISED(ID_ISAR0_EL1), 2780 AA32_ID_SANITISED(ID_ISAR1_EL1), 2781 AA32_ID_SANITISED(ID_ISAR2_EL1), 2782 AA32_ID_SANITISED(ID_ISAR3_EL1), 2783 AA32_ID_SANITISED(ID_ISAR4_EL1), 2784 AA32_ID_SANITISED(ID_ISAR5_EL1), 2785 AA32_ID_SANITISED(ID_MMFR4_EL1), 2786 AA32_ID_SANITISED(ID_ISAR6_EL1), 2787 2788 /* CRm=3 */ 2789 AA32_ID_SANITISED(MVFR0_EL1), 2790 AA32_ID_SANITISED(MVFR1_EL1), 2791 AA32_ID_SANITISED(MVFR2_EL1), 2792 ID_UNALLOCATED(3,3), 2793 AA32_ID_SANITISED(ID_PFR2_EL1), 2794 ID_HIDDEN(ID_DFR1_EL1), 2795 AA32_ID_SANITISED(ID_MMFR5_EL1), 2796 ID_UNALLOCATED(3,7), 2797 2798 /* AArch64 ID registers */ 2799 /* CRm=4 */ 2800 ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1, 2801 ~(ID_AA64PFR0_EL1_AMU | 2802 ID_AA64PFR0_EL1_MPAM | 2803 ID_AA64PFR0_EL1_SVE | 2804 ID_AA64PFR0_EL1_RAS | 2805 ID_AA64PFR0_EL1_AdvSIMD | 2806 ID_AA64PFR0_EL1_FP)), 2807 ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1, 2808 ~(ID_AA64PFR1_EL1_PFAR | 2809 ID_AA64PFR1_EL1_DF2 | 2810 ID_AA64PFR1_EL1_MTEX | 2811 ID_AA64PFR1_EL1_THE | 2812 ID_AA64PFR1_EL1_GCS | 2813 ID_AA64PFR1_EL1_MTE_frac | 2814 ID_AA64PFR1_EL1_NMI | 2815 ID_AA64PFR1_EL1_RNDR_trap | 2816 ID_AA64PFR1_EL1_SME | 2817 ID_AA64PFR1_EL1_RES0 | 2818 ID_AA64PFR1_EL1_MPAM_frac | 2819 ID_AA64PFR1_EL1_RAS_frac | 2820 ID_AA64PFR1_EL1_MTE)), 2821 ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR), 2822 ID_UNALLOCATED(4,3), 2823 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), 2824 ID_HIDDEN(ID_AA64SMFR0_EL1), 2825 ID_UNALLOCATED(4,6), 2826 ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0), 2827 2828 /* CRm=5 */ 2829 /* 2830 * Prior to FEAT_Debugv8.9, the architecture defines context-aware 2831 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs). 2832 * KVM does not trap + emulate the breakpoint registers, and as such 2833 * cannot support a layout that misaligns with the underlying hardware. 2834 * While it may be possible to describe a subset that aligns with 2835 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for 2836 * simplicity. 2837 * 2838 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking 2839 * of breakpoints for more details. 2840 */ 2841 ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1, 2842 ID_AA64DFR0_EL1_DoubleLock_MASK | 2843 ID_AA64DFR0_EL1_WRPs_MASK | 2844 ID_AA64DFR0_EL1_PMUVer_MASK | 2845 ID_AA64DFR0_EL1_DebugVer_MASK), 2846 ID_SANITISED(ID_AA64DFR1_EL1), 2847 ID_UNALLOCATED(5,2), 2848 ID_UNALLOCATED(5,3), 2849 ID_HIDDEN(ID_AA64AFR0_EL1), 2850 ID_HIDDEN(ID_AA64AFR1_EL1), 2851 ID_UNALLOCATED(5,6), 2852 ID_UNALLOCATED(5,7), 2853 2854 /* CRm=6 */ 2855 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0), 2856 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI | 2857 ID_AA64ISAR1_EL1_GPA | 2858 ID_AA64ISAR1_EL1_API | 2859 ID_AA64ISAR1_EL1_APA)), 2860 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 | 2861 ID_AA64ISAR2_EL1_APA3 | 2862 ID_AA64ISAR2_EL1_GPA3)), 2863 ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT | 2864 ID_AA64ISAR3_EL1_FAMINMAX)), 2865 ID_UNALLOCATED(6,4), 2866 ID_UNALLOCATED(6,5), 2867 ID_UNALLOCATED(6,6), 2868 ID_UNALLOCATED(6,7), 2869 2870 /* CRm=7 */ 2871 ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1, 2872 ~(ID_AA64MMFR0_EL1_RES0 | 2873 ID_AA64MMFR0_EL1_ASIDBITS)), 2874 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | 2875 ID_AA64MMFR1_EL1_HCX | 2876 ID_AA64MMFR1_EL1_TWED | 2877 ID_AA64MMFR1_EL1_XNX | 2878 ID_AA64MMFR1_EL1_VH | 2879 ID_AA64MMFR1_EL1_VMIDBits)), 2880 ID_FILTERED(ID_AA64MMFR2_EL1, 2881 id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 | 2882 ID_AA64MMFR2_EL1_EVT | 2883 ID_AA64MMFR2_EL1_FWB | 2884 ID_AA64MMFR2_EL1_IDS | 2885 ID_AA64MMFR2_EL1_NV | 2886 ID_AA64MMFR2_EL1_CCIDX)), 2887 ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | 2888 ID_AA64MMFR3_EL1_S1PIE | 2889 ID_AA64MMFR3_EL1_S1POE)), 2890 ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac), 2891 ID_UNALLOCATED(7,5), 2892 ID_UNALLOCATED(7,6), 2893 ID_UNALLOCATED(7,7), 2894 2895 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, 2896 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, 2897 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, 2898 2899 MTE_REG(RGSR_EL1), 2900 MTE_REG(GCR_EL1), 2901 2902 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, 2903 { SYS_DESC(SYS_TRFCR_EL1), undef_access }, 2904 { SYS_DESC(SYS_SMPRI_EL1), undef_access }, 2905 { SYS_DESC(SYS_SMCR_EL1), undef_access }, 2906 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, 2907 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, 2908 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, 2909 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0, 2910 .visibility = tcr2_visibility }, 2911 2912 PTRAUTH_KEY(APIA), 2913 PTRAUTH_KEY(APIB), 2914 PTRAUTH_KEY(APDA), 2915 PTRAUTH_KEY(APDB), 2916 PTRAUTH_KEY(APGA), 2917 2918 { SYS_DESC(SYS_SPSR_EL1), access_spsr}, 2919 { SYS_DESC(SYS_ELR_EL1), access_elr}, 2920 2921 { SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, 2922 2923 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, 2924 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, 2925 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, 2926 2927 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi }, 2928 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi }, 2929 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi }, 2930 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi }, 2931 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi }, 2932 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi }, 2933 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, 2934 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, 2935 2936 MTE_REG(TFSR_EL1), 2937 MTE_REG(TFSRE0_EL1), 2938 2939 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, 2940 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, 2941 2942 { SYS_DESC(SYS_PMSCR_EL1), undef_access }, 2943 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access }, 2944 { SYS_DESC(SYS_PMSICR_EL1), undef_access }, 2945 { SYS_DESC(SYS_PMSIRR_EL1), undef_access }, 2946 { SYS_DESC(SYS_PMSFCR_EL1), undef_access }, 2947 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access }, 2948 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access }, 2949 { SYS_DESC(SYS_PMSIDR_EL1), undef_access }, 2950 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access }, 2951 { SYS_DESC(SYS_PMBPTR_EL1), undef_access }, 2952 { SYS_DESC(SYS_PMBSR_EL1), undef_access }, 2953 /* PMBIDR_EL1 is not trapped */ 2954 2955 { PMU_SYS_REG(PMINTENSET_EL1), 2956 .access = access_pminten, .reg = PMINTENSET_EL1, 2957 .get_user = get_pmreg, .set_user = set_pmreg }, 2958 { PMU_SYS_REG(PMINTENCLR_EL1), 2959 .access = access_pminten, .reg = PMINTENSET_EL1, 2960 .get_user = get_pmreg, .set_user = set_pmreg }, 2961 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, 2962 2963 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 2964 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1, 2965 .visibility = s1pie_visibility }, 2966 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1, 2967 .visibility = s1pie_visibility }, 2968 { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1, 2969 .visibility = s1poe_visibility }, 2970 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 2971 2972 { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, 2973 { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, 2974 { SYS_DESC(SYS_LORN_EL1), trap_loregion }, 2975 { SYS_DESC(SYS_LORC_EL1), trap_loregion }, 2976 { SYS_DESC(SYS_MPAMIDR_EL1), undef_access }, 2977 { SYS_DESC(SYS_LORID_EL1), trap_loregion }, 2978 2979 { SYS_DESC(SYS_MPAM1_EL1), undef_access }, 2980 { SYS_DESC(SYS_MPAM0_EL1), undef_access }, 2981 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, 2982 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 2983 2984 { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, 2985 { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, 2986 { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, 2987 { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, 2988 { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, 2989 { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, 2990 { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, 2991 { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, 2992 { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, 2993 { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, 2994 { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, 2995 { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, 2996 { SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, 2997 { SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, 2998 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, 2999 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, 3000 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, 3001 { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, 3002 { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, 3003 { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, 3004 { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, 3005 { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, 3006 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, 3007 { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, 3008 { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, 3009 3010 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 3011 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, 3012 3013 { SYS_DESC(SYS_ACCDATA_EL1), undef_access }, 3014 3015 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access }, 3016 3017 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, 3018 3019 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, 3020 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1, 3021 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 }, 3022 { SYS_DESC(SYS_CCSIDR2_EL1), undef_access }, 3023 { SYS_DESC(SYS_SMIDR_EL1), undef_access }, 3024 IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)), 3025 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, 3026 ID_FILTERED(CTR_EL0, ctr_el0, 3027 CTR_EL0_DIC_MASK | 3028 CTR_EL0_IDC_MASK | 3029 CTR_EL0_DminLine_MASK | 3030 CTR_EL0_L1Ip_MASK | 3031 CTR_EL0_IminLine_MASK), 3032 { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility }, 3033 { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility }, 3034 3035 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, 3036 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, 3037 { PMU_SYS_REG(PMCNTENSET_EL0), 3038 .access = access_pmcnten, .reg = PMCNTENSET_EL0, 3039 .get_user = get_pmreg, .set_user = set_pmreg }, 3040 { PMU_SYS_REG(PMCNTENCLR_EL0), 3041 .access = access_pmcnten, .reg = PMCNTENSET_EL0, 3042 .get_user = get_pmreg, .set_user = set_pmreg }, 3043 { PMU_SYS_REG(PMOVSCLR_EL0), 3044 .access = access_pmovs, .reg = PMOVSSET_EL0, 3045 .get_user = get_pmreg, .set_user = set_pmreg }, 3046 /* 3047 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was 3048 * previously (and pointlessly) advertised in the past... 3049 */ 3050 { PMU_SYS_REG(PMSWINC_EL0), 3051 .get_user = get_raz_reg, .set_user = set_wi_reg, 3052 .access = access_pmswinc, .reset = NULL }, 3053 { PMU_SYS_REG(PMSELR_EL0), 3054 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 }, 3055 { PMU_SYS_REG(PMCEID0_EL0), 3056 .access = access_pmceid, .reset = NULL }, 3057 { PMU_SYS_REG(PMCEID1_EL0), 3058 .access = access_pmceid, .reset = NULL }, 3059 { PMU_SYS_REG(PMCCNTR_EL0), 3060 .access = access_pmu_evcntr, .reset = reset_unknown, 3061 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr, 3062 .set_user = set_pmu_evcntr }, 3063 { PMU_SYS_REG(PMXEVTYPER_EL0), 3064 .access = access_pmu_evtyper, .reset = NULL }, 3065 { PMU_SYS_REG(PMXEVCNTR_EL0), 3066 .access = access_pmu_evcntr, .reset = NULL }, 3067 /* 3068 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero 3069 * in 32bit mode. Here we choose to reset it as zero for consistency. 3070 */ 3071 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr, 3072 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 }, 3073 { PMU_SYS_REG(PMOVSSET_EL0), 3074 .access = access_pmovs, .reg = PMOVSSET_EL0, 3075 .get_user = get_pmreg, .set_user = set_pmreg }, 3076 3077 { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0, 3078 .visibility = s1poe_visibility }, 3079 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, 3080 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, 3081 { SYS_DESC(SYS_TPIDR2_EL0), undef_access }, 3082 3083 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access }, 3084 3085 { SYS_DESC(SYS_AMCR_EL0), undef_access }, 3086 { SYS_DESC(SYS_AMCFGR_EL0), undef_access }, 3087 { SYS_DESC(SYS_AMCGCR_EL0), undef_access }, 3088 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access }, 3089 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access }, 3090 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access }, 3091 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access }, 3092 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access }, 3093 AMU_AMEVCNTR0_EL0(0), 3094 AMU_AMEVCNTR0_EL0(1), 3095 AMU_AMEVCNTR0_EL0(2), 3096 AMU_AMEVCNTR0_EL0(3), 3097 AMU_AMEVCNTR0_EL0(4), 3098 AMU_AMEVCNTR0_EL0(5), 3099 AMU_AMEVCNTR0_EL0(6), 3100 AMU_AMEVCNTR0_EL0(7), 3101 AMU_AMEVCNTR0_EL0(8), 3102 AMU_AMEVCNTR0_EL0(9), 3103 AMU_AMEVCNTR0_EL0(10), 3104 AMU_AMEVCNTR0_EL0(11), 3105 AMU_AMEVCNTR0_EL0(12), 3106 AMU_AMEVCNTR0_EL0(13), 3107 AMU_AMEVCNTR0_EL0(14), 3108 AMU_AMEVCNTR0_EL0(15), 3109 AMU_AMEVTYPER0_EL0(0), 3110 AMU_AMEVTYPER0_EL0(1), 3111 AMU_AMEVTYPER0_EL0(2), 3112 AMU_AMEVTYPER0_EL0(3), 3113 AMU_AMEVTYPER0_EL0(4), 3114 AMU_AMEVTYPER0_EL0(5), 3115 AMU_AMEVTYPER0_EL0(6), 3116 AMU_AMEVTYPER0_EL0(7), 3117 AMU_AMEVTYPER0_EL0(8), 3118 AMU_AMEVTYPER0_EL0(9), 3119 AMU_AMEVTYPER0_EL0(10), 3120 AMU_AMEVTYPER0_EL0(11), 3121 AMU_AMEVTYPER0_EL0(12), 3122 AMU_AMEVTYPER0_EL0(13), 3123 AMU_AMEVTYPER0_EL0(14), 3124 AMU_AMEVTYPER0_EL0(15), 3125 AMU_AMEVCNTR1_EL0(0), 3126 AMU_AMEVCNTR1_EL0(1), 3127 AMU_AMEVCNTR1_EL0(2), 3128 AMU_AMEVCNTR1_EL0(3), 3129 AMU_AMEVCNTR1_EL0(4), 3130 AMU_AMEVCNTR1_EL0(5), 3131 AMU_AMEVCNTR1_EL0(6), 3132 AMU_AMEVCNTR1_EL0(7), 3133 AMU_AMEVCNTR1_EL0(8), 3134 AMU_AMEVCNTR1_EL0(9), 3135 AMU_AMEVCNTR1_EL0(10), 3136 AMU_AMEVCNTR1_EL0(11), 3137 AMU_AMEVCNTR1_EL0(12), 3138 AMU_AMEVCNTR1_EL0(13), 3139 AMU_AMEVCNTR1_EL0(14), 3140 AMU_AMEVCNTR1_EL0(15), 3141 AMU_AMEVTYPER1_EL0(0), 3142 AMU_AMEVTYPER1_EL0(1), 3143 AMU_AMEVTYPER1_EL0(2), 3144 AMU_AMEVTYPER1_EL0(3), 3145 AMU_AMEVTYPER1_EL0(4), 3146 AMU_AMEVTYPER1_EL0(5), 3147 AMU_AMEVTYPER1_EL0(6), 3148 AMU_AMEVTYPER1_EL0(7), 3149 AMU_AMEVTYPER1_EL0(8), 3150 AMU_AMEVTYPER1_EL0(9), 3151 AMU_AMEVTYPER1_EL0(10), 3152 AMU_AMEVTYPER1_EL0(11), 3153 AMU_AMEVTYPER1_EL0(12), 3154 AMU_AMEVTYPER1_EL0(13), 3155 AMU_AMEVTYPER1_EL0(14), 3156 AMU_AMEVTYPER1_EL0(15), 3157 3158 { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer }, 3159 { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer }, 3160 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, 3161 { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer }, 3162 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, 3163 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, 3164 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, 3165 3166 { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer }, 3167 { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer }, 3168 { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer }, 3169 3170 /* PMEVCNTRn_EL0 */ 3171 PMU_PMEVCNTR_EL0(0), 3172 PMU_PMEVCNTR_EL0(1), 3173 PMU_PMEVCNTR_EL0(2), 3174 PMU_PMEVCNTR_EL0(3), 3175 PMU_PMEVCNTR_EL0(4), 3176 PMU_PMEVCNTR_EL0(5), 3177 PMU_PMEVCNTR_EL0(6), 3178 PMU_PMEVCNTR_EL0(7), 3179 PMU_PMEVCNTR_EL0(8), 3180 PMU_PMEVCNTR_EL0(9), 3181 PMU_PMEVCNTR_EL0(10), 3182 PMU_PMEVCNTR_EL0(11), 3183 PMU_PMEVCNTR_EL0(12), 3184 PMU_PMEVCNTR_EL0(13), 3185 PMU_PMEVCNTR_EL0(14), 3186 PMU_PMEVCNTR_EL0(15), 3187 PMU_PMEVCNTR_EL0(16), 3188 PMU_PMEVCNTR_EL0(17), 3189 PMU_PMEVCNTR_EL0(18), 3190 PMU_PMEVCNTR_EL0(19), 3191 PMU_PMEVCNTR_EL0(20), 3192 PMU_PMEVCNTR_EL0(21), 3193 PMU_PMEVCNTR_EL0(22), 3194 PMU_PMEVCNTR_EL0(23), 3195 PMU_PMEVCNTR_EL0(24), 3196 PMU_PMEVCNTR_EL0(25), 3197 PMU_PMEVCNTR_EL0(26), 3198 PMU_PMEVCNTR_EL0(27), 3199 PMU_PMEVCNTR_EL0(28), 3200 PMU_PMEVCNTR_EL0(29), 3201 PMU_PMEVCNTR_EL0(30), 3202 /* PMEVTYPERn_EL0 */ 3203 PMU_PMEVTYPER_EL0(0), 3204 PMU_PMEVTYPER_EL0(1), 3205 PMU_PMEVTYPER_EL0(2), 3206 PMU_PMEVTYPER_EL0(3), 3207 PMU_PMEVTYPER_EL0(4), 3208 PMU_PMEVTYPER_EL0(5), 3209 PMU_PMEVTYPER_EL0(6), 3210 PMU_PMEVTYPER_EL0(7), 3211 PMU_PMEVTYPER_EL0(8), 3212 PMU_PMEVTYPER_EL0(9), 3213 PMU_PMEVTYPER_EL0(10), 3214 PMU_PMEVTYPER_EL0(11), 3215 PMU_PMEVTYPER_EL0(12), 3216 PMU_PMEVTYPER_EL0(13), 3217 PMU_PMEVTYPER_EL0(14), 3218 PMU_PMEVTYPER_EL0(15), 3219 PMU_PMEVTYPER_EL0(16), 3220 PMU_PMEVTYPER_EL0(17), 3221 PMU_PMEVTYPER_EL0(18), 3222 PMU_PMEVTYPER_EL0(19), 3223 PMU_PMEVTYPER_EL0(20), 3224 PMU_PMEVTYPER_EL0(21), 3225 PMU_PMEVTYPER_EL0(22), 3226 PMU_PMEVTYPER_EL0(23), 3227 PMU_PMEVTYPER_EL0(24), 3228 PMU_PMEVTYPER_EL0(25), 3229 PMU_PMEVTYPER_EL0(26), 3230 PMU_PMEVTYPER_EL0(27), 3231 PMU_PMEVTYPER_EL0(28), 3232 PMU_PMEVTYPER_EL0(29), 3233 PMU_PMEVTYPER_EL0(30), 3234 /* 3235 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero 3236 * in 32bit mode. Here we choose to reset it as zero for consistency. 3237 */ 3238 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper, 3239 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 }, 3240 3241 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0), 3242 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0), 3243 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1), 3244 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), 3245 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0), 3246 EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0), 3247 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), 3248 EL2_REG_VNCR(HSTR_EL2, reset_val, 0), 3249 EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0), 3250 EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0), 3251 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0), 3252 EL2_REG_VNCR(HACR_EL2, reset_val, 0), 3253 3254 EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0, 3255 sve_el2_visibility), 3256 3257 EL2_REG_VNCR(HCRX_EL2, reset_val, 0), 3258 3259 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), 3260 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0), 3261 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1), 3262 EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1, 3263 tcr2_el2_visibility), 3264 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0), 3265 EL2_REG_VNCR(VTCR_EL2, reset_val, 0), 3266 3267 { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 }, 3268 EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0), 3269 EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0), 3270 EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0), 3271 EL2_REG_REDIR(SPSR_EL2, reset_val, 0), 3272 EL2_REG_REDIR(ELR_EL2, reset_val, 0), 3273 { SYS_DESC(SYS_SP_EL1), access_sp_el1}, 3274 3275 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */ 3276 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi }, 3277 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi }, 3278 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi }, 3279 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi }, 3280 3281 { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 }, 3282 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), 3283 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), 3284 EL2_REG_REDIR(ESR_EL2, reset_val, 0), 3285 { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 }, 3286 3287 EL2_REG_REDIR(FAR_EL2, reset_val, 0), 3288 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0), 3289 3290 EL2_REG(MAIR_EL2, access_rw, reset_val, 0), 3291 EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0, 3292 s1pie_el2_visibility), 3293 EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0, 3294 s1pie_el2_visibility), 3295 EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0, 3296 s1poe_el2_visibility), 3297 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0), 3298 { SYS_DESC(SYS_MPAMHCR_EL2), undef_access }, 3299 { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access }, 3300 { SYS_DESC(SYS_MPAM2_EL2), undef_access }, 3301 { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access }, 3302 { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access }, 3303 { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access }, 3304 { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access }, 3305 { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access }, 3306 { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access }, 3307 { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access }, 3308 { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access }, 3309 3310 EL2_REG(VBAR_EL2, access_rw, reset_val, 0), 3311 EL2_REG(RVBAR_EL2, access_rw, reset_val, 0), 3312 { SYS_DESC(SYS_RMR_EL2), undef_access }, 3313 3314 EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0), 3315 EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0), 3316 EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0), 3317 EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0), 3318 EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0), 3319 EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0), 3320 EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0), 3321 EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0), 3322 3323 { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre }, 3324 3325 EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0), 3326 { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr }, 3327 { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr }, 3328 { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr }, 3329 { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr }, 3330 EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0), 3331 3332 EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0), 3333 EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0), 3334 EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0), 3335 EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0), 3336 EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0), 3337 EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0), 3338 EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0), 3339 EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0), 3340 EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0), 3341 EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0), 3342 EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0), 3343 EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0), 3344 EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0), 3345 EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0), 3346 EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0), 3347 EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0), 3348 3349 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0), 3350 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0), 3351 3352 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), 3353 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), 3354 { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, 3355 EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0), 3356 EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0), 3357 3358 { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer }, 3359 EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0), 3360 EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0), 3361 3362 { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, 3363 3364 { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer }, 3365 { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer }, 3366 { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer }, 3367 3368 { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer }, 3369 { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer }, 3370 { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer }, 3371 3372 EL2_REG(SP_EL2, NULL, reset_unknown, 0), 3373 }; 3374 handle_at_s1e01(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3375 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3376 const struct sys_reg_desc *r) 3377 { 3378 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3379 3380 __kvm_at_s1e01(vcpu, op, p->regval); 3381 3382 return true; 3383 } 3384 handle_at_s1e2(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3385 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3386 const struct sys_reg_desc *r) 3387 { 3388 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3389 3390 /* There is no FGT associated with AT S1E2A :-( */ 3391 if (op == OP_AT_S1E2A && 3392 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { 3393 kvm_inject_undefined(vcpu); 3394 return false; 3395 } 3396 3397 __kvm_at_s1e2(vcpu, op, p->regval); 3398 3399 return true; 3400 } 3401 handle_at_s12(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3402 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3403 const struct sys_reg_desc *r) 3404 { 3405 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3406 3407 __kvm_at_s12(vcpu, op, p->regval); 3408 3409 return true; 3410 } 3411 kvm_supported_tlbi_s12_op(struct kvm_vcpu * vpcu,u32 instr)3412 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr) 3413 { 3414 struct kvm *kvm = vpcu->kvm; 3415 u8 CRm = sys_reg_CRm(instr); 3416 3417 if (sys_reg_CRn(instr) == TLBI_CRn_nXS && 3418 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) 3419 return false; 3420 3421 if (CRm == TLBI_CRm_nROS && 3422 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 3423 return false; 3424 3425 return true; 3426 } 3427 handle_alle1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3428 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3429 const struct sys_reg_desc *r) 3430 { 3431 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3432 3433 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) 3434 return undef_access(vcpu, p, r); 3435 3436 write_lock(&vcpu->kvm->mmu_lock); 3437 3438 /* 3439 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the 3440 * corresponding VMIDs. 3441 */ 3442 kvm_nested_s2_unmap(vcpu->kvm, true); 3443 3444 write_unlock(&vcpu->kvm->mmu_lock); 3445 3446 return true; 3447 } 3448 kvm_supported_tlbi_ipas2_op(struct kvm_vcpu * vpcu,u32 instr)3449 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vpcu, u32 instr) 3450 { 3451 struct kvm *kvm = vpcu->kvm; 3452 u8 CRm = sys_reg_CRm(instr); 3453 u8 Op2 = sys_reg_Op2(instr); 3454 3455 if (sys_reg_CRn(instr) == TLBI_CRn_nXS && 3456 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) 3457 return false; 3458 3459 if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) && 3460 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) 3461 return false; 3462 3463 if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) && 3464 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 3465 return false; 3466 3467 if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) && 3468 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) 3469 return false; 3470 3471 return true; 3472 } 3473 3474 /* Only defined here as this is an internal "abstraction" */ 3475 union tlbi_info { 3476 struct { 3477 u64 start; 3478 u64 size; 3479 } range; 3480 3481 struct { 3482 u64 addr; 3483 } ipa; 3484 3485 struct { 3486 u64 addr; 3487 u32 encoding; 3488 } va; 3489 }; 3490 s2_mmu_unmap_range(struct kvm_s2_mmu * mmu,const union tlbi_info * info)3491 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu, 3492 const union tlbi_info *info) 3493 { 3494 /* 3495 * The unmap operation is allowed to drop the MMU lock and block, which 3496 * means that @mmu could be used for a different context than the one 3497 * currently being invalidated. 3498 * 3499 * This behavior is still safe, as: 3500 * 3501 * 1) The vCPU(s) that recycled the MMU are responsible for invalidating 3502 * the entire MMU before reusing it, which still honors the intent 3503 * of a TLBI. 3504 * 3505 * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC 3506 * and ERET to the guest), other vCPUs are allowed to use stale 3507 * translations. 3508 * 3509 * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and 3510 * at worst may cause more aborts for shadow stage-2 fills. 3511 * 3512 * Dropping the MMU lock also implies that shadow stage-2 fills could 3513 * happen behind the back of the TLBI. This is still safe, though, as 3514 * the L1 needs to put its stage-2 in a consistent state before doing 3515 * the TLBI. 3516 */ 3517 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); 3518 } 3519 handle_vmalls12e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3520 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3521 const struct sys_reg_desc *r) 3522 { 3523 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3524 u64 limit, vttbr; 3525 3526 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) 3527 return undef_access(vcpu, p, r); 3528 3529 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3530 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); 3531 3532 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3533 &(union tlbi_info) { 3534 .range = { 3535 .start = 0, 3536 .size = limit, 3537 }, 3538 }, 3539 s2_mmu_unmap_range); 3540 3541 return true; 3542 } 3543 handle_ripas2e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3544 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3545 const struct sys_reg_desc *r) 3546 { 3547 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3548 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3549 u64 base, range, tg, num, scale; 3550 int shift; 3551 3552 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) 3553 return undef_access(vcpu, p, r); 3554 3555 /* 3556 * Because the shadow S2 structure doesn't necessarily reflect that 3557 * of the guest's S2 (different base granule size, for example), we 3558 * decide to ignore TTL and only use the described range. 3559 */ 3560 tg = FIELD_GET(GENMASK(47, 46), p->regval); 3561 scale = FIELD_GET(GENMASK(45, 44), p->regval); 3562 num = FIELD_GET(GENMASK(43, 39), p->regval); 3563 base = p->regval & GENMASK(36, 0); 3564 3565 switch(tg) { 3566 case 1: 3567 shift = 12; 3568 break; 3569 case 2: 3570 shift = 14; 3571 break; 3572 case 3: 3573 default: /* IMPDEF: handle tg==0 as 64k */ 3574 shift = 16; 3575 break; 3576 } 3577 3578 base <<= shift; 3579 range = __TLBI_RANGE_PAGES(num, scale) << shift; 3580 3581 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3582 &(union tlbi_info) { 3583 .range = { 3584 .start = base, 3585 .size = range, 3586 }, 3587 }, 3588 s2_mmu_unmap_range); 3589 3590 return true; 3591 } 3592 s2_mmu_unmap_ipa(struct kvm_s2_mmu * mmu,const union tlbi_info * info)3593 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu, 3594 const union tlbi_info *info) 3595 { 3596 unsigned long max_size; 3597 u64 base_addr; 3598 3599 /* 3600 * We drop a number of things from the supplied value: 3601 * 3602 * - NS bit: we're non-secure only. 3603 * 3604 * - IPA[51:48]: We don't support 52bit IPA just yet... 3605 * 3606 * And of course, adjust the IPA to be on an actual address. 3607 */ 3608 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12; 3609 max_size = compute_tlb_inval_range(mmu, info->ipa.addr); 3610 base_addr &= ~(max_size - 1); 3611 3612 /* 3613 * See comment in s2_mmu_unmap_range() for why this is allowed to 3614 * reschedule. 3615 */ 3616 kvm_stage2_unmap_range(mmu, base_addr, max_size, true); 3617 } 3618 handle_ipas2e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3619 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3620 const struct sys_reg_desc *r) 3621 { 3622 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3623 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3624 3625 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) 3626 return undef_access(vcpu, p, r); 3627 3628 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3629 &(union tlbi_info) { 3630 .ipa = { 3631 .addr = p->regval, 3632 }, 3633 }, 3634 s2_mmu_unmap_ipa); 3635 3636 return true; 3637 } 3638 s2_mmu_tlbi_s1e1(struct kvm_s2_mmu * mmu,const union tlbi_info * info)3639 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu, 3640 const union tlbi_info *info) 3641 { 3642 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); 3643 } 3644 handle_tlbi_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3645 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 3646 const struct sys_reg_desc *r) 3647 { 3648 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); 3649 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); 3650 3651 /* 3652 * If we're here, this is because we've trapped on a EL1 TLBI 3653 * instruction that affects the EL1 translation regime while 3654 * we're running in a context that doesn't allow us to let the 3655 * HW do its thing (aka vEL2): 3656 * 3657 * - HCR_EL2.E2H == 0 : a non-VHE guest 3658 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode 3659 * 3660 * We don't expect these helpers to ever be called when running 3661 * in a vEL1 context. 3662 */ 3663 3664 WARN_ON(!vcpu_is_el2(vcpu)); 3665 3666 if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) 3667 return undef_access(vcpu, p, r); 3668 3669 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), 3670 &(union tlbi_info) { 3671 .va = { 3672 .addr = p->regval, 3673 .encoding = sys_encoding, 3674 }, 3675 }, 3676 s2_mmu_tlbi_s1e1); 3677 3678 return true; 3679 } 3680 3681 #define SYS_INSN(insn, access_fn) \ 3682 { \ 3683 SYS_DESC(OP_##insn), \ 3684 .access = (access_fn), \ 3685 } 3686 3687 static struct sys_reg_desc sys_insn_descs[] = { 3688 { SYS_DESC(SYS_DC_ISW), access_dcsw }, 3689 { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, 3690 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, 3691 3692 SYS_INSN(AT_S1E1R, handle_at_s1e01), 3693 SYS_INSN(AT_S1E1W, handle_at_s1e01), 3694 SYS_INSN(AT_S1E0R, handle_at_s1e01), 3695 SYS_INSN(AT_S1E0W, handle_at_s1e01), 3696 SYS_INSN(AT_S1E1RP, handle_at_s1e01), 3697 SYS_INSN(AT_S1E1WP, handle_at_s1e01), 3698 3699 { SYS_DESC(SYS_DC_CSW), access_dcsw }, 3700 { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, 3701 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, 3702 { SYS_DESC(SYS_DC_CISW), access_dcsw }, 3703 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, 3704 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, 3705 3706 SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1), 3707 SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1), 3708 SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1), 3709 SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1), 3710 SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1), 3711 SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1), 3712 3713 SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1), 3714 SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1), 3715 SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1), 3716 SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1), 3717 3718 SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1), 3719 SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1), 3720 SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1), 3721 SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1), 3722 SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1), 3723 SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1), 3724 3725 SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1), 3726 SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1), 3727 SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1), 3728 SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1), 3729 3730 SYS_INSN(TLBI_RVAE1, handle_tlbi_el1), 3731 SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1), 3732 SYS_INSN(TLBI_RVALE1, handle_tlbi_el1), 3733 SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1), 3734 3735 SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1), 3736 SYS_INSN(TLBI_VAE1, handle_tlbi_el1), 3737 SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1), 3738 SYS_INSN(TLBI_VAAE1, handle_tlbi_el1), 3739 SYS_INSN(TLBI_VALE1, handle_tlbi_el1), 3740 SYS_INSN(TLBI_VAALE1, handle_tlbi_el1), 3741 3742 SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1), 3743 SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1), 3744 SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1), 3745 SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1), 3746 SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1), 3747 SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1), 3748 3749 SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1), 3750 SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1), 3751 SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1), 3752 SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1), 3753 3754 SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1), 3755 SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1), 3756 SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1), 3757 SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1), 3758 SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1), 3759 SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1), 3760 3761 SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1), 3762 SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1), 3763 SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1), 3764 SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1), 3765 3766 SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1), 3767 SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1), 3768 SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1), 3769 SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1), 3770 3771 SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1), 3772 SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1), 3773 SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1), 3774 SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1), 3775 SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1), 3776 SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1), 3777 3778 SYS_INSN(AT_S1E2R, handle_at_s1e2), 3779 SYS_INSN(AT_S1E2W, handle_at_s1e2), 3780 SYS_INSN(AT_S12E1R, handle_at_s12), 3781 SYS_INSN(AT_S12E1W, handle_at_s12), 3782 SYS_INSN(AT_S12E0R, handle_at_s12), 3783 SYS_INSN(AT_S12E0W, handle_at_s12), 3784 SYS_INSN(AT_S1E2A, handle_at_s1e2), 3785 3786 SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is), 3787 SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is), 3788 SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is), 3789 SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is), 3790 3791 SYS_INSN(TLBI_ALLE2OS, undef_access), 3792 SYS_INSN(TLBI_VAE2OS, undef_access), 3793 SYS_INSN(TLBI_ALLE1OS, handle_alle1is), 3794 SYS_INSN(TLBI_VALE2OS, undef_access), 3795 SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is), 3796 3797 SYS_INSN(TLBI_RVAE2IS, undef_access), 3798 SYS_INSN(TLBI_RVALE2IS, undef_access), 3799 3800 SYS_INSN(TLBI_ALLE1IS, handle_alle1is), 3801 SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is), 3802 SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is), 3803 SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is), 3804 SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is), 3805 SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is), 3806 SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is), 3807 SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is), 3808 SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is), 3809 SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is), 3810 SYS_INSN(TLBI_RVAE2OS, undef_access), 3811 SYS_INSN(TLBI_RVALE2OS, undef_access), 3812 SYS_INSN(TLBI_RVAE2, undef_access), 3813 SYS_INSN(TLBI_RVALE2, undef_access), 3814 SYS_INSN(TLBI_ALLE1, handle_alle1is), 3815 SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is), 3816 3817 SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is), 3818 SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is), 3819 SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is), 3820 SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is), 3821 3822 SYS_INSN(TLBI_ALLE2OSNXS, undef_access), 3823 SYS_INSN(TLBI_VAE2OSNXS, undef_access), 3824 SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is), 3825 SYS_INSN(TLBI_VALE2OSNXS, undef_access), 3826 SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is), 3827 3828 SYS_INSN(TLBI_RVAE2ISNXS, undef_access), 3829 SYS_INSN(TLBI_RVALE2ISNXS, undef_access), 3830 SYS_INSN(TLBI_ALLE2ISNXS, undef_access), 3831 SYS_INSN(TLBI_VAE2ISNXS, undef_access), 3832 3833 SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is), 3834 SYS_INSN(TLBI_VALE2ISNXS, undef_access), 3835 SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is), 3836 SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is), 3837 SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is), 3838 SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is), 3839 SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is), 3840 SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is), 3841 SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is), 3842 SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is), 3843 SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is), 3844 SYS_INSN(TLBI_RVAE2OSNXS, undef_access), 3845 SYS_INSN(TLBI_RVALE2OSNXS, undef_access), 3846 SYS_INSN(TLBI_RVAE2NXS, undef_access), 3847 SYS_INSN(TLBI_RVALE2NXS, undef_access), 3848 SYS_INSN(TLBI_ALLE2NXS, undef_access), 3849 SYS_INSN(TLBI_VAE2NXS, undef_access), 3850 SYS_INSN(TLBI_ALLE1NXS, handle_alle1is), 3851 SYS_INSN(TLBI_VALE2NXS, undef_access), 3852 SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is), 3853 }; 3854 trap_dbgdidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3855 static bool trap_dbgdidr(struct kvm_vcpu *vcpu, 3856 struct sys_reg_params *p, 3857 const struct sys_reg_desc *r) 3858 { 3859 if (p->is_write) { 3860 return ignore_write(vcpu, p); 3861 } else { 3862 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1); 3863 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); 3864 3865 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | 3866 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) | 3867 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) | 3868 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) | 3869 (1 << 15) | (el3 << 14) | (el3 << 12)); 3870 return true; 3871 } 3872 } 3873 3874 /* 3875 * AArch32 debug register mappings 3876 * 3877 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] 3878 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] 3879 * 3880 * None of the other registers share their location, so treat them as 3881 * if they were 64bit. 3882 */ 3883 #define DBG_BCR_BVR_WCR_WVR(n) \ 3884 /* DBGBVRn */ \ 3885 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \ 3886 trap_dbg_wb_reg, NULL, n }, \ 3887 /* DBGBCRn */ \ 3888 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \ 3889 /* DBGWVRn */ \ 3890 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \ 3891 /* DBGWCRn */ \ 3892 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n } 3893 3894 #define DBGBXVR(n) \ 3895 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \ 3896 trap_dbg_wb_reg, NULL, n } 3897 3898 /* 3899 * Trapped cp14 registers. We generally ignore most of the external 3900 * debug, on the principle that they don't really make sense to a 3901 * guest. Revisit this one day, would this principle change. 3902 */ 3903 static const struct sys_reg_desc cp14_regs[] = { 3904 /* DBGDIDR */ 3905 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr }, 3906 /* DBGDTRRXext */ 3907 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, 3908 3909 DBG_BCR_BVR_WCR_WVR(0), 3910 /* DBGDSCRint */ 3911 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, 3912 DBG_BCR_BVR_WCR_WVR(1), 3913 /* DBGDCCINT */ 3914 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 }, 3915 /* DBGDSCRext */ 3916 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 }, 3917 DBG_BCR_BVR_WCR_WVR(2), 3918 /* DBGDTR[RT]Xint */ 3919 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, 3920 /* DBGDTR[RT]Xext */ 3921 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, 3922 DBG_BCR_BVR_WCR_WVR(3), 3923 DBG_BCR_BVR_WCR_WVR(4), 3924 DBG_BCR_BVR_WCR_WVR(5), 3925 /* DBGWFAR */ 3926 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, 3927 /* DBGOSECCR */ 3928 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, 3929 DBG_BCR_BVR_WCR_WVR(6), 3930 /* DBGVCR */ 3931 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 }, 3932 DBG_BCR_BVR_WCR_WVR(7), 3933 DBG_BCR_BVR_WCR_WVR(8), 3934 DBG_BCR_BVR_WCR_WVR(9), 3935 DBG_BCR_BVR_WCR_WVR(10), 3936 DBG_BCR_BVR_WCR_WVR(11), 3937 DBG_BCR_BVR_WCR_WVR(12), 3938 DBG_BCR_BVR_WCR_WVR(13), 3939 DBG_BCR_BVR_WCR_WVR(14), 3940 DBG_BCR_BVR_WCR_WVR(15), 3941 3942 /* DBGDRAR (32bit) */ 3943 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, 3944 3945 DBGBXVR(0), 3946 /* DBGOSLAR */ 3947 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 }, 3948 DBGBXVR(1), 3949 /* DBGOSLSR */ 3950 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 }, 3951 DBGBXVR(2), 3952 DBGBXVR(3), 3953 /* DBGOSDLR */ 3954 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, 3955 DBGBXVR(4), 3956 /* DBGPRCR */ 3957 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, 3958 DBGBXVR(5), 3959 DBGBXVR(6), 3960 DBGBXVR(7), 3961 DBGBXVR(8), 3962 DBGBXVR(9), 3963 DBGBXVR(10), 3964 DBGBXVR(11), 3965 DBGBXVR(12), 3966 DBGBXVR(13), 3967 DBGBXVR(14), 3968 DBGBXVR(15), 3969 3970 /* DBGDSAR (32bit) */ 3971 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, 3972 3973 /* DBGDEVID2 */ 3974 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, 3975 /* DBGDEVID1 */ 3976 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, 3977 /* DBGDEVID */ 3978 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, 3979 /* DBGCLAIMSET */ 3980 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, 3981 /* DBGCLAIMCLR */ 3982 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, 3983 /* DBGAUTHSTATUS */ 3984 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, 3985 }; 3986 3987 /* Trapped cp14 64bit registers */ 3988 static const struct sys_reg_desc cp14_64_regs[] = { 3989 /* DBGDRAR (64bit) */ 3990 { Op1( 0), CRm( 1), .access = trap_raz_wi }, 3991 3992 /* DBGDSAR (64bit) */ 3993 { Op1( 0), CRm( 2), .access = trap_raz_wi }, 3994 }; 3995 3996 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \ 3997 AA32(_map), \ 3998 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \ 3999 .visibility = pmu_visibility 4000 4001 /* Macro to expand the PMEVCNTRn register */ 4002 #define PMU_PMEVCNTR(n) \ 4003 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ 4004 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ 4005 .access = access_pmu_evcntr } 4006 4007 /* Macro to expand the PMEVTYPERn register */ 4008 #define PMU_PMEVTYPER(n) \ 4009 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ 4010 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ 4011 .access = access_pmu_evtyper } 4012 /* 4013 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 4014 * depending on the way they are accessed (as a 32bit or a 64bit 4015 * register). 4016 */ 4017 static const struct sys_reg_desc cp15_regs[] = { 4018 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr }, 4019 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 }, 4020 /* ACTLR */ 4021 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 }, 4022 /* ACTLR2 */ 4023 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 }, 4024 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, 4025 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 }, 4026 /* TTBCR */ 4027 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 }, 4028 /* TTBCR2 */ 4029 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 }, 4030 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 }, 4031 { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, 4032 /* DFSR */ 4033 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 }, 4034 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 }, 4035 /* ADFSR */ 4036 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 }, 4037 /* AIFSR */ 4038 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 }, 4039 /* DFAR */ 4040 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 }, 4041 /* IFAR */ 4042 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 }, 4043 4044 /* 4045 * DC{C,I,CI}SW operations: 4046 */ 4047 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, 4048 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 4049 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 4050 4051 /* PMU */ 4052 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr }, 4053 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten }, 4054 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten }, 4055 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs }, 4056 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc }, 4057 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr }, 4058 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid }, 4059 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid }, 4060 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr }, 4061 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper }, 4062 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr }, 4063 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr }, 4064 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten }, 4065 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten }, 4066 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs }, 4067 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid }, 4068 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid }, 4069 /* PMMIR */ 4070 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi }, 4071 4072 /* PRRR/MAIR0 */ 4073 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, 4074 /* NMRR/MAIR1 */ 4075 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 }, 4076 /* AMAIR0 */ 4077 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 }, 4078 /* AMAIR1 */ 4079 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 }, 4080 4081 { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, 4082 { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, 4083 { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, 4084 { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, 4085 { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, 4086 { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, 4087 { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, 4088 { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, 4089 { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, 4090 { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, 4091 { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, 4092 { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, 4093 { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, 4094 { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, 4095 { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, 4096 { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, 4097 { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, 4098 { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, 4099 { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, 4100 { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, 4101 { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, 4102 { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, 4103 4104 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 }, 4105 4106 /* Arch Tmers */ 4107 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer }, 4108 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer }, 4109 4110 /* PMEVCNTRn */ 4111 PMU_PMEVCNTR(0), 4112 PMU_PMEVCNTR(1), 4113 PMU_PMEVCNTR(2), 4114 PMU_PMEVCNTR(3), 4115 PMU_PMEVCNTR(4), 4116 PMU_PMEVCNTR(5), 4117 PMU_PMEVCNTR(6), 4118 PMU_PMEVCNTR(7), 4119 PMU_PMEVCNTR(8), 4120 PMU_PMEVCNTR(9), 4121 PMU_PMEVCNTR(10), 4122 PMU_PMEVCNTR(11), 4123 PMU_PMEVCNTR(12), 4124 PMU_PMEVCNTR(13), 4125 PMU_PMEVCNTR(14), 4126 PMU_PMEVCNTR(15), 4127 PMU_PMEVCNTR(16), 4128 PMU_PMEVCNTR(17), 4129 PMU_PMEVCNTR(18), 4130 PMU_PMEVCNTR(19), 4131 PMU_PMEVCNTR(20), 4132 PMU_PMEVCNTR(21), 4133 PMU_PMEVCNTR(22), 4134 PMU_PMEVCNTR(23), 4135 PMU_PMEVCNTR(24), 4136 PMU_PMEVCNTR(25), 4137 PMU_PMEVCNTR(26), 4138 PMU_PMEVCNTR(27), 4139 PMU_PMEVCNTR(28), 4140 PMU_PMEVCNTR(29), 4141 PMU_PMEVCNTR(30), 4142 /* PMEVTYPERn */ 4143 PMU_PMEVTYPER(0), 4144 PMU_PMEVTYPER(1), 4145 PMU_PMEVTYPER(2), 4146 PMU_PMEVTYPER(3), 4147 PMU_PMEVTYPER(4), 4148 PMU_PMEVTYPER(5), 4149 PMU_PMEVTYPER(6), 4150 PMU_PMEVTYPER(7), 4151 PMU_PMEVTYPER(8), 4152 PMU_PMEVTYPER(9), 4153 PMU_PMEVTYPER(10), 4154 PMU_PMEVTYPER(11), 4155 PMU_PMEVTYPER(12), 4156 PMU_PMEVTYPER(13), 4157 PMU_PMEVTYPER(14), 4158 PMU_PMEVTYPER(15), 4159 PMU_PMEVTYPER(16), 4160 PMU_PMEVTYPER(17), 4161 PMU_PMEVTYPER(18), 4162 PMU_PMEVTYPER(19), 4163 PMU_PMEVTYPER(20), 4164 PMU_PMEVTYPER(21), 4165 PMU_PMEVTYPER(22), 4166 PMU_PMEVTYPER(23), 4167 PMU_PMEVTYPER(24), 4168 PMU_PMEVTYPER(25), 4169 PMU_PMEVTYPER(26), 4170 PMU_PMEVTYPER(27), 4171 PMU_PMEVTYPER(28), 4172 PMU_PMEVTYPER(29), 4173 PMU_PMEVTYPER(30), 4174 /* PMCCFILTR */ 4175 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper }, 4176 4177 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr }, 4178 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr }, 4179 4180 /* CCSIDR2 */ 4181 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access }, 4182 4183 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 }, 4184 }; 4185 4186 static const struct sys_reg_desc cp15_64_regs[] = { 4187 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, 4188 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr }, 4189 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ 4190 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer }, 4191 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 }, 4192 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ 4193 { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer }, 4194 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ 4195 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, 4196 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer }, 4197 { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer }, 4198 }; 4199 check_sysreg_table(const struct sys_reg_desc * table,unsigned int n,bool is_32)4200 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, 4201 bool is_32) 4202 { 4203 unsigned int i; 4204 4205 for (i = 0; i < n; i++) { 4206 if (!is_32 && table[i].reg && !table[i].reset) { 4207 kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n", 4208 &table[i], i, table[i].name); 4209 return false; 4210 } 4211 4212 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { 4213 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n", 4214 &table[i], i, table[i - 1].name, table[i].name); 4215 return false; 4216 } 4217 } 4218 4219 return true; 4220 } 4221 kvm_handle_cp14_load_store(struct kvm_vcpu * vcpu)4222 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu) 4223 { 4224 kvm_inject_undefined(vcpu); 4225 return 1; 4226 } 4227 perform_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)4228 static void perform_access(struct kvm_vcpu *vcpu, 4229 struct sys_reg_params *params, 4230 const struct sys_reg_desc *r) 4231 { 4232 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); 4233 4234 /* Check for regs disabled by runtime config */ 4235 if (sysreg_hidden(vcpu, r)) { 4236 kvm_inject_undefined(vcpu); 4237 return; 4238 } 4239 4240 /* 4241 * Not having an accessor means that we have configured a trap 4242 * that we don't know how to handle. This certainly qualifies 4243 * as a gross bug that should be fixed right away. 4244 */ 4245 BUG_ON(!r->access); 4246 4247 /* Skip instruction if instructed so */ 4248 if (likely(r->access(vcpu, params, r))) 4249 kvm_incr_pc(vcpu); 4250 } 4251 4252 /* 4253 * emulate_cp -- tries to match a sys_reg access in a handling table, and 4254 * call the corresponding trap handler. 4255 * 4256 * @params: pointer to the descriptor of the access 4257 * @table: array of trap descriptors 4258 * @num: size of the trap descriptor array 4259 * 4260 * Return true if the access has been handled, false if not. 4261 */ emulate_cp(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * table,size_t num)4262 static bool emulate_cp(struct kvm_vcpu *vcpu, 4263 struct sys_reg_params *params, 4264 const struct sys_reg_desc *table, 4265 size_t num) 4266 { 4267 const struct sys_reg_desc *r; 4268 4269 if (!table) 4270 return false; /* Not handled */ 4271 4272 r = find_reg(params, table, num); 4273 4274 if (r) { 4275 perform_access(vcpu, params, r); 4276 return true; 4277 } 4278 4279 /* Not handled */ 4280 return false; 4281 } 4282 unhandled_cp_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4283 static void unhandled_cp_access(struct kvm_vcpu *vcpu, 4284 struct sys_reg_params *params) 4285 { 4286 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu); 4287 int cp = -1; 4288 4289 switch (esr_ec) { 4290 case ESR_ELx_EC_CP15_32: 4291 case ESR_ELx_EC_CP15_64: 4292 cp = 15; 4293 break; 4294 case ESR_ELx_EC_CP14_MR: 4295 case ESR_ELx_EC_CP14_64: 4296 cp = 14; 4297 break; 4298 default: 4299 WARN_ON(1); 4300 } 4301 4302 print_sys_reg_msg(params, 4303 "Unsupported guest CP%d access at: %08lx [%08lx]\n", 4304 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); 4305 kvm_inject_undefined(vcpu); 4306 } 4307 4308 /** 4309 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access 4310 * @vcpu: The VCPU pointer 4311 * @global: &struct sys_reg_desc 4312 * @nr_global: size of the @global array 4313 */ kvm_handle_cp_64(struct kvm_vcpu * vcpu,const struct sys_reg_desc * global,size_t nr_global)4314 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, 4315 const struct sys_reg_desc *global, 4316 size_t nr_global) 4317 { 4318 struct sys_reg_params params; 4319 u64 esr = kvm_vcpu_get_esr(vcpu); 4320 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4321 int Rt2 = (esr >> 10) & 0x1f; 4322 4323 params.CRm = (esr >> 1) & 0xf; 4324 params.is_write = ((esr & 1) == 0); 4325 4326 params.Op0 = 0; 4327 params.Op1 = (esr >> 16) & 0xf; 4328 params.Op2 = 0; 4329 params.CRn = 0; 4330 4331 /* 4332 * Make a 64-bit value out of Rt and Rt2. As we use the same trap 4333 * backends between AArch32 and AArch64, we get away with it. 4334 */ 4335 if (params.is_write) { 4336 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; 4337 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; 4338 } 4339 4340 /* 4341 * If the table contains a handler, handle the 4342 * potential register operation in the case of a read and return 4343 * with success. 4344 */ 4345 if (emulate_cp(vcpu, ¶ms, global, nr_global)) { 4346 /* Split up the value between registers for the read side */ 4347 if (!params.is_write) { 4348 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); 4349 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); 4350 } 4351 4352 return 1; 4353 } 4354 4355 unhandled_cp_access(vcpu, ¶ms); 4356 return 1; 4357 } 4358 4359 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params); 4360 4361 /* 4362 * The CP10 ID registers are architecturally mapped to AArch64 feature 4363 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses 4364 * from AArch32. 4365 */ kvm_esr_cp10_id_to_sys64(u64 esr,struct sys_reg_params * params)4366 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params) 4367 { 4368 u8 reg_id = (esr >> 10) & 0xf; 4369 bool valid; 4370 4371 params->is_write = ((esr & 1) == 0); 4372 params->Op0 = 3; 4373 params->Op1 = 0; 4374 params->CRn = 0; 4375 params->CRm = 3; 4376 4377 /* CP10 ID registers are read-only */ 4378 valid = !params->is_write; 4379 4380 switch (reg_id) { 4381 /* MVFR0 */ 4382 case 0b0111: 4383 params->Op2 = 0; 4384 break; 4385 /* MVFR1 */ 4386 case 0b0110: 4387 params->Op2 = 1; 4388 break; 4389 /* MVFR2 */ 4390 case 0b0101: 4391 params->Op2 = 2; 4392 break; 4393 default: 4394 valid = false; 4395 } 4396 4397 if (valid) 4398 return true; 4399 4400 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n", 4401 params->is_write ? "write" : "read", reg_id); 4402 return false; 4403 } 4404 4405 /** 4406 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and 4407 * VFP Register' from AArch32. 4408 * @vcpu: The vCPU pointer 4409 * 4410 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers. 4411 * Work out the correct AArch64 system register encoding and reroute to the 4412 * AArch64 system register emulation. 4413 */ kvm_handle_cp10_id(struct kvm_vcpu * vcpu)4414 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu) 4415 { 4416 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4417 u64 esr = kvm_vcpu_get_esr(vcpu); 4418 struct sys_reg_params params; 4419 4420 /* UNDEF on any unhandled register access */ 4421 if (!kvm_esr_cp10_id_to_sys64(esr, ¶ms)) { 4422 kvm_inject_undefined(vcpu); 4423 return 1; 4424 } 4425 4426 if (emulate_sys_reg(vcpu, ¶ms)) 4427 vcpu_set_reg(vcpu, Rt, params.regval); 4428 4429 return 1; 4430 } 4431 4432 /** 4433 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where 4434 * CRn=0, which corresponds to the AArch32 feature 4435 * registers. 4436 * @vcpu: the vCPU pointer 4437 * @params: the system register access parameters. 4438 * 4439 * Our cp15 system register tables do not enumerate the AArch32 feature 4440 * registers. Conveniently, our AArch64 table does, and the AArch32 system 4441 * register encoding can be trivially remapped into the AArch64 for the feature 4442 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same. 4443 * 4444 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit 4445 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this 4446 * range are either UNKNOWN or RES0. Rerouting remains architectural as we 4447 * treat undefined registers in this range as RAZ. 4448 */ kvm_emulate_cp15_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4449 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu, 4450 struct sys_reg_params *params) 4451 { 4452 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4453 4454 /* Treat impossible writes to RO registers as UNDEFINED */ 4455 if (params->is_write) { 4456 unhandled_cp_access(vcpu, params); 4457 return 1; 4458 } 4459 4460 params->Op0 = 3; 4461 4462 /* 4463 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32. 4464 * Avoid conflicting with future expansion of AArch64 feature registers 4465 * and simply treat them as RAZ here. 4466 */ 4467 if (params->CRm > 3) 4468 params->regval = 0; 4469 else if (!emulate_sys_reg(vcpu, params)) 4470 return 1; 4471 4472 vcpu_set_reg(vcpu, Rt, params->regval); 4473 return 1; 4474 } 4475 4476 /** 4477 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access 4478 * @vcpu: The VCPU pointer 4479 * @params: &struct sys_reg_params 4480 * @global: &struct sys_reg_desc 4481 * @nr_global: size of the @global array 4482 */ kvm_handle_cp_32(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * global,size_t nr_global)4483 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, 4484 struct sys_reg_params *params, 4485 const struct sys_reg_desc *global, 4486 size_t nr_global) 4487 { 4488 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4489 4490 params->regval = vcpu_get_reg(vcpu, Rt); 4491 4492 if (emulate_cp(vcpu, params, global, nr_global)) { 4493 if (!params->is_write) 4494 vcpu_set_reg(vcpu, Rt, params->regval); 4495 return 1; 4496 } 4497 4498 unhandled_cp_access(vcpu, params); 4499 return 1; 4500 } 4501 kvm_handle_cp15_64(struct kvm_vcpu * vcpu)4502 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu) 4503 { 4504 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs)); 4505 } 4506 kvm_handle_cp15_32(struct kvm_vcpu * vcpu)4507 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu) 4508 { 4509 struct sys_reg_params params; 4510 4511 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); 4512 4513 /* 4514 * Certain AArch32 ID registers are handled by rerouting to the AArch64 4515 * system register table. Registers in the ID range where CRm=0 are 4516 * excluded from this scheme as they do not trivially map into AArch64 4517 * system register encodings, except for AIDR/REVIDR. 4518 */ 4519 if (params.Op1 == 0 && params.CRn == 0 && 4520 (params.CRm || params.Op2 == 6 /* REVIDR */)) 4521 return kvm_emulate_cp15_id_reg(vcpu, ¶ms); 4522 if (params.Op1 == 1 && params.CRn == 0 && 4523 params.CRm == 0 && params.Op2 == 7 /* AIDR */) 4524 return kvm_emulate_cp15_id_reg(vcpu, ¶ms); 4525 4526 return kvm_handle_cp_32(vcpu, ¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); 4527 } 4528 kvm_handle_cp14_64(struct kvm_vcpu * vcpu)4529 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu) 4530 { 4531 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs)); 4532 } 4533 kvm_handle_cp14_32(struct kvm_vcpu * vcpu)4534 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu) 4535 { 4536 struct sys_reg_params params; 4537 4538 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); 4539 4540 return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs)); 4541 } 4542 4543 /** 4544 * emulate_sys_reg - Emulate a guest access to an AArch64 system register 4545 * @vcpu: The VCPU pointer 4546 * @params: Decoded system register parameters 4547 * 4548 * Return: true if the system register access was successful, false otherwise. 4549 */ emulate_sys_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4550 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, 4551 struct sys_reg_params *params) 4552 { 4553 const struct sys_reg_desc *r; 4554 4555 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 4556 if (likely(r)) { 4557 perform_access(vcpu, params, r); 4558 return true; 4559 } 4560 4561 print_sys_reg_msg(params, 4562 "Unsupported guest sys_reg access at: %lx [%08lx]\n", 4563 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); 4564 kvm_inject_undefined(vcpu); 4565 4566 return false; 4567 } 4568 idregs_debug_find(struct kvm * kvm,u8 pos)4569 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos) 4570 { 4571 unsigned long i, idreg_idx = 0; 4572 4573 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 4574 const struct sys_reg_desc *r = &sys_reg_descs[i]; 4575 4576 if (!is_vm_ftr_id_reg(reg_to_encoding(r))) 4577 continue; 4578 4579 if (idreg_idx == pos) 4580 return r; 4581 4582 idreg_idx++; 4583 } 4584 4585 return NULL; 4586 } 4587 idregs_debug_start(struct seq_file * s,loff_t * pos)4588 static void *idregs_debug_start(struct seq_file *s, loff_t *pos) 4589 { 4590 struct kvm *kvm = s->private; 4591 u8 *iter; 4592 4593 mutex_lock(&kvm->arch.config_lock); 4594 4595 iter = &kvm->arch.idreg_debugfs_iter; 4596 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) && 4597 *iter == (u8)~0) { 4598 *iter = *pos; 4599 if (!idregs_debug_find(kvm, *iter)) 4600 iter = NULL; 4601 } else { 4602 iter = ERR_PTR(-EBUSY); 4603 } 4604 4605 mutex_unlock(&kvm->arch.config_lock); 4606 4607 return iter; 4608 } 4609 idregs_debug_next(struct seq_file * s,void * v,loff_t * pos)4610 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos) 4611 { 4612 struct kvm *kvm = s->private; 4613 4614 (*pos)++; 4615 4616 if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) { 4617 kvm->arch.idreg_debugfs_iter++; 4618 4619 return &kvm->arch.idreg_debugfs_iter; 4620 } 4621 4622 return NULL; 4623 } 4624 idregs_debug_stop(struct seq_file * s,void * v)4625 static void idregs_debug_stop(struct seq_file *s, void *v) 4626 { 4627 struct kvm *kvm = s->private; 4628 4629 if (IS_ERR(v)) 4630 return; 4631 4632 mutex_lock(&kvm->arch.config_lock); 4633 4634 kvm->arch.idreg_debugfs_iter = ~0; 4635 4636 mutex_unlock(&kvm->arch.config_lock); 4637 } 4638 idregs_debug_show(struct seq_file * s,void * v)4639 static int idregs_debug_show(struct seq_file *s, void *v) 4640 { 4641 const struct sys_reg_desc *desc; 4642 struct kvm *kvm = s->private; 4643 4644 desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter); 4645 4646 if (!desc->name) 4647 return 0; 4648 4649 seq_printf(s, "%20s:\t%016llx\n", 4650 desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc))); 4651 4652 return 0; 4653 } 4654 4655 static const struct seq_operations idregs_debug_sops = { 4656 .start = idregs_debug_start, 4657 .next = idregs_debug_next, 4658 .stop = idregs_debug_stop, 4659 .show = idregs_debug_show, 4660 }; 4661 4662 DEFINE_SEQ_ATTRIBUTE(idregs_debug); 4663 kvm_sys_regs_create_debugfs(struct kvm * kvm)4664 void kvm_sys_regs_create_debugfs(struct kvm *kvm) 4665 { 4666 kvm->arch.idreg_debugfs_iter = ~0; 4667 4668 debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, 4669 &idregs_debug_fops); 4670 } 4671 reset_vm_ftr_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * reg)4672 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg) 4673 { 4674 u32 id = reg_to_encoding(reg); 4675 struct kvm *kvm = vcpu->kvm; 4676 4677 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) 4678 return; 4679 4680 kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg)); 4681 } 4682 reset_vcpu_ftr_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * reg)4683 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu, 4684 const struct sys_reg_desc *reg) 4685 { 4686 if (kvm_vcpu_initialized(vcpu)) 4687 return; 4688 4689 reg->reset(vcpu, reg); 4690 } 4691 4692 /** 4693 * kvm_reset_sys_regs - sets system registers to reset value 4694 * @vcpu: The VCPU pointer 4695 * 4696 * This function finds the right table above and sets the registers on the 4697 * virtual CPU struct to their architecturally defined reset values. 4698 */ kvm_reset_sys_regs(struct kvm_vcpu * vcpu)4699 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) 4700 { 4701 struct kvm *kvm = vcpu->kvm; 4702 unsigned long i; 4703 4704 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 4705 const struct sys_reg_desc *r = &sys_reg_descs[i]; 4706 4707 if (!r->reset) 4708 continue; 4709 4710 if (is_vm_ftr_id_reg(reg_to_encoding(r))) 4711 reset_vm_ftr_id_reg(vcpu, r); 4712 else if (is_vcpu_ftr_id_reg(reg_to_encoding(r))) 4713 reset_vcpu_ftr_id_reg(vcpu, r); 4714 else 4715 r->reset(vcpu, r); 4716 4717 if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) 4718 (void)__vcpu_sys_reg(vcpu, r->reg); 4719 } 4720 4721 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); 4722 4723 if (kvm_vcpu_has_pmu(vcpu)) 4724 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 4725 } 4726 4727 /** 4728 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction 4729 * trap on a guest execution 4730 * @vcpu: The VCPU pointer 4731 */ kvm_handle_sys_reg(struct kvm_vcpu * vcpu)4732 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) 4733 { 4734 const struct sys_reg_desc *desc = NULL; 4735 struct sys_reg_params params; 4736 unsigned long esr = kvm_vcpu_get_esr(vcpu); 4737 int Rt = kvm_vcpu_sys_get_rt(vcpu); 4738 int sr_idx; 4739 4740 trace_kvm_handle_sys_reg(esr); 4741 4742 if (triage_sysreg_trap(vcpu, &sr_idx)) 4743 return 1; 4744 4745 params = esr_sys64_to_params(esr); 4746 params.regval = vcpu_get_reg(vcpu, Rt); 4747 4748 /* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */ 4749 if (params.Op0 == 2 || params.Op0 == 3) 4750 desc = &sys_reg_descs[sr_idx]; 4751 else 4752 desc = &sys_insn_descs[sr_idx]; 4753 4754 perform_access(vcpu, ¶ms, desc); 4755 4756 /* Read from system register? */ 4757 if (!params.is_write && 4758 (params.Op0 == 2 || params.Op0 == 3)) 4759 vcpu_set_reg(vcpu, Rt, params.regval); 4760 4761 return 1; 4762 } 4763 4764 /****************************************************************************** 4765 * Userspace API 4766 *****************************************************************************/ 4767 index_to_params(u64 id,struct sys_reg_params * params)4768 static bool index_to_params(u64 id, struct sys_reg_params *params) 4769 { 4770 switch (id & KVM_REG_SIZE_MASK) { 4771 case KVM_REG_SIZE_U64: 4772 /* Any unused index bits means it's not valid. */ 4773 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 4774 | KVM_REG_ARM_COPROC_MASK 4775 | KVM_REG_ARM64_SYSREG_OP0_MASK 4776 | KVM_REG_ARM64_SYSREG_OP1_MASK 4777 | KVM_REG_ARM64_SYSREG_CRN_MASK 4778 | KVM_REG_ARM64_SYSREG_CRM_MASK 4779 | KVM_REG_ARM64_SYSREG_OP2_MASK)) 4780 return false; 4781 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) 4782 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); 4783 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) 4784 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); 4785 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) 4786 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); 4787 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) 4788 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); 4789 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) 4790 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); 4791 return true; 4792 default: 4793 return false; 4794 } 4795 } 4796 get_reg_by_id(u64 id,const struct sys_reg_desc table[],unsigned int num)4797 const struct sys_reg_desc *get_reg_by_id(u64 id, 4798 const struct sys_reg_desc table[], 4799 unsigned int num) 4800 { 4801 struct sys_reg_params params; 4802 4803 if (!index_to_params(id, ¶ms)) 4804 return NULL; 4805 4806 return find_reg(¶ms, table, num); 4807 } 4808 4809 /* Decode an index value, and find the sys_reg_desc entry. */ 4810 static const struct sys_reg_desc * id_to_sys_reg_desc(struct kvm_vcpu * vcpu,u64 id,const struct sys_reg_desc table[],unsigned int num)4811 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id, 4812 const struct sys_reg_desc table[], unsigned int num) 4813 4814 { 4815 const struct sys_reg_desc *r; 4816 4817 /* We only do sys_reg for now. */ 4818 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) 4819 return NULL; 4820 4821 r = get_reg_by_id(id, table, num); 4822 4823 /* Not saved in the sys_reg array and not otherwise accessible? */ 4824 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) 4825 r = NULL; 4826 4827 return r; 4828 } 4829 demux_c15_get(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)4830 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) 4831 { 4832 u32 val; 4833 u32 __user *uval = uaddr; 4834 4835 /* Fail if we have unknown bits set. */ 4836 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 4837 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 4838 return -ENOENT; 4839 4840 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 4841 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 4842 if (KVM_REG_SIZE(id) != 4) 4843 return -ENOENT; 4844 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 4845 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 4846 if (val >= CSSELR_MAX) 4847 return -ENOENT; 4848 4849 return put_user(get_ccsidr(vcpu, val), uval); 4850 default: 4851 return -ENOENT; 4852 } 4853 } 4854 demux_c15_set(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)4855 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) 4856 { 4857 u32 val, newval; 4858 u32 __user *uval = uaddr; 4859 4860 /* Fail if we have unknown bits set. */ 4861 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 4862 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 4863 return -ENOENT; 4864 4865 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 4866 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 4867 if (KVM_REG_SIZE(id) != 4) 4868 return -ENOENT; 4869 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 4870 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 4871 if (val >= CSSELR_MAX) 4872 return -ENOENT; 4873 4874 if (get_user(newval, uval)) 4875 return -EFAULT; 4876 4877 return set_ccsidr(vcpu, val, newval); 4878 default: 4879 return -ENOENT; 4880 } 4881 } 4882 kvm_sys_reg_get_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)4883 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, 4884 const struct sys_reg_desc table[], unsigned int num) 4885 { 4886 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; 4887 const struct sys_reg_desc *r; 4888 u64 val; 4889 int ret; 4890 4891 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); 4892 if (!r || sysreg_hidden(vcpu, r)) 4893 return -ENOENT; 4894 4895 if (r->get_user) { 4896 ret = (r->get_user)(vcpu, r, &val); 4897 } else { 4898 val = __vcpu_sys_reg(vcpu, r->reg); 4899 ret = 0; 4900 } 4901 4902 if (!ret) 4903 ret = put_user(val, uaddr); 4904 4905 return ret; 4906 } 4907 kvm_arm_sys_reg_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)4908 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 4909 { 4910 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 4911 4912 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 4913 return demux_c15_get(vcpu, reg->id, uaddr); 4914 4915 return kvm_sys_reg_get_user(vcpu, reg, 4916 sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 4917 } 4918 kvm_sys_reg_set_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)4919 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, 4920 const struct sys_reg_desc table[], unsigned int num) 4921 { 4922 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; 4923 const struct sys_reg_desc *r; 4924 u64 val; 4925 int ret; 4926 4927 if (get_user(val, uaddr)) 4928 return -EFAULT; 4929 4930 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); 4931 if (!r || sysreg_hidden(vcpu, r)) 4932 return -ENOENT; 4933 4934 if (sysreg_user_write_ignore(vcpu, r)) 4935 return 0; 4936 4937 if (r->set_user) { 4938 ret = (r->set_user)(vcpu, r, val); 4939 } else { 4940 __vcpu_sys_reg(vcpu, r->reg) = val; 4941 ret = 0; 4942 } 4943 4944 return ret; 4945 } 4946 kvm_arm_sys_reg_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)4947 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 4948 { 4949 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 4950 4951 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 4952 return demux_c15_set(vcpu, reg->id, uaddr); 4953 4954 return kvm_sys_reg_set_user(vcpu, reg, 4955 sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 4956 } 4957 num_demux_regs(void)4958 static unsigned int num_demux_regs(void) 4959 { 4960 return CSSELR_MAX; 4961 } 4962 write_demux_regids(u64 __user * uindices)4963 static int write_demux_regids(u64 __user *uindices) 4964 { 4965 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 4966 unsigned int i; 4967 4968 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 4969 for (i = 0; i < CSSELR_MAX; i++) { 4970 if (put_user(val | i, uindices)) 4971 return -EFAULT; 4972 uindices++; 4973 } 4974 return 0; 4975 } 4976 sys_reg_to_index(const struct sys_reg_desc * reg)4977 static u64 sys_reg_to_index(const struct sys_reg_desc *reg) 4978 { 4979 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 4980 KVM_REG_ARM64_SYSREG | 4981 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | 4982 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | 4983 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | 4984 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | 4985 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); 4986 } 4987 copy_reg_to_user(const struct sys_reg_desc * reg,u64 __user ** uind)4988 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) 4989 { 4990 if (!*uind) 4991 return true; 4992 4993 if (put_user(sys_reg_to_index(reg), *uind)) 4994 return false; 4995 4996 (*uind)++; 4997 return true; 4998 } 4999 walk_one_sys_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 __user ** uind,unsigned int * total)5000 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, 5001 const struct sys_reg_desc *rd, 5002 u64 __user **uind, 5003 unsigned int *total) 5004 { 5005 /* 5006 * Ignore registers we trap but don't save, 5007 * and for which no custom user accessor is provided. 5008 */ 5009 if (!(rd->reg || rd->get_user)) 5010 return 0; 5011 5012 if (sysreg_hidden(vcpu, rd)) 5013 return 0; 5014 5015 if (!copy_reg_to_user(rd, uind)) 5016 return -EFAULT; 5017 5018 (*total)++; 5019 return 0; 5020 } 5021 5022 /* Assumed ordered tables, see kvm_sys_reg_table_init. */ walk_sys_regs(struct kvm_vcpu * vcpu,u64 __user * uind)5023 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) 5024 { 5025 const struct sys_reg_desc *i2, *end2; 5026 unsigned int total = 0; 5027 int err; 5028 5029 i2 = sys_reg_descs; 5030 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); 5031 5032 while (i2 != end2) { 5033 err = walk_one_sys_reg(vcpu, i2++, &uind, &total); 5034 if (err) 5035 return err; 5036 } 5037 return total; 5038 } 5039 kvm_arm_num_sys_reg_descs(struct kvm_vcpu * vcpu)5040 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) 5041 { 5042 return num_demux_regs() 5043 + walk_sys_regs(vcpu, (u64 __user *)NULL); 5044 } 5045 kvm_arm_copy_sys_reg_indices(struct kvm_vcpu * vcpu,u64 __user * uindices)5046 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 5047 { 5048 int err; 5049 5050 err = walk_sys_regs(vcpu, uindices); 5051 if (err < 0) 5052 return err; 5053 uindices += err; 5054 5055 return write_demux_regids(uindices); 5056 } 5057 5058 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \ 5059 KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \ 5060 sys_reg_Op1(r), \ 5061 sys_reg_CRn(r), \ 5062 sys_reg_CRm(r), \ 5063 sys_reg_Op2(r)) 5064 kvm_vm_ioctl_get_reg_writable_masks(struct kvm * kvm,struct reg_mask_range * range)5065 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range) 5066 { 5067 const void *zero_page = page_to_virt(ZERO_PAGE(0)); 5068 u64 __user *masks = (u64 __user *)range->addr; 5069 5070 /* Only feature id range is supported, reserved[13] must be zero. */ 5071 if (range->range || 5072 memcmp(range->reserved, zero_page, sizeof(range->reserved))) 5073 return -EINVAL; 5074 5075 /* Wipe the whole thing first */ 5076 if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64))) 5077 return -EFAULT; 5078 5079 for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { 5080 const struct sys_reg_desc *reg = &sys_reg_descs[i]; 5081 u32 encoding = reg_to_encoding(reg); 5082 u64 val; 5083 5084 if (!is_feature_id_reg(encoding) || !reg->set_user) 5085 continue; 5086 5087 if (!reg->val || 5088 (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) { 5089 continue; 5090 } 5091 val = reg->val; 5092 5093 if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding)))) 5094 return -EFAULT; 5095 } 5096 5097 return 0; 5098 } 5099 vcpu_set_hcr(struct kvm_vcpu * vcpu)5100 static void vcpu_set_hcr(struct kvm_vcpu *vcpu) 5101 { 5102 struct kvm *kvm = vcpu->kvm; 5103 5104 if (has_vhe() || has_hvhe()) 5105 vcpu->arch.hcr_el2 |= HCR_E2H; 5106 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) { 5107 /* route synchronous external abort exceptions to EL2 */ 5108 vcpu->arch.hcr_el2 |= HCR_TEA; 5109 /* trap error record accesses */ 5110 vcpu->arch.hcr_el2 |= HCR_TERR; 5111 } 5112 5113 if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 5114 vcpu->arch.hcr_el2 |= HCR_FWB; 5115 5116 if (cpus_have_final_cap(ARM64_HAS_EVT) && 5117 !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) && 5118 kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0)) 5119 vcpu->arch.hcr_el2 |= HCR_TID4; 5120 else 5121 vcpu->arch.hcr_el2 |= HCR_TID2; 5122 5123 if (vcpu_el1_is_32bit(vcpu)) 5124 vcpu->arch.hcr_el2 &= ~HCR_RW; 5125 5126 if (kvm_has_mte(vcpu->kvm)) 5127 vcpu->arch.hcr_el2 |= HCR_ATA; 5128 5129 /* 5130 * In the absence of FGT, we cannot independently trap TLBI 5131 * Range instructions. This isn't great, but trapping all 5132 * TLBIs would be far worse. Live with it... 5133 */ 5134 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 5135 vcpu->arch.hcr_el2 |= HCR_TTLBOS; 5136 } 5137 kvm_calculate_traps(struct kvm_vcpu * vcpu)5138 void kvm_calculate_traps(struct kvm_vcpu *vcpu) 5139 { 5140 struct kvm *kvm = vcpu->kvm; 5141 5142 mutex_lock(&kvm->arch.config_lock); 5143 vcpu_set_hcr(vcpu); 5144 vcpu_set_ich_hcr(vcpu); 5145 vcpu_set_hcrx(vcpu); 5146 5147 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) 5148 goto out; 5149 5150 kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 | 5151 HFGxTR_EL2_nMAIR2_EL1 | 5152 HFGxTR_EL2_nS2POR_EL1 | 5153 HFGxTR_EL2_nACCDATA_EL1 | 5154 HFGxTR_EL2_nSMPRI_EL1_MASK | 5155 HFGxTR_EL2_nTPIDR2_EL0_MASK); 5156 5157 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) 5158 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS| 5159 HFGITR_EL2_TLBIRVALE1OS | 5160 HFGITR_EL2_TLBIRVAAE1OS | 5161 HFGITR_EL2_TLBIRVAE1OS | 5162 HFGITR_EL2_TLBIVAALE1OS | 5163 HFGITR_EL2_TLBIVALE1OS | 5164 HFGITR_EL2_TLBIVAAE1OS | 5165 HFGITR_EL2_TLBIASIDE1OS | 5166 HFGITR_EL2_TLBIVAE1OS | 5167 HFGITR_EL2_TLBIVMALLE1OS); 5168 5169 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) 5170 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 | 5171 HFGITR_EL2_TLBIRVALE1 | 5172 HFGITR_EL2_TLBIRVAAE1 | 5173 HFGITR_EL2_TLBIRVAE1 | 5174 HFGITR_EL2_TLBIRVAALE1IS| 5175 HFGITR_EL2_TLBIRVALE1IS | 5176 HFGITR_EL2_TLBIRVAAE1IS | 5177 HFGITR_EL2_TLBIRVAE1IS | 5178 HFGITR_EL2_TLBIRVAALE1OS| 5179 HFGITR_EL2_TLBIRVALE1OS | 5180 HFGITR_EL2_TLBIRVAAE1OS | 5181 HFGITR_EL2_TLBIRVAE1OS); 5182 5183 if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) 5184 kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A; 5185 5186 if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2)) 5187 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP | 5188 HFGITR_EL2_ATS1E1WP); 5189 5190 if (!kvm_has_s1pie(kvm)) 5191 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 | 5192 HFGxTR_EL2_nPIR_EL1); 5193 5194 if (!kvm_has_s1poe(kvm)) 5195 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 | 5196 HFGxTR_EL2_nPOR_EL0); 5197 5198 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) 5199 kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | 5200 HAFGRTR_EL2_RES1); 5201 5202 if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) { 5203 kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA | 5204 HDFGRTR_EL2_nBRBCTL | 5205 HDFGRTR_EL2_nBRBIDR); 5206 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ | 5207 HFGITR_EL2_nBRBIALL); 5208 } 5209 5210 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); 5211 out: 5212 mutex_unlock(&kvm->arch.config_lock); 5213 } 5214 5215 /* 5216 * Perform last adjustments to the ID registers that are implied by the 5217 * configuration outside of the ID regs themselves, as well as any 5218 * initialisation that directly depend on these ID registers (such as 5219 * RES0/RES1 behaviours). This is not the place to configure traps though. 5220 * 5221 * Because this can be called once per CPU, changes must be idempotent. 5222 */ kvm_finalize_sys_regs(struct kvm_vcpu * vcpu)5223 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) 5224 { 5225 struct kvm *kvm = vcpu->kvm; 5226 5227 guard(mutex)(&kvm->arch.config_lock); 5228 5229 if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && 5230 irqchip_in_kernel(kvm) && 5231 kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) { 5232 kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK; 5233 kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK; 5234 } 5235 5236 if (vcpu_has_nv(vcpu)) { 5237 int ret = kvm_init_nv_sysregs(vcpu); 5238 if (ret) 5239 return ret; 5240 } 5241 5242 return 0; 5243 } 5244 kvm_sys_reg_table_init(void)5245 int __init kvm_sys_reg_table_init(void) 5246 { 5247 bool valid = true; 5248 unsigned int i; 5249 int ret = 0; 5250 5251 /* Make sure tables are unique and in order. */ 5252 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false); 5253 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true); 5254 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true); 5255 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true); 5256 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true); 5257 valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false); 5258 5259 if (!valid) 5260 return -EINVAL; 5261 5262 init_imp_id_regs(); 5263 5264 ret = populate_nv_trap_config(); 5265 5266 for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++) 5267 ret = populate_sysreg_config(sys_reg_descs + i, i); 5268 5269 for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++) 5270 ret = populate_sysreg_config(sys_insn_descs + i, i); 5271 5272 return ret; 5273 } 5274