/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)
#define KVM_REQ_NESTED_S2_UNMAP	KVM_ARCH_REQ(8)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK
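/*
 * Usage sketch (illustration only, not part of this header): the
 * arch-specific requests above are raised and consumed through the
 * generic request API, e.g.
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 *		kvm_reset_vcpu(vcpu);
 *
 * KVM_REQ_SLEEP additionally carries KVM_REQUEST_WAIT and
 * KVM_REQUEST_NO_WAKEUP, so the sender waits for the target vCPU to
 * acknowledge the request without kicking it out of its wait state.
 */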
/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
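/*
 * Usage sketch (callback names invented for illustration): the memcache
 * is a LIFO stack of pages linked through their own first eight bytes,
 * so push/pop only ever touch the head page.
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 4, my_alloc_page, my_virt_to_phys,
 *				 NULL))
 *		return -ENOMEM;
 *	...
 *	__free_hyp_memcache(&mc, my_free_page, my_phys_to_virt, NULL);
 *
 * The to_pa/to_va callbacks encapsulate the VA<->PA conversion for
 * whichever context (kernel or hyp) owns the cache, which is what the
 * non-underscored wrappers above provide for the host kernel.
 */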
struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or a NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64	vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;

	/*
	 * For a shadow stage-2 MMU, the virtual vttbr used by the
	 * host to parse the guest S2.
	 * This either contains:
	 * - the virtual VTTBR programmed by the guest hypervisor with
	 *   CnP cleared
	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
	 *
	 * We also cache the full VTCR which gets used for TLB invalidation,
	 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
	 * to be cached in a TLB" to the letter.
	 */
	u64	tlb_vttbr;
	u64	tlb_vtcr;

	/*
	 * true when this represents a nested context where virtual
	 * HCR_EL2.VM == 1
	 */
	bool	nested_stage2_enabled;

	/*
	 * true when this MMU needs to be unmapped before being used for a
	 * new purpose.
	 */
	bool	pending_unmap;

	/*
	 *  0: Nobody is currently using this, check vttbr for validity
	 * >0: Somebody is actively using this.
	 */
	atomic_t refcnt;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
	bool enabled;
};

struct kvm_mpidr_data {
	u64			mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long index = 0, mask = data->mpidr_mask;
	unsigned long aff = mpidr & MPIDR_HWID_BITMASK;

	bitmap_gather(&index, &aff, &mask, fls(mask));

	return index;
}
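/*
 * Worked example (values invented for illustration): if the CPUs in the
 * system only vary in Aff0[3:0] and Aff1[1:0], mpidr_mask is 0x30f.
 * For mpidr == 0x201 (Aff1 = 2, Aff0 = 1), bitmap_gather() packs the
 * masked bits next to each other, yielding index 0x21: affinity bits
 * [3:0] land in index bits [3:0], and bits [9:8] land in bits [5:4].
 * This keeps cmpidr_to_idx dense even for sparse MPIDR layouts.
 */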
struct kvm_sysreg_masks;

enum fgt_group_id {
	__NO_FGT_GROUP__,
	HFGxTR_GROUP,
	HDFGRTR_GROUP,
	HDFGWTR_GROUP = HDFGRTR_GROUP,
	HFGITR_GROUP,
	HAFGRTR_GROUP,

	/* Must be last */
	__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/*
	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
	 * architecture. We track them globally, as we present the
	 * same feature-set to all vcpus.
	 *
	 * Index 0 is currently spare.
	 */
	u64 fgu[__NR_FGT_GROUP_IDS__];

	/*
	 * Stage 2 paging state for VMs with nested S2 using a virtual
	 * VMID.
	 */
	struct kvm_s2_mmu *nested_mmus;
	size_t nested_mmus_size;
	int nested_mmus_next;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
	/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED			8
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* PMCR_EL0.N value for the guest */
	u8 pmcr_n;

	/* Iterator for idreg debugfs */
	u8 idreg_debugfs_iter;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * Emulated CPU ID registers per VM.
	 * The (Op0, Op1, CRn, CRm, Op2) of an ID register saved here
	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the
	 * context of a vCPU. Atomic access to multiple idregs is guarded
	 * by kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	u64 ctr_el0;

	/* Masks for VNCR-backed and general EL2 sysregs */
	struct kvm_sysreg_masks	*sysreg_masks;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
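/*
 * Worked example for IDREG_IDX (illustration): ID_AA64PFR0_EL1 is
 * sys_reg(3, 0, 0, 4, 0), so IDREG_IDX() yields ((4 - 1) << 3) | 0 == 24
 * and the register lives in id_regs[24]. The last covered encoding,
 * sys_reg(3, 0, 0, 7, 7), maps to index 55, hence
 * KVM_ARM_ID_REG_NUM == 56.
 */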
struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * VNCR() just places the VNCR_capable registers in the enum after
 * __VNCR_START__, with the value (after correction) being an 8-byte
 * offset from the VNCR base. As we don't require the enum to be
 * otherwise ordered, we need the terrible hack below to ensure that we
 * correctly size the sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
	__after_##r = __MAX__(__before_##r - 1, r)

#define MARKER(m)				\
	m, __after_##m = m - 1
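/*
 * Illustration (using VNCR_SCTLR_EL1 == 0x110 as the assumed offset):
 * VNCR(SCTLR_EL1) expands to
 *
 *	__before_SCTLR_EL1,
 *	SCTLR_EL1 = __VNCR_START__ + 0x110 / 8,
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1)
 *
 * so the enumerator following each VNCR() entry resumes from whichever
 * is larger: the previous enum position or the VNCR-derived value.
 * NR_SYS_REGS therefore covers the highest VNCR offset even though the
 * entries are not declared in offset order.
 */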
enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	POR_EL0,	/* Permission Overlay Register 0 (EL0) */

	/* FP/SIMD/SVE */
	SVCR,
	FPMR,

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	ZCR_EL2,	/* SVE Control Register (EL2) */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	PIRE0_EL2,	/* Permission Indirection Register 0 (EL2) */
	PIR_EL2,	/* Permission Indirection Register 1 (EL2) */
	POR_EL2,	/* Permission Overlay Register 2 (EL2) */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	/* Anything from this can be RES0/RES1 sanitised */
	MARKER(__SANITISED_REG_START__),
	TCR2_EL2,	/* Extended Translation Control Register (EL2) */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */

	/* Any VNCR-capable reg goes after this point */
	MARKER(__VNCR_START__),

	VNCR(SCTLR_EL1),/* System Control Register */
	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
	VNCR(CPACR_EL1),/* Coprocessor Access Control */
	VNCR(ZCR_EL1),	/* SVE Control */
	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
	VNCR(TCR_EL1),	/* Translation Control Register */
	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
	VNCR(ESR_EL1),	/* Exception Syndrome Register */
	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
	VNCR(FAR_EL1),	/* Fault Address Register */
	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
	VNCR(VBAR_EL1),	/* Vector Base Address Register */
	VNCR(CONTEXTIDR_EL1),	/* Context ID Register */
	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
	VNCR(ELR_EL1),
	VNCR(SP_EL1),
	VNCR(SPSR_EL1),
	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */

	/* Permission Indirection Extension registers */
	VNCR(PIR_EL1),	 /* Permission Indirection Register 1 (EL1) */
	VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */

	VNCR(HFGRTR_EL2),
	VNCR(HFGWTR_EL2),
	VNCR(HFGITR_EL2),
	VNCR(HDFGRTR_EL2),
	VNCR(HDFGWTR_EL2),
	VNCR(HAFGRTR_EL2),

	VNCR(CNTVOFF_EL2),
	VNCR(CNTV_CVAL_EL0),
	VNCR(CNTV_CTL_EL0),
	VNCR(CNTP_CVAL_EL0),
	VNCR(CNTP_CTL_EL0),

	VNCR(ICH_HCR_EL2),

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_sysreg_masks {
	struct {
		u64	res0;
		u64	res1;
	} mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
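/*
 * Sketch of how these masks are meant to be consumed (see
 * kvm_vcpu_apply_reg_masks() further down for the actual entry point):
 * sanitising a register value keeps RES0 bits clear and RES1 bits set,
 * roughly
 *
 *	int idx = sr - __SANITISED_REG_START__;
 *
 *	v &= ~masks->mask[idx].res0;
 *	v |= masks->mask[idx].res1;
 *
 * for any register at or after __SANITISED_REG_START__.
 */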
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;

	/* This pointer has to be 4kB aligned. */
	u64 *vncr_array;
};

struct cpu_sve_state {
	__u64 zcr_el1;

	/*
	 * Ordering is important since __sve_save_state/__sve_restore_state
	 * rely on it.
	 */
	__u32 fpsr;
	__u32 fpcr;

	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
	__u8 sve_regs[];
};
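/*
 * Sizing sketch (illustration): the flexible sve_regs[] array holds the
 * Z/P/FFR state for the host's maximum vector length, so a buffer for
 * this struct would be allocated along the lines of
 *
 *	size_t sz = sizeof(struct cpu_sve_state) +
 *		    SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl));
 *
 * mirroring what vcpu_sve_state_size() below computes per vCPU.
 */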
/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 *  - tied to a single physical CPU, and
 *  - either has a lifetime that does not extend past vcpu_put()
 *  - or is an invariant for the lifetime of the system
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_SPE		0
#define KVM_HOST_DATA_FLAG_HAS_TRBE		1
#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED	2
#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED	3
	unsigned long flags;

	struct kvm_cpu_context host_ctxt;

	/*
	 * All pointers in this union are hyp VA.
	 * sve_state is only used in pKVM and if system_supports_sve().
	 */
	union {
		struct user_fpsimd_state *fpsimd_state;
		struct cpu_sve_state *sve_state;
	};

	union {
		/* HYP VA pointer to the host storage for FPMR */
		u64	*fpmr_ptr;
		/*
		 * Used by pKVM only, as it needs to provide storage
		 * for the host
		 */
		u64	fpmr;
	};

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_owner;

	/*
	 * host_debug_state contains the host registers which are
	 * saved and restored during world switches.
	 */
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
		/* Values of trap registers for the host before guest entry. */
		u64 mdcr_el2;
	} host_debug_state;

	/* Number of programmable event counters (PMCR_EL0.N) for this CPU */
	unsigned int nr_event_counters;

	/* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
	unsigned int debug_brps;
	unsigned int debug_wrps;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE. These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers. When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.
	 *
	 * external_debug_state contains the debug values we want to use to
	 * debug the guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
	 */
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;
	u64 external_mdscr_el1;

	enum {
		VCPU_DEBUG_FREE,
		VCPU_DEBUG_HOST_OWNED,
		VCPU_DEBUG_GUEST_OWNED,
	} debug_owner;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
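/*
 * Expansion example (illustration): GUEST_HAS_SVE below is the triplet
 * (cflags, BIT(0), BIT(0)), so
 *
 *	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
 *
 * becomes __vcpu_set_flag(vcpu, cflags, BIT(0), BIT(0)): after checking
 * at build time that the value fits in the mask and that the mask fits
 * in vcpu->arch.cflags, it ORs BIT(0) in with preemption disabled.
 * Multi-bit "flags" such as EXCEPT_MASK clear the whole mask field
 * before setting the new value.
 */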
/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)

/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(1))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(2))
/* Software step state is Active-pending for external debug */
#define HOST_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(3))
/* Software step state is Active-pending for guest debug */
#define GUEST_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(4))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(6))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_zcr_elx(vcpu)						\
	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
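/*
 * Encoding example (illustration): EXCEPT_MASK occupies iflags bits
 * [3:1], so __EXCEPT_SHIFT is 1 and EXCEPT_AA64_EL2_IRQ above packs the
 * value 5 as (5 << 1) under mask GENMASK(3, 1). Setting a new target
 * thus replaces any previously encoded one, while bit 0
 * (PENDING_EXCEPTION) tracks whether an exception is pending at all.
 */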
#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined(__KVM_NVHE_HYPERVISOR__)
	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];
#endif
	return (u64 *)&ctxt->sys_regs[r];
}

#define __ctxt_sys_reg(c,r)						\
	({								\
		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
			     (r) >= NR_SYS_REGS);			\
		___ctxt_sys_reg(c, r);					\
	})

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_sys_reg(v,r)						\
	(*({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
		__r;							\
	}))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
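/*
 * Usage sketch (illustration): __vcpu_sys_reg() always resolves to the
 * in-memory copy (VNCR-backed and RES0/RES1-sanitised where applicable),
 * e.g.
 *
 *	u64 hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
 *
 * whereas vcpu_read_sys_reg()/vcpu_write_sys_reg() may service the
 * access from the hardware register while the vcpu state is loaded on
 * the CPU, falling back to the memory copy otherwise (see the
 * __vcpu_{read,write}_sys_reg_{from,to}_cpu() helpers below).
 */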
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case TCR2_EL1:		*val = read_sysreg_s(SYS_TCR2_EL12);	break;
	case PIR_EL1:		*val = read_sysreg_s(SYS_PIR_EL12);	break;
	case PIRE0_EL1:		*val = read_sysreg_s(SYS_PIRE0_EL12);	break;
	case POR_EL1:		*val = read_sysreg_s(SYS_POR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	(!!READ_ONCE((vcpu)->pid))
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
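/*
 * Call-site sketch (illustration): from regular kernel context,
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * calls the function directly (plus an isb()) on VHE, and issues an HVC
 * into the nVHE hypervisor otherwise, with the return value carried
 * back in the SMCCC a1 register.
 */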
int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
				  unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_calculate_traps(struct kvm_vcpu *vcpu);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel uses either the VHE or nVHE one, depending on
 *   the mode we're running in.
 *
 *   Unless we're in protected mode, where the host is fully deprivileged
 *   and the nVHE per-CPU stuff is exclusively accessible to the protected
 *   EL2 code. In this case, the EL1 code uses the *VHE* data as its
 *   private state (which makes sense in a way as there shouldn't be any
 *   shared state between the host and the hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)						\
	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
	 &this_cpu_ptr(&kvm_host_data)->f :				\
	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif

#define host_data_test_flag(flag)					\
	(test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
#define host_data_set_flag(flag)					\
	set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
#define host_data_clear_flag(flag)					\
	clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
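/*
 * Usage sketch (illustration): host_data_ptr() resolves to the per-CPU
 * instance that is correct for the current context, so callers can
 * simply write
 *
 *	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
 *	if (host_data_test_flag(HAS_SPE))
 *		...
 *
 * without caring whether they run at EL1, as the VHE hypervisor, or
 * inside the nVHE/protected EL2 object.
 */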
/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}

/* Check whether the FP regs are owned by the host */
static inline bool host_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
}

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

void kvm_init_host_debug_data(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

#define kvm_debug_regs_in_use(vcpu)		\
	((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
#define kvm_host_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
#define kvm_guest_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);
u32 kvm_get_pa_bits(struct kvm *kvm);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)

#define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
	return test_bit(feature, ka->vcpu_features);
}

#define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))

#define kvm_vcpu_initialized(v)	vcpu_get_flag((v), VCPU_INITIALIZED)

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
{
	switch (reg) {
	case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
		return &ka->id_regs[IDREG_IDX(reg)];
	case SYS_CTR_EL0:
		return &ka->ctr_el0;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

#define kvm_read_vm_id_reg(kvm, reg)					\
	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })

void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
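/*
 * Usage sketch (illustration): the VM-scoped ID registers are read via
 * the helper above, e.g.
 *
 *	u64 pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
 *
 * which routes the sys_reg(3, 0, 0, 1, 0)..sys_reg(3, 0, 0, 7, 7) space
 * to id_regs[] and SYS_CTR_EL0 to the dedicated ctr_el0 field.
 */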
#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val)			\
	({								\
		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})

#define get_idreg_field_signed(kvm, id, fld)				\
	({								\
		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define kvm_cmp_feat_signed(kvm, id, fld, op, limit)			\
	(get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))

#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)			\
	(get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))

#define kvm_cmp_feat(kvm, id, fld, op, limit)				\
	(id##_##fld##_SIGNED ?						\
	 kvm_cmp_feat_signed(kvm, id, fld, op, limit) :			\
	 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))

#define kvm_has_feat(kvm, id, fld, limit)				\
	kvm_cmp_feat(kvm, id, fld, >=, limit)

#define kvm_has_feat_enum(kvm, id, fld, val)				\
	kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(kvm_cmp_feat(kvm, id, fld, >=, min) &&				\
	 kvm_cmp_feat(kvm, id, fld, <=, max))
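/*
 * Expansion example (illustration):
 *
 *	kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP)
 *
 * compares the S1PIE field of the VM's ID_AA64MMFR3_EL1 view against
 * the IMP encoding with >=, picking the signed or unsigned comparison
 * according to the generated ID_AA64MMFR3_EL1_S1PIE_SIGNED constant.
 * Signedness matters for fields like ID_AA64DFR0_EL1.PMUVer, where 0xf
 * (IMPLEMENTATION DEFINED) must compare below IMP rather than above it.
 */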
/* Check for a given level of PAuth support */
#define kvm_has_pauth(k, l)						\
	({								\
		bool pa, pi, pa3;					\
									\
		pa  = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
		pi  = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
		pa3  = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
									\
		(pa + pi + pa3) == 1;					\
	})

#define kvm_has_fpmr(k)					\
	(system_supports_fpmr() &&			\
	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))

#define kvm_has_tcr2(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))

#define kvm_has_s1pie(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

#define kvm_has_s1poe(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))

#endif /* __ARM64_KVM_HOST_H__ */