/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)
#define KVM_REQ_NESTED_S2_UNMAP	KVM_ARCH_REQ(8)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
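
/*
 * Usage sketch (illustrative, not an API contract): the memcache is a
 * stack of pages threaded through their own first bytes, so all a
 * caller provides is an allocator and VA<->PA conversion callbacks.
 * The hyp_alloc_page()/hyp_free_page()/hyp_virt_to_phys()/
 * hyp_phys_to_virt() names below are hypothetical stand-ins for such
 * callbacks:
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 8, hyp_alloc_page,
 *				 hyp_virt_to_phys, NULL))
 *		return -ENOMEM;
 *
 *	void *page = pop_hyp_memcache(&mc, hyp_phys_to_virt);
 *
 *	__free_hyp_memcache(&mc, hyp_free_page, hyp_phys_to_virt, NULL);
 */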

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or an NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64	vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;

	/*
	 * For a shadow stage-2 MMU, the virtual vttbr used by the
	 * host to parse the guest S2.
	 * This either contains:
	 * - the virtual VTTBR programmed by the guest hypervisor with
	 *   CnP cleared
	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
	 *
	 * We also cache the full VTCR which gets used for TLB invalidation,
	 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
	 * to be cached in a TLB" to the letter.
	 */
	u64	tlb_vttbr;
	u64	tlb_vtcr;

	/*
	 * true when this represents a nested context where virtual
	 * HCR_EL2.VM == 1
	 */
	bool	nested_stage2_enabled;

	/*
	 * true when this MMU needs to be unmapped before being used for a new
	 * purpose.
	 */
	bool	pending_unmap;

	/*
	 *  0: Nobody is currently using this, check vttbr for validity
	 * >0: Somebody is actively using this.
	 */
	atomic_t refcnt;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
	bool enabled;
};

struct kvm_mpidr_data {
	u64			mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long index = 0, mask = data->mpidr_mask;
	unsigned long aff = mpidr & MPIDR_HWID_BITMASK;

	bitmap_gather(&index, &aff, &mask, fls(mask));

	return index;
}
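
/*
 * Worked example (illustrative): with data->mpidr_mask == 0xff00 (Aff1
 * only), a vcpu whose MPIDR_EL1.Aff1 == 3 has aff == 0x0300, and
 * bitmap_gather() compresses the bits of @aff selected by @mask down
 * into the low-order bits, yielding an index of 3 into cmpidr_to_idx[].
 */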

struct kvm_sysreg_masks;

enum fgt_group_id {
	__NO_FGT_GROUP__,
	HFGxTR_GROUP,
	HDFGRTR_GROUP,
	HDFGWTR_GROUP = HDFGRTR_GROUP,
	HFGITR_GROUP,
	HAFGRTR_GROUP,

	/* Must be last */
	__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/*
	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
	 * architecture. We track them globally, as we present the
	 * same feature-set to all vcpus.
	 *
	 * Index 0 is currently spare.
	 */
	u64 fgu[__NR_FGT_GROUP_IDS__];

	/*
	 * Stage 2 paging state for VMs with nested S2 using a virtual
	 * VMID.
	 */
	struct kvm_s2_mmu *nested_mmus;
	size_t nested_mmus_size;
	int nested_mmus_next;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
	/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED			8
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* PMCR_EL0.N value for the guest */
	u8 pmcr_n;

	/* Iterator for idreg debugfs */
	u8 idreg_debugfs_iter;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * Emulated CPU ID registers per VM.
	 * The (Op0, Op1, CRn, CRm, Op2) encoding of the ID registers saved
	 * here is (3, 0, 0, crm, op2), where 1 <= crm < 8 and 0 <= op2 < 8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context
	 * of a vCPU. Atomic access to multiple idregs is guarded by
	 * kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];
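
	/*
	 * Worked example (illustrative): ID_AA64PFR0_EL1 encodes as
	 * (3, 0, 0, 4, 0), so IDREG_IDX() yields ((4 - 1) << 3) | 0 == 24.
	 * The highest encoding, (3, 0, 0, 7, 7), yields 55, hence
	 * KVM_ARM_ID_REG_NUM == 56 slots.
	 */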

	u64 ctr_el0;

	/* Masks for VNCR-backed and general EL2 sysregs */
	struct kvm_sysreg_masks	*sysreg_masks;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};

struct kvm_vcpu_fault_info {
	u64 esr_el2;	/* Hyp Syndrome Register */
	u64 far_el2;	/* Hyp Fault Address Register */
	u64 hpfar_el2;	/* Hyp IPA Fault Address Register */
	u64 disr_el1;	/* Deferred [SError] Status Register */
};

/*
 * VNCR() just places the VNCR_capable registers in the enum after
 * __VNCR_START__, with the value (after correction) being an 8-byte
 * offset from the VNCR base. As we don't require the enum to be
 * otherwise ordered, we need the terrible hack below to ensure that
 * we correctly size the sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
	__after_##r = __MAX__(__before_##r - 1, r)

#define MARKER(m)					\
	m, __after_##m = m - 1
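
/*
 * Worked expansion (illustrative, assuming VNCR_SCTLR_EL1 == 0x110 in
 * asm/vncr_mapping.h): VNCR(SCTLR_EL1) expands to
 *
 *	__before_SCTLR_EL1,
 *	SCTLR_EL1 = __VNCR_START__ + (0x110 / 8),
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1),
 *
 * so the enum's implicit counter always resumes from the larger of the
 * previous position and the fixed VNCR slot, guaranteeing that
 * NR_SYS_REGS covers the highest VNCR offset.
 */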

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in strictly increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	POR_EL0,	/* Permission Overlay Register 0 (EL0) */

	/* FP/SIMD/SVE */
	SVCR,
	FPMR,

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	ZCR_EL2,	/* SVE Control Register (EL2) */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	/* Anything from this point onwards can be RES0/RES1 sanitised */
	MARKER(__SANITISED_REG_START__),
	TCR2_EL2,	/* Extended Translation Control Register (EL2) */

	/* Any VNCR-capable reg goes after this point */
	MARKER(__VNCR_START__),

	VNCR(SCTLR_EL1),/* System Control Register */
	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
	VNCR(CPACR_EL1),/* Coprocessor Access Control */
	VNCR(ZCR_EL1),	/* SVE Control */
	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
	VNCR(TCR_EL1),	/* Translation Control Register */
	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
	VNCR(ESR_EL1),	/* Exception Syndrome Register */
	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
	VNCR(FAR_EL1),	/* Fault Address Register */
	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
	VNCR(VBAR_EL1),	/* Vector Base Address Register */
	VNCR(CONTEXTIDR_EL1), /* Context ID Register */
	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
	VNCR(ELR_EL1),
	VNCR(SP_EL1),
	VNCR(SPSR_EL1),
	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */

	/* Permission Indirection Extension registers */
	VNCR(PIR_EL1),	/* Permission Indirection Register 1 (EL1) */
	VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */

	VNCR(HFGRTR_EL2),
	VNCR(HFGWTR_EL2),
	VNCR(HFGITR_EL2),
	VNCR(HDFGRTR_EL2),
	VNCR(HDFGWTR_EL2),
	VNCR(HAFGRTR_EL2),

	VNCR(CNTVOFF_EL2),
	VNCR(CNTV_CVAL_EL0),
	VNCR(CNTV_CTL_EL0),
	VNCR(CNTP_CVAL_EL0),
	VNCR(CNTP_CTL_EL0),

	VNCR(ICH_HCR_EL2),

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_sysreg_masks {
	struct {
		u64	res0;
		u64	res1;
	} mask[NR_SYS_REGS - __SANITISED_REG_START__];
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;

	/* This pointer has to be 4kB aligned. */
	u64 *vncr_array;
};

struct cpu_sve_state {
	__u64 zcr_el1;

	/*
	 * Ordering is important since __sve_save_state/__sve_restore_state
	 * relies on it.
	 */
	__u32 fpsr;
	__u32 fpcr;

	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
	__u8 sve_regs[];
};

/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put(), or
 * - is an invariant for the lifetime of the system.
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;

	/*
	 * All pointers in this union are hyp VA.
	 * sve_state is only used in pKVM and if system_supports_sve().
	 */
	union {
		struct user_fpsimd_state *fpsimd_state;
		struct cpu_sve_state *sve_state;
	};

	union {
		/* HYP VA pointer to the host storage for FPMR */
		u64	*fpmr_ptr;
		/*
		 * Used by pKVM only, as it needs to provide storage
		 * for the host.
		 */
		u64	fpmr;
	};

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_owner;

	/*
	 * host_debug_state contains the host registers which are
	 * saved and restored during world switches.
	 */
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
		/* Values of trap registers for the host before guest entry. */
		u64 mdcr_el2;
	} host_debug_state;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE.  These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers.  When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches.  vcpu_debug_state contains the
	 * debug registers of the vcpu as the guest sees them.
	 *
	 * external_debug_state contains the debug values we want to debug the
	 * guest with.  This is set via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
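
/*
 * Usage sketch (illustrative): because each flag macro expands to the
 * full (flagset, value, mask) triplet, callers only ever name the flag:
 *
 *	vcpu_set_flag(vcpu, IN_WFIT);
 *	if (vcpu_get_flag(vcpu, IN_WFIT))
 *		...
 *	vcpu_clear_flag(vcpu, IN_WFIT);
 *
 * with IN_WFIT (defined below) supplying (sflags, BIT(3), BIT(3)).
 */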

/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_zcr_elx(vcpu)						\
	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined(__KVM_NVHE_HYPERVISOR__)
	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];
#endif
	return (u64 *)&ctxt->sys_regs[r];
}

#define __ctxt_sys_reg(c,r)						\
	({								\
		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
			     (r) >= NR_SYS_REGS);			\
		___ctxt_sys_reg(c, r);					\
	})

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_sys_reg(v,r)						\
	(*({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
		__r;							\
	}))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
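
/*
 * Usage sketch (illustrative): userspace accessors and emulation of
 * never-switched registers read and write the in-memory copy directly:
 *
 *	u64 val = __vcpu_sys_reg(vcpu, TTBR0_EL1);
 *	__vcpu_sys_reg(vcpu, TTBR0_EL1) = val;
 *
 * whereas vcpu_read_sys_reg()/vcpu_write_sys_reg(), declared just
 * above, also handle the case where the register is currently loaded
 * on the CPU.
 */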

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case TCR2_EL1:		*val = read_sysreg_s(SYS_TCR2_EL12);	break;
	case PIR_EL1:		*val = read_sysreg_s(SYS_PIR_EL12);	break;
	case PIRE0_EL1:		*val = read_sysreg_s(SYS_PIRE0_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
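
/*
 * Usage sketch (illustrative): callers don't need to know which mode
 * they run in; the same invocation is a direct function call on VHE
 * and an HVC into the EL2 object on nVHE:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */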

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
				  unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_calculate_traps(struct kvm_vcpu *vcpu);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel uses either the VHE or nVHE one, depending
 *   on the mode we're running in.
 *
 *   Unless we're in protected mode, where the host is fully
 *   deprivileged and the nVHE per-CPU data is exclusively accessible
 *   to the protected EL2 code. In this case, the EL1 code uses the
 *   *VHE* data as its private state (which makes sense in a way, as
 *   there shouldn't be any shared state between the host and the
 *   hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)						\
	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
	 &this_cpu_ptr(&kvm_host_data)->f :				\
	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}

/* Check whether the FP regs are owned by the host */
static inline bool host_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
}

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);
u32 kvm_get_pa_bits(struct kvm *kvm);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)

#define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
	return test_bit(feature, ka->vcpu_features);
}

#define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))

#define kvm_vcpu_initialized(v)	vcpu_get_flag((v), VCPU_INITIALIZED)

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
{
	switch (reg) {
	case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
		return &ka->id_regs[IDREG_IDX(reg)];
	case SYS_CTR_EL0:
		return &ka->ctr_el0;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

#define kvm_read_vm_id_reg(kvm, reg)					\
	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })

void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);

#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val)			\
	({								\
		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})

#define get_idreg_field_signed(kvm, id, fld)				\
	({								\
		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define kvm_cmp_feat_signed(kvm, id, fld, op, limit)			\
	(get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))

#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)			\
	(get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))

#define kvm_cmp_feat(kvm, id, fld, op, limit)				\
	(id##_##fld##_SIGNED ?						\
	 kvm_cmp_feat_signed(kvm, id, fld, op, limit) :			\
	 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))

#define kvm_has_feat(kvm, id, fld, limit)				\
	kvm_cmp_feat(kvm, id, fld, >=, limit)

#define kvm_has_feat_enum(kvm, id, fld, val)				\
	kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(kvm_cmp_feat(kvm, id, fld, >=, min) &&				\
	 kvm_cmp_feat(kvm, id, fld, <=, max))
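
/*
 * Usage sketch (illustrative): these helpers compare the *guest's*
 * view of an ID register field (the per-VM emulated value) against a
 * required level, handling signed fields transparently:
 *
 *	if (kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
 *		...
 *	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, SVE, IMP))
 *		...
 */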

/* Check for a given level of PAuth support */
#define kvm_has_pauth(k, l)						\
	({								\
		bool pa, pi, pa3;					\
									\
		pa  = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
		pi  = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
		pa3  = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
									\
		(pa + pi + pa3) == 1;					\
	})

#define kvm_has_fpmr(k)					\
	(system_supports_fpmr() &&			\
	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))

#endif /* __ARM64_KVM_HOST_H__ */