/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 9
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING		KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL		KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4		KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU		KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND			KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0		KVM_ARCH_REQ(7)
#define KVM_REQ_NESTED_S2_UNMAP		KVM_ARCH_REQ(8)
#define KVM_REQ_GUEST_HYP_IRQ_PENDING	KVM_ARCH_REQ(9)
#define KVM_REQ_MAP_L1_VNCR_EL2		KVM_ARCH_REQ(10)
#define KVM_REQ_VGIC_PROCESS_UPDATE	KVM_ARCH_REQ(11)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
#endif

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
	struct pkvm_mapping *mapping; /* only used from EL1 */

#define HYP_MEMCACHE_ACCOUNT_STAGE2	BIT(1)
	unsigned long flags;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head & PAGE_MASK);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
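
/*
 * Illustrative sketch (not part of the kernel API): the memcache is an
 * intrusive stack of free pages, each page's first word holding the
 * physical address of the next page. The allocator and the virt-to-phys
 * callback below are hypothetical placeholders for the caller-provided
 * helpers.
 *
 *	static void *example_alloc(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	// Stash at least 4 pages, linked through their physical addresses.
 *	if (__topup_hyp_memcache(&mc, 4, example_alloc, example_to_pa, NULL))
 *		return -ENOMEM;
 *
 *	// pop_hyp_memcache() later hands back the most recently pushed page.
 */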

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or a NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64	vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;

	/*
	 * For a shadow stage-2 MMU, the virtual vttbr used by the
	 * host to parse the guest S2.
	 * This either contains:
	 * - the virtual VTTBR programmed by the guest hypervisor with
	 *   CnP cleared
	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
	 *
	 * We also cache the full VTCR which gets used for TLB invalidation,
	 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
	 * to be cached in a TLB" to the letter.
	 */
	u64	tlb_vttbr;
	u64	tlb_vtcr;

	/*
	 * true when this represents a nested context where virtual
	 * HCR_EL2.VM == 1
	 */
	bool	nested_stage2_enabled;

	/*
	 * true when this MMU needs to be unmapped before being used for a new
	 * purpose.
	 */
	bool	pending_unmap;

	/*
	 *  0: Nobody is currently using this, check vttbr for validity
	 * >0: Somebody is actively using this.
	 */
	atomic_t refcnt;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 * @vendor_hyp_bmap_2: Bitmap of vendor specific hypervisor service calls (function numbers 64-127)
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
	unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
	struct kvm_hyp_memcache stage2_teardown_mc;
	bool is_protected;
	bool is_created;
};

struct kvm_mpidr_data {
	u64			mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long index = 0, mask = data->mpidr_mask;
	unsigned long aff = mpidr & MPIDR_HWID_BITMASK;

	bitmap_gather(&index, &aff, &mask, fls(mask));

	return index;
}
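
/*
 * Worked example (illustrative values only): with mpidr_mask selecting
 * Aff1[1:0] and Aff0[1:0] (mask 0x0303), an MPIDR with Aff1 = 2 and
 * Aff0 = 1 gives aff = 0x0201. bitmap_gather() packs the masked bits into
 * the low bits of 'index', yielding (2 << 2) | 1 = 9, which is then used
 * to look up cmpidr_to_idx[9].
 */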

struct kvm_sysreg_masks;

enum fgt_group_id {
	__NO_FGT_GROUP__,
	HFGRTR_GROUP,
	HFGWTR_GROUP = HFGRTR_GROUP,
	HDFGRTR_GROUP,
	HDFGWTR_GROUP = HDFGRTR_GROUP,
	HFGITR_GROUP,
	HAFGRTR_GROUP,
	HFGRTR2_GROUP,
	HFGWTR2_GROUP = HFGRTR2_GROUP,
	HDFGRTR2_GROUP,
	HDFGWTR2_GROUP = HDFGRTR2_GROUP,
	HFGITR2_GROUP,
	ICH_HFGRTR_GROUP,
	ICH_HFGWTR_GROUP = ICH_HFGRTR_GROUP,
	ICH_HFGITR_GROUP,

	/* Must be last */
	__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/*
	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
	 * architecture. We track them globally, as we present the
	 * same feature-set to all vcpus.
	 *
	 * Index 0 is currently spare.
	 */
	u64 fgu[__NR_FGT_GROUP_IDS__];

	/*
	 * Stage 2 paging state for VMs with nested S2 using a virtual
	 * VMID.
	 */
	struct kvm_s2_mmu *nested_mmus;
	size_t nested_mmus_size;
	int nested_mmus_next;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
	/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED			8
	/* SVE exposed to guest */
#define KVM_ARCH_FLAG_GUEST_HAS_SVE			9
	/* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS		10
	/* Unhandled SEAs are taken to userspace */
#define KVM_ARCH_FLAG_EXIT_SEA				11
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* Maximum number of counters for the guest */
	u8 nr_pmu_counters;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * Emulated CPU ID registers per VM
	 * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
	 * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	u64 midr_el1;
	u64 revidr_el1;
	u64 aidr_el1;
	u64 ctr_el0;

	/* Masks for VNCR-backed and general EL2 sysregs */
	struct kvm_sysreg_masks	*sysreg_masks;

	/* Count the number of VNCR_EL2 currently mapped */
	atomic_t vncr_map_count;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};

struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * VNCR() just places the VNCR_capable registers in the enum after
 * __VNCR_START__, and the value (after correction) to be an 8-byte offset
 * from the VNCR base. As we don't require the enum to be otherwise ordered,
 * we need the terrible hack below to ensure that we correctly size the
 * sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
	__after_##r = __MAX__(__before_##r - 1, r)

#define MARKER(m)					\
	m, __after_##m = m - 1
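
/*
 * Illustrative expansion (offsets come from asm/vncr_mapping.h; the register
 * name and value here are made up for the example): for a register FOO_EL1
 * whose page offset VNCR_FOO_EL1 is 0x240, VNCR(FOO_EL1) expands to
 *
 *	__before_FOO_EL1,
 *	FOO_EL1 = __VNCR_START__ + (0x240 / 8),
 *	__after_FOO_EL1 = __MAX__(__before_FOO_EL1 - 1, FOO_EL1),
 *
 * so the next enumerator resumes from max(previous value, FOO_EL1), which is
 * what keeps NR_SYS_REGS large enough however the VNCR entries are ordered.
 */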

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	POR_EL0,	/* Permission Overlay Register 0 (EL0) */

	/* FP/SIMD/SVE */
	SVCR,
	FPMR,

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	ZCR_EL2,	/* SVE Control Register (EL2) */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	PIRE0_EL2,	/* Permission Indirection Register 0 (EL2) */
	PIR_EL2,	/* Permission Indirection Register 1 (EL2) */
	POR_EL2,	/* Permission Overlay Register 2 (EL2) */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	/* Anything from this can be RES0/RES1 sanitised */
	MARKER(__SANITISED_REG_START__),
	SCTLR_EL2,	/* System Control Register (EL2) */
	TCR2_EL2,	/* Extended Translation Control Register (EL2) */
	SCTLR2_EL2,	/* System Control Register 2 (EL2) */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */

	/* Any VNCR-capable reg goes after this point */
	MARKER(__VNCR_START__),

	VNCR(SCTLR_EL1),/* System Control Register */
	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
	VNCR(CPACR_EL1),/* Coprocessor Access Control */
	VNCR(ZCR_EL1),	/* SVE Control */
	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
	VNCR(TCR_EL1),	/* Translation Control Register */
	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
	VNCR(SCTLR2_EL1), /* System Control Register 2 */
	VNCR(ESR_EL1),	/* Exception Syndrome Register */
	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
	VNCR(FAR_EL1),	/* Fault Address Register */
	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
	VNCR(VBAR_EL1),	/* Vector Base Address Register */
	VNCR(CONTEXTIDR_EL1),	/* Context ID Register */
	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
	VNCR(ELR_EL1),
	VNCR(SP_EL1),
	VNCR(SPSR_EL1),
	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */

	/* Permission Indirection Extension registers */
	VNCR(PIR_EL1),	 /* Permission Indirection Register 1 (EL1) */
	VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */

	/* FEAT_RAS registers */
	VNCR(VDISR_EL2),
	VNCR(VSESR_EL2),

	VNCR(HFGRTR_EL2),
	VNCR(HFGWTR_EL2),
	VNCR(HFGITR_EL2),
	VNCR(HDFGRTR_EL2),
	VNCR(HDFGWTR_EL2),
	VNCR(HAFGRTR_EL2),
	VNCR(HFGRTR2_EL2),
	VNCR(HFGWTR2_EL2),
	VNCR(HFGITR2_EL2),
	VNCR(HDFGRTR2_EL2),
	VNCR(HDFGWTR2_EL2),

	VNCR(VNCR_EL2),

	VNCR(CNTVOFF_EL2),
	VNCR(CNTV_CVAL_EL0),
	VNCR(CNTV_CTL_EL0),
	VNCR(CNTP_CVAL_EL0),
	VNCR(CNTP_CTL_EL0),

	VNCR(ICH_LR0_EL2),
	VNCR(ICH_LR1_EL2),
	VNCR(ICH_LR2_EL2),
	VNCR(ICH_LR3_EL2),
	VNCR(ICH_LR4_EL2),
	VNCR(ICH_LR5_EL2),
	VNCR(ICH_LR6_EL2),
	VNCR(ICH_LR7_EL2),
	VNCR(ICH_LR8_EL2),
	VNCR(ICH_LR9_EL2),
	VNCR(ICH_LR10_EL2),
	VNCR(ICH_LR11_EL2),
	VNCR(ICH_LR12_EL2),
	VNCR(ICH_LR13_EL2),
	VNCR(ICH_LR14_EL2),
	VNCR(ICH_LR15_EL2),

	VNCR(ICH_AP0R0_EL2),
	VNCR(ICH_AP0R1_EL2),
	VNCR(ICH_AP0R2_EL2),
	VNCR(ICH_AP0R3_EL2),
	VNCR(ICH_AP1R0_EL2),
	VNCR(ICH_AP1R1_EL2),
	VNCR(ICH_AP1R2_EL2),
	VNCR(ICH_AP1R3_EL2),
	VNCR(ICH_HCR_EL2),
	VNCR(ICH_VMCR_EL2),

	VNCR(ICH_HFGRTR_EL2),
	VNCR(ICH_HFGWTR_EL2),
	VNCR(ICH_HFGITR_EL2),

	NR_SYS_REGS	/* Nothing after this line! */
};

struct resx {
	u64	res0;
	u64	res1;
};

struct kvm_sysreg_masks {
	struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__];
};

static inline struct resx __kvm_get_sysreg_resx(struct kvm_arch *arch,
						enum vcpu_sysreg sr)
{
	struct kvm_sysreg_masks *masks;

	masks = arch->sysreg_masks;
	if (likely(masks &&
		   sr >= __SANITISED_REG_START__ && sr < NR_SYS_REGS))
		return masks->mask[sr - __SANITISED_REG_START__];

	return (struct resx){};
}

#define kvm_get_sysreg_resx(k, sr)	__kvm_get_sysreg_resx(&(k)->arch, (sr))

static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch,
					 enum vcpu_sysreg sr, struct resx resx)
{
	arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx;
}

#define kvm_set_sysreg_resx(k, sr, resx)	\
	__kvm_set_sysreg_resx(&(k)->arch, (sr), (resx))
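
/*
 * Sketch of how the RES0/RES1 masks are meant to be consumed (the in-kernel
 * sanitisation is performed by kvm_vcpu_apply_reg_masks(), declared further
 * down): conceptually, a value written to a register at or above
 * __SANITISED_REG_START__ has its RES0 bits cleared and its RES1 bits
 * forced to 1.
 *
 *	struct resx r = kvm_get_sysreg_resx(kvm, HCR_EL2);
 *	u64 sanitised = (val & ~r.res0) | r.res1;
 */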

struct fgt_masks {
	const char	*str;
	u64		mask;
	u64		nmask;
	u64		res0;
	u64		res1;
};

extern struct fgt_masks hfgrtr_masks;
extern struct fgt_masks hfgwtr_masks;
extern struct fgt_masks hfgitr_masks;
extern struct fgt_masks hdfgrtr_masks;
extern struct fgt_masks hdfgwtr_masks;
extern struct fgt_masks hafgrtr_masks;
extern struct fgt_masks hfgrtr2_masks;
extern struct fgt_masks hfgwtr2_masks;
extern struct fgt_masks hfgitr2_masks;
extern struct fgt_masks hdfgrtr2_masks;
extern struct fgt_masks hdfgwtr2_masks;
extern struct fgt_masks ich_hfgrtr_masks;
extern struct fgt_masks ich_hfgwtr_masks;
extern struct fgt_masks ich_hfgitr_masks;

extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks);
extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks);
extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks);
extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks);
extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks);
extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
extern struct fgt_masks kvm_nvhe_sym(ich_hfgrtr_masks);
extern struct fgt_masks kvm_nvhe_sym(ich_hfgwtr_masks);
extern struct fgt_masks kvm_nvhe_sym(ich_hfgitr_masks);

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;

	/* This pointer has to be 4kB aligned. */
	u64 *vncr_array;
};

struct cpu_sve_state {
	__u64 zcr_el1;

	/*
	 * Ordering is important since __sve_save_state/__sve_restore_state
	 * relies on it.
	 */
	__u32 fpsr;
	__u32 fpcr;

	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
	__u8 sve_regs[];
};

/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put()
 * - or is an invariant for the lifetime of the system
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_SPE			0
#define KVM_HOST_DATA_FLAG_HAS_TRBE			1
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			2
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	3
#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		4
#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		5
#define KVM_HOST_DATA_FLAG_HAS_BRBE			6
	unsigned long flags;

	struct kvm_cpu_context host_ctxt;

	/*
	 * Hyp VA.
	 * sve_state is only used in pKVM and if system_supports_sve().
	 */
	struct cpu_sve_state *sve_state;

	/* Used by pKVM only. */
	u64	fpmr;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_owner;

	/*
	 * host_debug_state contains the host registers which are
	 * saved and restored during world switches.
	 */
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
		/* Values of trap registers for the host before guest entry. */
		u64 mdcr_el2;
		u64 brbcr_el1;
	} host_debug_state;

	/* Guest trace filter value */
	u64 trfcr_while_in_guest;

	/* Number of programmable event counters (PMCR_EL0.N) for this CPU */
	unsigned int nr_event_counters;

	/* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
	unsigned int debug_brps;
	unsigned int debug_wrps;

	/* Last vgic_irq part of the AP list recorded in an LR */
	struct vgic_irq *last_lr_irq;

	/* PPI state tracking for GICv5-based guests */
	struct {
		/*
		 * For tracking the PPI pending state, we need both the entry
		 * state and exit state to correctly detect edges as it is
		 * possible that an interrupt has been injected in software in
		 * the interim.
		 */
		DECLARE_BITMAP(pendr_entry, VGIC_V5_NR_PRIVATE_IRQS);
		DECLARE_BITMAP(pendr_exit, VGIC_V5_NR_PRIVATE_IRQS);

		/* The saved state of the regs when leaving the guest */
		DECLARE_BITMAP(activer_exit, VGIC_V5_NR_PRIVATE_IRQS);
	} vgic_v5_ppi_state;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct vncr_tlb;

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE.  These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers.  When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;

	struct {
		u64	r;
		u64	w;
	} fgt[__NR_FGT_GROUP_IDS__];

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u16 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches.  vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.
	 *
	 * external_debug_state contains the debug values we want to use to
	 * debug the guest.  This is set via the KVM_SET_GUEST_DEBUG ioctl.
	 */
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;
	u64 external_mdscr_el1;

	enum {
		VCPU_DEBUG_FREE,
		VCPU_DEBUG_HOST_OWNED,
		VCPU_DEBUG_GUEST_OWNED,
	} debug_owner;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Pages to top-up the pKVM/EL2 guest pool */
	struct kvm_hyp_memcache pkvm_memcache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;

	/* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */
	struct vncr_tlb	*vncr_tlb;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_test_and_clear_flag(v, flagset, f, m)		\
	({							\
		typeof(v->arch.flagset) set;			\
								\
		set = __vcpu_get_flag(v, flagset, f, m);	\
		__vcpu_clear_flag(v, flagset, f, m);		\
								\
		set;						\
	})

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
#define vcpu_test_and_clear_flag(v, ...)			\
	__vcpu_test_and_clear_flag((v), __VA_ARGS__)
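
/*
 * Illustrative expansion (no additional API): a flag definition such as
 * VCPU_INITIALIZED below is the triplet "cflags, BIT(0), BIT(0)", so
 *
 *	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
 *
 * becomes __vcpu_set_flag(vcpu, cflags, BIT(0), BIT(0)), i.e. an OR of
 * BIT(0) into vcpu->arch.cflags under the preempt guards above, while
 * vcpu_get_flag() masks the same bit back out.
 */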

/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* pKVM VCPU setup completed */
#define VCPU_PKVM_FINALIZED	__vcpu_single_flag(cflags, BIT(2))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
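
/*
 * Worked example (values follow from the definitions above, shown only to
 * make the encoding concrete): EXCEPT_MASK covers iflags bits 3:1, so
 * __EXCEPT_SHIFT is 1 and EXCEPT_AA64_EL1_SYNC is the triplet
 * "iflags, 0 << 1, GENMASK(3, 1)". Because the mask is multi-bit,
 * vcpu_set_flag(vcpu, EXCEPT_AA64_EL1_SYNC) first clears bits 3:1 and then
 * ORs in the new target, leaving PENDING_EXCEPTION (bit 0) untouched.
 */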

/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(1))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(2))
/* Software step state is Active-pending for external debug */
#define HOST_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(3))
/* Software step state is Active pending for guest debug */
#define GUEST_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(4))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(6))
/* KVM is currently emulating a nested ERET */
#define IN_NESTED_ERET		__vcpu_single_flag(sflags, BIT(7))
/* SError pending for nested guest */
#define NESTED_SERROR_PENDING	__vcpu_single_flag(sflags, BIT(8))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_zcr_elx(vcpu)						\
	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)

#define sve_state_size_from_vl(sve_max_vl) ({				\
	size_t __size_ret;						\
	unsigned int __vq;						\
									\
	if (WARN_ON(!sve_vl_valid(sve_max_vl))) {			\
		__size_ret = 0;						\
	} else {							\
		__vq = sve_vq_from_vl(sve_max_vl);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vq);			\
	}								\
									\
	__size_ret;							\
})

#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define kvm_has_sve(kvm)	(system_supports_sve() &&		\
				 test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))

#ifdef __KVM_NVHE_HYPERVISOR__
#define vcpu_has_sve(vcpu)	kvm_has_sve(kern_hyp_va((vcpu)->kvm))
#else
#define vcpu_has_sve(vcpu)	kvm_has_sve((vcpu)->kvm)
#endif

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||	\
	  vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined (__KVM_NVHE_HYPERVISOR__)
	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];
#endif
	return (u64 *)&ctxt->sys_regs[r];
}

#define __ctxt_sys_reg(c,r)						\
	({								\
		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
			     (r) >= NR_SYS_REGS);			\
		___ctxt_sys_reg(c, r);					\
	})

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);

#define __vcpu_assign_sys_reg(v, r, val)				\
	do {								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 __v = (val);					\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
									\
		ctxt_sys_reg(ctxt, (r)) = __v;				\
	} while (0)

#define __vcpu_rmw_sys_reg(v, r, op, val)				\
	do {								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 __v = ctxt_sys_reg(ctxt, (r));			\
		__v op (val);						\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
									\
		ctxt_sys_reg(ctxt, (r)) = __v;				\
	} while (0)

#define __vcpu_sys_reg(v,r)						\
	({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 __v = ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
		__v;							\
	})

u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
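
/*
 * Usage sketch (illustrative only; HCR_VM comes from asm/kvm_arm.h): the
 * memory-backed accessors are for values that are not live in hardware,
 * e.g. when servicing a userspace GET/SET_ONE_REG request.
 *
 *	u64 val = __vcpu_sys_reg(vcpu, TTBR0_EL1);	// read backing store
 *	__vcpu_assign_sys_reg(vcpu, TTBR0_EL1, val);	// write it back
 *	__vcpu_rmw_sys_reg(vcpu, HCR_EL2, |=, HCR_VM);	// read-modify-write
 *
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() should be preferred when the
 * register may be loaded on the CPU.
 */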

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	(!!READ_ONCE((vcpu)->pid))

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
 * where the eret to EL1 acts as a context synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
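
/*
 * Calling convention sketch (the callers shown are assumptions based on
 * typical usage elsewhere in KVM/arm64, not an exhaustive list): on VHE the
 * target is a direct function call followed by an isb(); on nVHE the same
 * symbol is turned into an SMCCC function ID and reached via HVC.
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */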

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
				  unsigned int idx);
int __init populate_nv_trap_config(void);

void kvm_calculate_traps(struct kvm_vcpu *vcpu);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel uses either the VHE or nVHE one, depending on
 *   the mode we're running in.
 *
 *   Unless we're in protected mode, where the host is fully deprivileged
 *   and the nVHE per-CPU data is exclusively accessible to the protected
 *   EL2 code. In this case, the EL1 code uses the *VHE* data as its private
 *   state (which makes sense in a way as there shouldn't be any shared
 *   state between the host and the hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)						\
	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
	 &this_cpu_ptr(&kvm_host_data)->f :				\
	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif

#define host_data_test_flag(flag)					\
	(test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
#define host_data_set_flag(flag)					\
	set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
#define host_data_clear_flag(flag)					\
	clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
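
/*
 * Usage sketch (illustrative only): host_data_ptr() hides which per-CPU
 * instance is live, so callers simply do
 *
 *	if (host_data_test_flag(HAS_SPE))
 *		...;
 *	host_data_set_flag(TRBE_ENABLED);
 *	u64 trfcr = *host_data_ptr(trfcr_while_in_guest);
 *
 * the flag argument being the KVM_HOST_DATA_FLAG_* suffix.
 */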

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}

/* Check whether the FP regs are owned by the host */
static inline bool host_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
}

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

void kvm_init_host_debug_data(void);
void kvm_debug_init_vhe(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

#define kvm_debug_regs_in_use(vcpu)		\
	((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
#define kvm_host_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
#define kvm_guest_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
void kvm_enable_trbe(void);
void kvm_disable_trbe(void);
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
static inline void kvm_enable_trbe(void) {}
static inline void kvm_disable_trbe(void) {}
static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);
u32 kvm_get_pa_bits(struct kvm *kvm);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)

#define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)				\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
	return test_bit(feature, ka->vcpu_features);
}

#define kvm_vcpu_has_feature(k, f)	__vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))

#define kvm_vcpu_initialized(v) vcpu_get_flag((v), VCPU_INITIALIZED)

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
{
	switch (reg) {
	case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
		return &ka->id_regs[IDREG_IDX(reg)];
	case SYS_CTR_EL0:
		return &ka->ctr_el0;
	case SYS_MIDR_EL1:
		return &ka->midr_el1;
	case SYS_REVIDR_EL1:
		return &ka->revidr_el1;
	case SYS_AIDR_EL1:
		return &ka->aidr_el1;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

#define kvm_read_vm_id_reg(kvm, reg)					\
	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })

void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);

#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val) ({			\
	u64 __val = SYS_FIELD_VALUE(id, fld, val);			\
	sign_extend64(__val, id##_##fld##_WIDTH - 1);			\
})

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})

#define get_idreg_field_signed(kvm, id, fld)				\
	({								\
		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define kvm_cmp_feat_signed(kvm, id, fld, op, limit)			\
	(get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))

#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)			\
	(get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))

#define kvm_cmp_feat(kvm, id, fld, op, limit)				\
	(id##_##fld##_SIGNED ?						\
	 kvm_cmp_feat_signed(kvm, id, fld, op, limit) :			\
	 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))

#define __kvm_has_feat(kvm, id, fld, limit)				\
	kvm_cmp_feat(kvm, id, fld, >=, limit)

#define kvm_has_feat(kvm, ...)	__kvm_has_feat(kvm, __VA_ARGS__)

#define __kvm_has_feat_enum(kvm, id, fld, val)				\
	kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)

#define kvm_has_feat_enum(kvm, ...)	__kvm_has_feat_enum(kvm, __VA_ARGS__)

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(kvm_cmp_feat(kvm, id, fld, >=, min) &&				\
	 kvm_cmp_feat(kvm, id, fld, <=, max))
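
/*
 * Usage sketch (mirrors the helpers below, e.g. kvm_has_s1pie()): the id/fld
 * pair expands into the generated ID register field macros, so a typical
 * feature check is
 *
 *	if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
 *		...;
 *
 * which compares the VM's emulated ID_AA64MMFR3_EL1.S1PIE field against the
 * IMP value, honouring the field's signedness.
 */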

/* Check for a given level of PAuth support */
#define kvm_has_pauth(k, l)						\
	({								\
		bool pa, pi, pa3;					\
									\
		pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
		pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
		pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
									\
		(pa + pi + pa3) == 1;					\
	})

#define kvm_has_fpmr(k)					\
	(system_supports_fpmr() &&			\
	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))

#define kvm_has_tcr2(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))

#define kvm_has_s1pie(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

#define kvm_has_s1poe(k)				\
	(system_supports_poe() &&			\
	 kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))

#define kvm_has_ras(k)					\
	(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))

#define kvm_has_sctlr2(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, SCTLRX, IMP))

static inline bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg);
void check_feature_map(void);
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);

static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
{
	switch (reg) {
	case HFGRTR_EL2:
	case HFGWTR_EL2:
		return HFGRTR_GROUP;
	case HFGITR_EL2:
		return HFGITR_GROUP;
	case HDFGRTR_EL2:
	case HDFGWTR_EL2:
		return HDFGRTR_GROUP;
	case HAFGRTR_EL2:
		return HAFGRTR_GROUP;
	case HFGRTR2_EL2:
	case HFGWTR2_EL2:
		return HFGRTR2_GROUP;
	case HFGITR2_EL2:
		return HFGITR2_GROUP;
	case HDFGRTR2_EL2:
	case HDFGWTR2_EL2:
		return HDFGRTR2_GROUP;
	case ICH_HFGRTR_EL2:
	case ICH_HFGWTR_EL2:
		return ICH_HFGRTR_GROUP;
	case ICH_HFGITR_EL2:
		return ICH_HFGITR_GROUP;
	default:
		BUILD_BUG_ON(1);
	}
}

#define vcpu_fgt(vcpu, reg)						\
	({								\
		enum fgt_group_id id = __fgt_reg_to_group_id(reg);	\
		u64 *p;							\
		switch (reg) {						\
		case HFGWTR_EL2:					\
		case HDFGWTR_EL2:					\
		case HFGWTR2_EL2:					\
		case HDFGWTR2_EL2:					\
		case ICH_HFGWTR_EL2:					\
			p = &(vcpu)->arch.fgt[id].w;			\
			break;						\
		default:						\
			p = &(vcpu)->arch.fgt[id].r;			\
			break;						\
		}							\
									\
		p;							\
	})

long kvm_get_cap_for_kvm_ioctl(unsigned int ioctl, long *ext);

#endif /* __ARM64_KVM_HOST_H__ */