xref: /linux/arch/arm64/include/asm/kvm_host.h (revision 64dd3b6a79f0907d36de481b0f15fab323a53e5a)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012,2013 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  *
6  * Derived from arch/arm/include/asm/kvm_host.h:
7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
9  */
10 
11 #ifndef __ARM64_KVM_HOST_H__
12 #define __ARM64_KVM_HOST_H__
13 
14 #include <linux/arm-smccc.h>
15 #include <linux/bitmap.h>
16 #include <linux/types.h>
17 #include <linux/jump_label.h>
18 #include <linux/kvm_types.h>
19 #include <linux/maple_tree.h>
20 #include <linux/percpu.h>
21 #include <linux/psci.h>
22 #include <asm/arch_gicv3.h>
23 #include <asm/barrier.h>
24 #include <asm/cpufeature.h>
25 #include <asm/cputype.h>
26 #include <asm/daifflags.h>
27 #include <asm/fpsimd.h>
28 #include <asm/kvm.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/vncr_mapping.h>
31 
32 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
33 
34 #define KVM_HALT_POLL_NS_DEFAULT 500000
35 
36 #include <kvm/arm_vgic.h>
37 #include <kvm/arm_arch_timer.h>
38 #include <kvm/arm_pmu.h>
39 
40 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
41 
42 #define KVM_VCPU_MAX_FEATURES 7
43 #define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)
44 
45 #define KVM_REQ_SLEEP \
46 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
47 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
48 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
49 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
50 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
51 #define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
52 #define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
53 #define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)
54 
55 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
56 				     KVM_DIRTY_LOG_INITIALLY_SET)
57 
58 #define KVM_HAVE_MMU_RWLOCK
59 
60 /*
61  * Mode of operation configurable with kvm-arm.mode early param.
62  * See Documentation/admin-guide/kernel-parameters.txt for more information.
63  */
64 enum kvm_mode {
65 	KVM_MODE_DEFAULT,
66 	KVM_MODE_PROTECTED,
67 	KVM_MODE_NV,
68 	KVM_MODE_NONE,
69 };
70 #ifdef CONFIG_KVM
71 enum kvm_mode kvm_get_mode(void);
72 #else
73 static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
74 #endif
75 
76 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
77 
78 extern unsigned int __ro_after_init kvm_sve_max_vl;
79 extern unsigned int __ro_after_init kvm_host_sve_max_vl;
80 int __init kvm_arm_init_sve(void);
81 
82 u32 __attribute_const__ kvm_target_cpu(void);
83 void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
84 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
85 
86 struct kvm_hyp_memcache {
87 	phys_addr_t head;
88 	unsigned long nr_pages;
89 };
90 
91 static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
92 				     phys_addr_t *p,
93 				     phys_addr_t (*to_pa)(void *virt))
94 {
95 	*p = mc->head;
96 	mc->head = to_pa(p);
97 	mc->nr_pages++;
98 }
99 
100 static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
101 				     void *(*to_va)(phys_addr_t phys))
102 {
103 	phys_addr_t *p = to_va(mc->head);
104 
105 	if (!mc->nr_pages)
106 		return NULL;
107 
108 	mc->head = *p;
109 	mc->nr_pages--;
110 
111 	return p;
112 }
113 
114 static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
115 				       unsigned long min_pages,
116 				       void *(*alloc_fn)(void *arg),
117 				       phys_addr_t (*to_pa)(void *virt),
118 				       void *arg)
119 {
120 	while (mc->nr_pages < min_pages) {
121 		phys_addr_t *p = alloc_fn(arg);
122 
123 		if (!p)
124 			return -ENOMEM;
125 		push_hyp_memcache(mc, p, to_pa);
126 	}
127 
128 	return 0;
129 }
130 
131 static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
132 				       void (*free_fn)(void *virt, void *arg),
133 				       void *(*to_va)(phys_addr_t phys),
134 				       void *arg)
135 {
136 	while (mc->nr_pages)
137 		free_fn(pop_hyp_memcache(mc, to_va), arg);
138 }
139 
140 void free_hyp_memcache(struct kvm_hyp_memcache *mc);
141 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
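/*
 * Editor's note -- an illustrative, standalone sketch (not kernel code) of
 * the memcache scheme above: the cache is a LIFO stack threaded through the
 * free pages themselves, each page's first word holding the "physical"
 * address of the next page. Here malloc() stands in for the real page
 * allocator and an identity cast for the VA<->PA conversion callbacks;
 * the block is compiled out and exists purely as documentation.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct memcache { uintptr_t head; unsigned long nr_pages; };

static void mc_push(struct memcache *mc, uintptr_t *page)
{
	*page = mc->head;		/* link new page to the old head */
	mc->head = (uintptr_t)page;	/* identity stand-in for to_pa() */
	mc->nr_pages++;
}

static void *mc_pop(struct memcache *mc)
{
	uintptr_t *page = (uintptr_t *)mc->head; /* stand-in for to_va() */

	if (!mc->nr_pages)
		return NULL;
	mc->head = *page;		/* new head is the linked page */
	mc->nr_pages--;
	return page;
}

static int mc_topup(struct memcache *mc, unsigned long min_pages)
{
	while (mc->nr_pages < min_pages) {
		uintptr_t *page = malloc(4096);	/* stand-in allocator */

		if (!page)
			return -1;
		mc_push(mc, page);
	}
	return 0;
}
#endif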
142 
143 struct kvm_vmid {
144 	atomic64_t id;
145 };
146 
147 struct kvm_s2_mmu {
148 	struct kvm_vmid vmid;
149 
150 	/*
151 	 * stage2 entry level table
152 	 *
153 	 * Two kvm_s2_mmu structures in the same VM can point to the same
154 	 * pgd here.  This happens when running a guest using a
155 	 * translation regime that isn't affected by its own stage-2
156 	 * translation, such as a non-VHE hypervisor running at vEL2, or
157 	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
158 	 * canonical stage-2 page tables.
159 	 */
160 	phys_addr_t	pgd_phys;
161 	struct kvm_pgtable *pgt;
162 
163 	/*
164 	 * VTCR value used on the host. For a non-NV guest (or a NV
165 	 * guest that runs in a context where its own S2 doesn't
166 	 * apply), its T0SZ value reflects that of the IPA size.
167 	 *
168 	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
169 	 * the guest.
170 	 */
171 	u64	vtcr;
172 
173 	/* The last vcpu id that ran on each physical CPU */
174 	int __percpu *last_vcpu_ran;
175 
176 #define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
177 	/*
178 	 * Memory cache used to split
179 	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
180 	 * is used to allocate stage2 page tables while splitting huge
181 	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
182 	 * influences both the capacity of the split page cache, and
183 	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
184 	 * too high.
185 	 *
186 	 * Protected by kvm->slots_lock.
187 	 */
188 	struct kvm_mmu_memory_cache split_page_cache;
189 	uint64_t split_page_chunk_size;
190 
191 	struct kvm_arch *arch;
192 
193 	/*
194 	 * For a shadow stage-2 MMU, the virtual vttbr used by the
195 	 * host to parse the guest S2.
196 	 * This either contains:
197 	 * - the virtual VTTBR programmed by the guest hypervisor with
198 	 *   CnP cleared
199 	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
200 	 *
201 	 * We also cache the full VTCR which gets used for TLB invalidation,
202 	 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
203 	 * to be cached in a TLB" to the letter.
204 	 */
205 	u64	tlb_vttbr;
206 	u64	tlb_vtcr;
207 
208 	/*
209 	 * true when this represents a nested context where virtual
210 	 * HCR_EL2.VM == 1
211 	 */
212 	bool	nested_stage2_enabled;
213 
214 	/*
215 	 *  0: Nobody is currently using this, check vttbr for validity
216 	 * >0: Somebody is actively using this.
217 	 */
218 	atomic_t refcnt;
219 };
220 
221 struct kvm_arch_memory_slot {
222 };
223 
224 /**
225  * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
226  *
227  * @std_bmap: Bitmap of standard secure service calls
228  * @std_hyp_bmap: Bitmap of standard hypervisor service calls
229  * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
230  */
231 struct kvm_smccc_features {
232 	unsigned long std_bmap;
233 	unsigned long std_hyp_bmap;
234 	unsigned long vendor_hyp_bmap;
235 };
236 
237 typedef unsigned int pkvm_handle_t;
238 
239 struct kvm_protected_vm {
240 	pkvm_handle_t handle;
241 	struct kvm_hyp_memcache teardown_mc;
242 	bool enabled;
243 };
244 
245 struct kvm_mpidr_data {
246 	u64			mpidr_mask;
247 	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
248 };
249 
250 static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
251 {
252 	unsigned long index = 0, mask = data->mpidr_mask;
253 	unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
254 
255 	bitmap_gather(&index, &aff, &mask, fls(mask));
256 
257 	return index;
258 }
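/*
 * Editor's worked example: bitmap_gather() packs the bits of "aff" selected
 * by "mask" into the low bits of "index" (a PEXT-style operation). If, say,
 * mpidr_mask were 0x0f0f (low nibbles of Aff1 and Aff0) and the vcpu's
 * affinity were 0x0302, the gathered index would be (0x3 << 4) | 0x2 = 0x32.
 * The mask/index values here are made up for illustration only.
 */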
259 
260 struct kvm_sysreg_masks;
261 
262 enum fgt_group_id {
263 	__NO_FGT_GROUP__,
264 	HFGxTR_GROUP,
265 	HDFGRTR_GROUP,
266 	HDFGWTR_GROUP = HDFGRTR_GROUP,
267 	HFGITR_GROUP,
268 	HAFGRTR_GROUP,
269 
270 	/* Must be last */
271 	__NR_FGT_GROUP_IDS__
272 };
273 
274 struct kvm_arch {
275 	struct kvm_s2_mmu mmu;
276 
277 	/*
278 	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
279 	 * architecture. We track them globally, as we present the
280 	 * same feature-set to all vcpus.
281 	 *
282 	 * Index 0 is currently spare.
283 	 */
284 	u64 fgu[__NR_FGT_GROUP_IDS__];
285 
286 	/*
287 	 * Stage 2 paging state for VMs with nested S2 using a virtual
288 	 * VMID.
289 	 */
290 	struct kvm_s2_mmu *nested_mmus;
291 	size_t nested_mmus_size;
292 	int nested_mmus_next;
293 
294 	/* Interrupt controller */
295 	struct vgic_dist	vgic;
296 
297 	/* Timers */
298 	struct arch_timer_vm_data timer_data;
299 
300 	/* Mandated version of PSCI */
301 	u32 psci_version;
302 
303 	/* Protects VM-scoped configuration data */
304 	struct mutex config_lock;
305 
306 	/*
307 	 * If we encounter a data abort without valid instruction syndrome
308 	 * information, report this to user space.  User space can (and
309 	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
310 	 * supported.
311 	 */
312 #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
313 	/* Memory Tagging Extension enabled for the guest */
314 #define KVM_ARCH_FLAG_MTE_ENABLED			1
315 	/* At least one vCPU has run in the VM */
316 #define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
317 	/* The vCPU feature set for the VM is configured */
318 #define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
319 	/* PSCI SYSTEM_SUSPEND enabled for the guest */
320 #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
321 	/* VM counter offset */
322 #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
323 	/* Timer PPIs made immutable */
324 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
325 	/* Initial ID reg values loaded */
326 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
327 	/* Fine-Grained UNDEF initialised */
328 #define KVM_ARCH_FLAG_FGU_INITIALIZED			8
329 	unsigned long flags;
330 
331 	/* VM-wide vCPU feature set */
332 	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
333 
334 	/* MPIDR to vcpu index mapping, optional */
335 	struct kvm_mpidr_data *mpidr_data;
336 
337 	/*
338 	 * VM-wide PMU filter, implemented as a bitmap and big enough for
339 	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
340 	 */
341 	unsigned long *pmu_filter;
342 	struct arm_pmu *arm_pmu;
343 
344 	cpumask_var_t supported_cpus;
345 
346 	/* PMCR_EL0.N value for the guest */
347 	u8 pmcr_n;
348 
349 	/* Iterator for idreg debugfs */
350 	u8	idreg_debugfs_iter;
351 
352 	/* Hypercall features firmware registers' descriptor */
353 	struct kvm_smccc_features smccc_feat;
354 	struct maple_tree smccc_filter;
355 
356 	/*
357 	 * Emulated CPU ID registers per VM
358 	 * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
359 	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
360 	 *
361 	 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
362 	 * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
363 	 */
364 #define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
365 #define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
366 	u64 id_regs[KVM_ARM_ID_REG_NUM];
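	/*
	 * Editor's worked example: ID_AA64PFR0_EL1 is sys_reg(3, 0, 0, 4, 0),
	 * so IDREG_IDX() yields ((4 - 1) << 3) | 0 = 24 and the register
	 * lives in id_regs[24]. The last encoding, sys_reg(3, 0, 0, 7, 7),
	 * maps to ((7 - 1) << 3) | 7 = 55, hence KVM_ARM_ID_REG_NUM == 56.
	 */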
367 
368 	u64 ctr_el0;
369 
370 	/* Masks for VNCR-baked sysregs */
371 	struct kvm_sysreg_masks	*sysreg_masks;
372 
373 	/*
374 	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
375 	 * the associated pKVM instance in the hypervisor.
376 	 */
377 	struct kvm_protected_vm pkvm;
378 };
379 
380 struct kvm_vcpu_fault_info {
381 	u64 esr_el2;		/* Hyp Syndrome Register */
382 	u64 far_el2;		/* Hyp Fault Address Register */
383 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
384 	u64 disr_el1;		/* Deferred [SError] Status Register */
385 };
386 
387 /*
388  * VNCR() just places the VNCR_capable registers in the enum after
389  * __VNCR_START__, and sets the value (after correction) to be an 8-byte offset
390  * from the VNCR base. As we don't require the enum to be otherwise ordered,
391  * we need the terrible hack below to ensure that we correctly size the
392  * sys_regs array, no matter what.
393  *
394  * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
395  * treasure trove of bit hacks:
396  * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
397  */
398 #define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
399 #define VNCR(r)						\
400 	__before_##r,					\
401 	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
402 	__after_##r = __MAX__(__before_##r - 1, r)
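/*
 * Editor's worked example: __MAX__() is branchless -- for x < y the term
 * -((x) < (y)) is all ones, so the expression reduces to x ^ (x ^ y) == y;
 * otherwise the mask is zero and it reduces to x. E.g. __MAX__(3, 5):
 * 3 ^ ((3 ^ 5) & -1) == 3 ^ 6 == 5. In the VNCR() expansion, __after_r
 * therefore never goes backwards: the enumerator following each VNCR
 * register is always greater than both the previous position and the
 * VNCR-derived offset, so NR_SYS_REGS is large enough for every register.
 */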
403 
404 enum vcpu_sysreg {
405 	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
406 	MPIDR_EL1,	/* MultiProcessor Affinity Register */
407 	CLIDR_EL1,	/* Cache Level ID Register */
408 	CSSELR_EL1,	/* Cache Size Selection Register */
409 	TPIDR_EL0,	/* Thread ID, User R/W */
410 	TPIDRRO_EL0,	/* Thread ID, User R/O */
411 	TPIDR_EL1,	/* Thread ID, Privileged */
412 	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
413 	PAR_EL1,	/* Physical Address Register */
414 	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
415 	OSLSR_EL1,	/* OS Lock Status Register */
416 	DISR_EL1,	/* Deferred Interrupt Status Register */
417 
418 	/* Performance Monitors Registers */
419 	PMCR_EL0,	/* Control Register */
420 	PMSELR_EL0,	/* Event Counter Selection Register */
421 	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
422 	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
423 	PMCCNTR_EL0,	/* Cycle Counter Register */
424 	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
425 	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
426 	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
427 	PMCNTENSET_EL0,	/* Count Enable Set Register */
428 	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
429 	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
430 	PMUSERENR_EL0,	/* User Enable Register */
431 
432 	/* Pointer Authentication Registers in a strict increasing order. */
433 	APIAKEYLO_EL1,
434 	APIAKEYHI_EL1,
435 	APIBKEYLO_EL1,
436 	APIBKEYHI_EL1,
437 	APDAKEYLO_EL1,
438 	APDAKEYHI_EL1,
439 	APDBKEYLO_EL1,
440 	APDBKEYHI_EL1,
441 	APGAKEYLO_EL1,
442 	APGAKEYHI_EL1,
443 
444 	/* Memory Tagging Extension registers */
445 	RGSR_EL1,	/* Random Allocation Tag Seed Register */
446 	GCR_EL1,	/* Tag Control Register */
447 	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */
448 
449 	POR_EL0,	/* Permission Overlay Register 0 (EL0) */
450 
451 	/* FP/SIMD/SVE */
452 	SVCR,
453 	FPMR,
454 
455 	/* 32bit specific registers. */
456 	DACR32_EL2,	/* Domain Access Control Register */
457 	IFSR32_EL2,	/* Instruction Fault Status Register */
458 	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
459 	DBGVCR32_EL2,	/* Debug Vector Catch Register */
460 
461 	/* EL2 registers */
462 	SCTLR_EL2,	/* System Control Register (EL2) */
463 	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
464 	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
465 	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
466 	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
467 	ZCR_EL2,	/* SVE Control Register (EL2) */
468 	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
469 	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
470 	TCR_EL2,	/* Translation Control Register (EL2) */
471 	SPSR_EL2,	/* EL2 saved program status register */
472 	ELR_EL2,	/* EL2 exception link register */
473 	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
474 	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
475 	ESR_EL2,	/* Exception Syndrome Register (EL2) */
476 	FAR_EL2,	/* Fault Address Register (EL2) */
477 	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
478 	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
479 	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
480 	VBAR_EL2,	/* Vector Base Address Register (EL2) */
481 	RVBAR_EL2,	/* Reset Vector Base Address Register */
482 	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
483 	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
484 	SP_EL2,		/* EL2 Stack Pointer */
485 	CNTHP_CTL_EL2,
486 	CNTHP_CVAL_EL2,
487 	CNTHV_CTL_EL2,
488 	CNTHV_CVAL_EL2,
489 
490 	__VNCR_START__,	/* Any VNCR-capable reg goes after this point */
491 
492 	VNCR(SCTLR_EL1),/* System Control Register */
493 	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
494 	VNCR(CPACR_EL1),/* Coprocessor Access Control */
495 	VNCR(ZCR_EL1),	/* SVE Control */
496 	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
497 	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
498 	VNCR(TCR_EL1),	/* Translation Control Register */
499 	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
500 	VNCR(ESR_EL1),	/* Exception Syndrome Register */
501 	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
502 	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
503 	VNCR(FAR_EL1),	/* Fault Address Register */
504 	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
505 	VNCR(VBAR_EL1),	/* Vector Base Address Register */
506 	VNCR(CONTEXTIDR_EL1),	/* Context ID Register */
507 	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
508 	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
509 	VNCR(ELR_EL1),
510 	VNCR(SP_EL1),
511 	VNCR(SPSR_EL1),
512 	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
513 	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
514 	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
515 	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
516 	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
517 	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
518 	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
519 	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
520 	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */
521 
522 	/* Permission Indirection Extension registers */
523 	VNCR(PIR_EL1),	 /* Permission Indirection Register 1 (EL1) */
524 	VNCR(PIRE0_EL1), /*  Permission Indirection Register 0 (EL1) */
525 
526 	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */
527 
528 	VNCR(HFGRTR_EL2),
529 	VNCR(HFGWTR_EL2),
530 	VNCR(HFGITR_EL2),
531 	VNCR(HDFGRTR_EL2),
532 	VNCR(HDFGWTR_EL2),
533 	VNCR(HAFGRTR_EL2),
534 
535 	VNCR(CNTVOFF_EL2),
536 	VNCR(CNTV_CVAL_EL0),
537 	VNCR(CNTV_CTL_EL0),
538 	VNCR(CNTP_CVAL_EL0),
539 	VNCR(CNTP_CTL_EL0),
540 
541 	VNCR(ICH_HCR_EL2),
542 
543 	NR_SYS_REGS	/* Nothing after this line! */
544 };
545 
546 struct kvm_sysreg_masks {
547 	struct {
548 		u64	res0;
549 		u64	res1;
550 	} mask[NR_SYS_REGS - __VNCR_START__];
551 };
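/*
 * Editor's note (hedged -- this describes the usual RES0/RES1 convention,
 * assumed rather than stated in this header): sanitising a VNCR-backed
 * value against these masks amounts to (val & ~res0) | res1, i.e. bits
 * reserved-as-zero are cleared and bits reserved-as-one are forced set.
 */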
552 
553 struct kvm_cpu_context {
554 	struct user_pt_regs regs;	/* sp = sp_el0 */
555 
556 	u64	spsr_abt;
557 	u64	spsr_und;
558 	u64	spsr_irq;
559 	u64	spsr_fiq;
560 
561 	struct user_fpsimd_state fp_regs;
562 
563 	u64 sys_regs[NR_SYS_REGS];
564 
565 	struct kvm_vcpu *__hyp_running_vcpu;
566 
567 	/* This pointer has to be 4kB aligned. */
568 	u64 *vncr_array;
569 };
570 
571 struct cpu_sve_state {
572 	__u64 zcr_el1;
573 
574 	/*
575 	 * Ordering is important since __sve_save_state/__sve_restore_state
576 	 * relies on it.
577 	 */
578 	__u32 fpsr;
579 	__u32 fpcr;
580 
581 	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
582 	__u8 sve_regs[];
583 };
584 
585 /*
586  * This structure is instantiated on a per-CPU basis, and contains
587  * data that is:
588  *
589  * - tied to a single physical CPU, and
590  * - either has a lifetime that does not extend past vcpu_put()
591  * - or is an invariant for the lifetime of the system
592  *
593  * Use host_data_ptr(field) as a way to access a pointer to such a
594  * field.
595  */
596 struct kvm_host_data {
597 	struct kvm_cpu_context host_ctxt;
598 
599 	/*
600 	 * All pointers in this union are hyp VA.
601 	 * sve_state is only used in pKVM and if system_supports_sve().
602 	 */
603 	union {
604 		struct user_fpsimd_state *fpsimd_state;
605 		struct cpu_sve_state *sve_state;
606 	};
607 
608 	union {
609 		/* HYP VA pointer to the host storage for FPMR */
610 		u64	*fpmr_ptr;
611 		/*
612 		 * Used by pKVM only, as it needs to provide storage
613 		 * for the host
614 		 */
615 		u64	fpmr;
616 	};
617 
618 	/* Ownership of the FP regs */
619 	enum {
620 		FP_STATE_FREE,
621 		FP_STATE_HOST_OWNED,
622 		FP_STATE_GUEST_OWNED,
623 	} fp_owner;
624 
625 	/*
626 	 * host_debug_state contains the host registers which are
627 	 * saved and restored during world switches.
628 	 */
629 	struct {
630 		/* {Break,watch}point registers */
631 		struct kvm_guest_debug_arch regs;
632 		/* Statistical profiling extension */
633 		u64 pmscr_el1;
634 		/* Self-hosted trace */
635 		u64 trfcr_el1;
636 		/* Values of trap registers for the host before guest entry. */
637 		u64 mdcr_el2;
638 	} host_debug_state;
639 };
640 
641 struct kvm_host_psci_config {
642 	/* PSCI version used by host. */
643 	u32 version;
644 	u32 smccc_version;
645 
646 	/* Function IDs used by host if version is v0.1. */
647 	struct psci_0_1_function_ids function_ids_0_1;
648 
649 	bool psci_0_1_cpu_suspend_implemented;
650 	bool psci_0_1_cpu_on_implemented;
651 	bool psci_0_1_cpu_off_implemented;
652 	bool psci_0_1_migrate_implemented;
653 };
654 
655 extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
656 #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
657 
658 extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
659 #define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
660 
661 extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
662 #define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
663 
664 struct vcpu_reset_state {
665 	unsigned long	pc;
666 	unsigned long	r0;
667 	bool		be;
668 	bool		reset;
669 };
670 
671 struct kvm_vcpu_arch {
672 	struct kvm_cpu_context ctxt;
673 
674 	/*
675 	 * Guest floating point state
676 	 *
677 	 * The architecture has two main floating point extensions,
678 	 * the original FPSIMD and SVE.  These have overlapping
679 	 * register views, with the FPSIMD V registers occupying the
680 	 * low 128 bits of the SVE Z registers.  When the core
681 	 * floating point code saves the register state of a task it
682 	 * records which view it saved in fp_type.
683 	 */
684 	void *sve_state;
685 	enum fp_type fp_type;
686 	unsigned int sve_max_vl;
687 
688 	/* Stage 2 paging state used by the hardware on next switch */
689 	struct kvm_s2_mmu *hw_mmu;
690 
691 	/* Values of trap registers for the guest. */
692 	u64 hcr_el2;
693 	u64 hcrx_el2;
694 	u64 mdcr_el2;
695 	u64 cptr_el2;
696 
697 	/* Exception Information */
698 	struct kvm_vcpu_fault_info fault;
699 
700 	/* Configuration flags, set once and for all before the vcpu can run */
701 	u8 cflags;
702 
703 	/* Input flags to the hypervisor code, potentially cleared after use */
704 	u8 iflags;
705 
706 	/* State flags for kernel bookkeeping, unused by the hypervisor code */
707 	u8 sflags;
708 
709 	/*
710 	 * Don't run the guest (internal implementation need).
711 	 *
712 	 * Contrary to the flags above, this is set/cleared outside of
713 	 * a vcpu context, and thus cannot be mixed with the flags
714 	 * themselves (or the flag accesses need to be made atomic).
715 	 */
716 	bool pause;
717 
718 	/*
719 	 * We maintain more than a single set of debug registers to support
720 	 * debugging the guest from the host and to maintain separate host and
721 	 * guest state during world switches. vcpu_debug_state are the debug
722 	 * registers of the vcpu as the guest sees them.
723 	 *
724 	 * external_debug_state contains the debug values we want to debug the
725 	 * guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
726 	 *
727 	 * debug_ptr points to the set of debug registers that should be loaded
728 	 * onto the hardware when running the guest.
729 	 */
730 	struct kvm_guest_debug_arch *debug_ptr;
731 	struct kvm_guest_debug_arch vcpu_debug_state;
732 	struct kvm_guest_debug_arch external_debug_state;
733 
734 	/* VGIC state */
735 	struct vgic_cpu vgic_cpu;
736 	struct arch_timer_cpu timer_cpu;
737 	struct kvm_pmu pmu;
738 
739 	/*
740 	 * Guest registers we preserve during guest debugging.
741 	 *
742 	 * These shadow registers are updated by the kvm_handle_sys_reg
743 	 * trap handler if the guest accesses or updates them while we
744 	 * are using guest debug.
745 	 */
746 	struct {
747 		u32	mdscr_el1;
748 		bool	pstate_ss;
749 	} guest_debug_preserved;
750 
751 	/* vcpu power state */
752 	struct kvm_mp_state mp_state;
753 	spinlock_t mp_state_lock;
754 
755 	/* Cache some mmu pages needed inside spinlock regions */
756 	struct kvm_mmu_memory_cache mmu_page_cache;
757 
758 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
759 	u64 vsesr_el2;
760 
761 	/* Additional reset state */
762 	struct vcpu_reset_state	reset_state;
763 
764 	/* Guest PV state */
765 	struct {
766 		u64 last_steal;
767 		gpa_t base;
768 	} steal;
769 
770 	/* Per-vcpu CCSIDR override or NULL */
771 	u32 *ccsidr;
772 };
773 
774 /*
775  * Each 'flag' is composed of a comma-separated triplet:
776  *
777  * - the flag-set it belongs to in the vcpu->arch structure
778  * - the value for that flag
779  * - the mask for that flag
780  *
781  *  __vcpu_single_flag() builds such a triplet for a single-bit flag.
782  * unpack_vcpu_flag() extract the flag value from the triplet for
783  * unpack_vcpu_flag() extracts the flag value from the triplet for
784  */
785 #define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
786 
787 #define __unpack_flag(_set, _f, _m)	_f
788 #define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
789 
790 #define __build_check_flag(v, flagset, f, m)			\
791 	do {							\
792 		typeof(v->arch.flagset) *_fset;			\
793 								\
794 		/* Check that the flags fit in the mask */	\
795 		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
796 		/* Check that the flags fit in the type */	\
797 		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
798 	} while (0)
799 
800 #define __vcpu_get_flag(v, flagset, f, m)			\
801 	({							\
802 		__build_check_flag(v, flagset, f, m);		\
803 								\
804 		READ_ONCE(v->arch.flagset) & (m);		\
805 	})
806 
807 /*
808  * Note that the set/clear accessors must be preempt-safe in order to
809  * avoid nesting them with load/put which also manipulate flags...
810  */
811 #ifdef __KVM_NVHE_HYPERVISOR__
812 /* the nVHE hypervisor is always non-preemptible */
813 #define __vcpu_flags_preempt_disable()
814 #define __vcpu_flags_preempt_enable()
815 #else
816 #define __vcpu_flags_preempt_disable()	preempt_disable()
817 #define __vcpu_flags_preempt_enable()	preempt_enable()
818 #endif
819 
820 #define __vcpu_set_flag(v, flagset, f, m)			\
821 	do {							\
822 		typeof(v->arch.flagset) *fset;			\
823 								\
824 		__build_check_flag(v, flagset, f, m);		\
825 								\
826 		fset = &v->arch.flagset;			\
827 		__vcpu_flags_preempt_disable();			\
828 		if (HWEIGHT(m) > 1)				\
829 			*fset &= ~(m);				\
830 		*fset |= (f);					\
831 		__vcpu_flags_preempt_enable();			\
832 	} while (0)
833 
834 #define __vcpu_clear_flag(v, flagset, f, m)			\
835 	do {							\
836 		typeof(v->arch.flagset) *fset;			\
837 								\
838 		__build_check_flag(v, flagset, f, m);		\
839 								\
840 		fset = &v->arch.flagset;			\
841 		__vcpu_flags_preempt_disable();			\
842 		*fset &= ~(m);					\
843 		__vcpu_flags_preempt_enable();			\
844 	} while (0)
845 
846 #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
847 #define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
848 #define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
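/*
 * Editor's worked example: with IN_WFIT defined below as
 * __vcpu_single_flag(sflags, BIT(3)), vcpu_set_flag(vcpu, IN_WFIT) expands
 * to __vcpu_set_flag(vcpu, sflags, BIT(3), BIT(3)): the triplet names the
 * flag-set, the value and the mask, so the macro ORs BIT(3) into
 * vcpu->arch.sflags under the preemption guard, and vcpu_get_flag() simply
 * masks the same bit.
 */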
849 
850 /* SVE exposed to guest */
851 #define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
852 /* SVE config completed */
853 #define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
854 /* PTRAUTH exposed to guest */
855 #define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
856 /* KVM_ARM_VCPU_INIT completed */
857 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))
858 
859 /* Exception pending */
860 #define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
861 /*
862  * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
863  * be set together with an exception...
864  */
865 #define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
866 /* Target EL/MODE (not a single flag, but let's abuse the macro) */
867 #define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))
868 
869 /* Helpers to encode exceptions with minimum fuss */
870 #define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
871 #define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
872 #define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
873 
874 /*
875  * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
876  * values:
877  *
878  * For AArch32 EL1:
879  */
880 #define EXCEPT_AA32_UND		__vcpu_except_flags(0)
881 #define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
882 #define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
883 /* For AArch64: */
884 #define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
885 #define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
886 #define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
887 #define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
888 /* For AArch64 with NV: */
889 #define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
890 #define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
891 #define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
892 #define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
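/*
 * Editor's worked example: EXCEPT_MASK covers iflags bits [3:1], so
 * __EXCEPT_SHIFT is 1 and __vcpu_except_flags(5) builds the triplet
 * (iflags, 5 << 1, GENMASK(3, 1)). Setting EXCEPT_AA64_EL2_IRQ thus
 * writes 0b101 into bits [3:1] (clearing the field first, as the mask
 * has more than one bit set) without disturbing PENDING_EXCEPTION in
 * bit 0 or DEBUG_DIRTY in bit 4.
 */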
893 /* Guest debug is live */
894 #define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
895 /* Save SPE context if active  */
896 #define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
897 /* Save TRBE context if active  */
898 #define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
899 
900 /* SVE enabled for host EL0 */
901 #define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
902 /* SME enabled for EL0 */
903 #define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
904 /* Physical CPU not in supported_cpus */
905 #define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
906 /* WFIT instruction trapped */
907 #define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
908 /* vcpu system registers loaded on physical CPU */
909 #define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
910 /* Software step state is Active-pending */
911 #define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
912 /* PMUSERENR for the guest EL0 is on physical CPU */
913 #define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
914 /* WFI instruction trapped */
915 #define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
916 
917 
918 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
919 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
920 			     sve_ffr_offset((vcpu)->arch.sve_max_vl))
921 
922 #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
923 
924 #define vcpu_sve_zcr_elx(vcpu)						\
925 	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
926 
927 #define vcpu_sve_state_size(vcpu) ({					\
928 	size_t __size_ret;						\
929 	unsigned int __vcpu_vq;						\
930 									\
931 	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
932 		__size_ret = 0;						\
933 	} else {							\
934 		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
935 		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
936 	}								\
937 									\
938 	__size_ret;							\
939 })
940 
941 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
942 				 KVM_GUESTDBG_USE_SW_BP | \
943 				 KVM_GUESTDBG_USE_HW | \
944 				 KVM_GUESTDBG_SINGLESTEP)
945 
946 #define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
947 			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))
948 
949 #ifdef CONFIG_ARM64_PTR_AUTH
950 #define vcpu_has_ptrauth(vcpu)						\
951 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
952 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
953 	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
954 #else
955 #define vcpu_has_ptrauth(vcpu)		false
956 #endif
957 
958 #define vcpu_on_unsupported_cpu(vcpu)					\
959 	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
960 
961 #define vcpu_set_on_unsupported_cpu(vcpu)				\
962 	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
963 
964 #define vcpu_clear_on_unsupported_cpu(vcpu)				\
965 	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
966 
967 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
968 
969 /*
970  * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
971  * memory backed version of a register, and not the one most recently
972  * accessed by a running VCPU.  For example, for userspace access or
973  * for system registers that are never context switched, but only
974  * emulated.
975  *
976  * Don't bother with VNCR-based accesses in the nVHE code, it has no
977  * business dealing with NV.
978  */
979 static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
980 {
981 #if !defined (__KVM_NVHE_HYPERVISOR__)
982 	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
983 		     r >= __VNCR_START__ && ctxt->vncr_array))
984 		return &ctxt->vncr_array[r - __VNCR_START__];
985 #endif
986 	return (u64 *)&ctxt->sys_regs[r];
987 }
988 
989 #define __ctxt_sys_reg(c,r)						\
990 	({								\
991 		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
992 			     (r) >= NR_SYS_REGS);			\
993 		___ctxt_sys_reg(c, r);					\
994 	})
995 
996 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
997 
998 u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
999 #define __vcpu_sys_reg(v,r)						\
1000 	(*({								\
1001 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
1002 		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
1003 		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
1004 			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
1005 		__r;							\
1006 	}))
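/*
 * Editor's illustrative usage (a sketch, not a prescription): the accessor
 * is an lvalue, so the memory-backed copy can be both read and written,
 * e.g.:
 *
 *	u64 par = __vcpu_sys_reg(vcpu, PAR_EL1);
 *	__vcpu_sys_reg(vcpu, PAR_EL1) = par;
 *
 * For registers that may be live on the CPU, vcpu_read_sys_reg() and
 * vcpu_write_sys_reg() below are the appropriate interface instead.
 */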
1007 
1008 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
1009 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
1010 
1011 static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
1012 {
1013 	/*
1014 	 * *** VHE ONLY ***
1015 	 *
1016 	 * System registers listed in the switch are not saved on every
1017 	 * exit from the guest but are only saved on vcpu_put.
1018 	 *
1019 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
1020 	 * should never be listed below, because the guest cannot modify its
1021 	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
1022 	 * thread when emulating cross-VCPU communication.
1023 	 */
1024 	if (!has_vhe())
1025 		return false;
1026 
1027 	switch (reg) {
1028 	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
1029 	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
1030 	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
1031 	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
1032 	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
1033 	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
1034 	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
1035 	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
1036 	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
1037 	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
1038 	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
1039 	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
1040 	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
1041 	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
1042 	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
1043 	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
1044 	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
1045 	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
1046 	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
1047 	case PAR_EL1:		*val = read_sysreg_par();		break;
1048 	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
1049 	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
1050 	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
1051 	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
1052 	default:		return false;
1053 	}
1054 
1055 	return true;
1056 }
1057 
1058 static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
1059 {
1060 	/*
1061 	 * *** VHE ONLY ***
1062 	 *
1063 	 * System registers listed in the switch are not restored on every
1064 	 * entry to the guest but are only restored on vcpu_load.
1065 	 *
1066 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
1067 	 * should never be listed below, because the MPIDR should only be set
1068 	 * once, before running the VCPU, and never changed later.
1069 	 */
1070 	if (!has_vhe())
1071 		return false;
1072 
1073 	switch (reg) {
1074 	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
1075 	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
1076 	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
1077 	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
1078 	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
1079 	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
1080 	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
1081 	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
1082 	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
1083 	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
1084 	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
1085 	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
1086 	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
1087 	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
1088 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
1089 	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
1090 	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
1091 	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
1092 	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
1093 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
1094 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
1095 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
1096 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
1097 	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
1098 	default:		return false;
1099 	}
1100 
1101 	return true;
1102 }
1103 
1104 struct kvm_vm_stat {
1105 	struct kvm_vm_stat_generic generic;
1106 };
1107 
1108 struct kvm_vcpu_stat {
1109 	struct kvm_vcpu_stat_generic generic;
1110 	u64 hvc_exit_stat;
1111 	u64 wfe_exit_stat;
1112 	u64 wfi_exit_stat;
1113 	u64 mmio_exit_user;
1114 	u64 mmio_exit_kernel;
1115 	u64 signal_exits;
1116 	u64 exits;
1117 };
1118 
1119 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
1120 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
1121 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
1122 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
1123 
1124 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
1125 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
1126 
1127 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1128 			      struct kvm_vcpu_events *events);
1129 
1130 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1131 			      struct kvm_vcpu_events *events);
1132 
1133 void kvm_arm_halt_guest(struct kvm *kvm);
1134 void kvm_arm_resume_guest(struct kvm *kvm);
1135 
1136 #define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
1137 
1138 #ifndef __KVM_NVHE_HYPERVISOR__
1139 #define kvm_call_hyp_nvhe(f, ...)						\
1140 	({								\
1141 		struct arm_smccc_res res;				\
1142 									\
1143 		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
1144 				  ##__VA_ARGS__, &res);			\
1145 		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
1146 									\
1147 		res.a1;							\
1148 	})
1149 
1150 /*
1151  * The couple of isb() below are there to guarantee the same behaviour
1152  * on VHE as on !VHE, where the eret to EL1 acts as a context
1153  * synchronization event.
1154  */
1155 #define kvm_call_hyp(f, ...)						\
1156 	do {								\
1157 		if (has_vhe()) {					\
1158 			f(__VA_ARGS__);					\
1159 			isb();						\
1160 		} else {						\
1161 			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
1162 		}							\
1163 	} while(0)
1164 
1165 #define kvm_call_hyp_ret(f, ...)					\
1166 	({								\
1167 		typeof(f(__VA_ARGS__)) ret;				\
1168 									\
1169 		if (has_vhe()) {					\
1170 			ret = f(__VA_ARGS__);				\
1171 			isb();						\
1172 		} else {						\
1173 			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
1174 		}							\
1175 									\
1176 		ret;							\
1177 	})
1178 #else /* __KVM_NVHE_HYPERVISOR__ */
1179 #define kvm_call_hyp(f, ...) f(__VA_ARGS__)
1180 #define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
1181 #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
1182 #endif /* __KVM_NVHE_HYPERVISOR__ */
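/*
 * Editor's illustrative usage (based on callers elsewhere in
 * arch/arm64/kvm, e.g. arm.c):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * Under VHE this is a direct function call followed by an isb(); under
 * nVHE it becomes an HVC to the EL2 vectors via the SMCCC calling
 * convention, with the function selected by KVM_HOST_SMCCC_FUNC().
 */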
1183 
1184 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
1185 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
1186 
1187 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
1188 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
1189 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
1190 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
1191 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
1192 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
1193 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
1194 
1195 void kvm_sys_regs_create_debugfs(struct kvm *kvm);
1196 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
1197 
1198 int __init kvm_sys_reg_table_init(void);
1199 struct sys_reg_desc;
1200 int __init populate_sysreg_config(const struct sys_reg_desc *sr,
1201 				  unsigned int idx);
1202 int __init populate_nv_trap_config(void);
1203 
1204 bool lock_all_vcpus(struct kvm *kvm);
1205 void unlock_all_vcpus(struct kvm *kvm);
1206 
1207 void kvm_calculate_traps(struct kvm_vcpu *vcpu);
1208 
1209 /* MMIO helpers */
1210 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
1211 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
1212 
1213 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
1214 int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
1215 
1216 /*
1217  * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
1218  * arrived in guest context.  For arm64, any event that arrives while a vCPU is
1219  * loaded is considered to be "in guest".
1220  */
1221 static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
1222 {
1223 	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
1224 }
1225 
1226 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
1227 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
1228 void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
1229 
1230 bool kvm_arm_pvtime_supported(void);
1231 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
1232 			    struct kvm_device_attr *attr);
1233 int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
1234 			    struct kvm_device_attr *attr);
1235 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
1236 			    struct kvm_device_attr *attr);
1237 
1238 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
1239 int __init kvm_arm_vmid_alloc_init(void);
1240 void __init kvm_arm_vmid_alloc_free(void);
1241 bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
1242 void kvm_arm_vmid_clear_active(void);
1243 
1244 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
1245 {
1246 	vcpu_arch->steal.base = INVALID_GPA;
1247 }
1248 
1249 static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
1250 {
1251 	return (vcpu_arch->steal.base != INVALID_GPA);
1252 }
1253 
1254 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
1255 
1256 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
1257 
1258 DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
1259 
1260 /*
1261  * How we access per-CPU host data depends on where we access it from,
1262  * and the mode we're in:
1263  *
1264  * - VHE and nVHE hypervisor bits use their locally defined instance
1265  *
1266  * - the rest of the kernel use either the VHE or nVHE one, depending on
1267  *   the mode we're running in.
1268  *
1269  *   Unless we're in protected mode, fully deprivileged, and the nVHE
1270  *   Unless we're in protected mode: the kernel is then fully deprivileged
1271  *   and the nVHE per-CPU data is only accessible to the protected EL2 code.
1272  *   (which makes sense in a way as there shouldn't be any shared state
1273  *   between the host and the hypervisor).
1274  *
1275  * Yes, this is all totally trivial. Shoot me now.
1276  */
1277 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
1278 #define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
1279 #else
1280 #define host_data_ptr(f)						\
1281 	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
1282 	 &this_cpu_ptr(&kvm_host_data)->f :				\
1283 	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
1284 #endif
1285 
1286 /* Check whether the FP regs are owned by the guest */
1287 static inline bool guest_owns_fp_regs(void)
1288 {
1289 	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
1290 }
1291 
1292 /* Check whether the FP regs are owned by the host */
1293 static inline bool host_owns_fp_regs(void)
1294 {
1295 	return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
1296 }
1297 
1298 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
1299 {
1300 	/* The host's MPIDR is immutable, so let's set it up at boot time */
1301 	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
1302 }
1303 
1304 static inline bool kvm_system_needs_idmapped_vectors(void)
1305 {
1306 	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
1307 }
1308 
1309 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1310 
1311 void kvm_arm_init_debug(void);
1312 void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
1313 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
1314 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
1315 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
1316 
1317 #define kvm_vcpu_os_lock_enabled(vcpu)		\
1318 	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
1319 
1320 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
1321 			       struct kvm_device_attr *attr);
1322 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
1323 			       struct kvm_device_attr *attr);
1324 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
1325 			       struct kvm_device_attr *attr);
1326 
1327 int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
1328 			       struct kvm_arm_copy_mte_tags *copy_tags);
1329 int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
1330 				    struct kvm_arm_counter_offset *offset);
1331 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
1332 					struct reg_mask_range *range);
1333 
1334 /* Guest/host FPSIMD coordination helpers */
1335 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
1336 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
1337 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
1338 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
1339 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
1340 
1341 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
1342 {
1343 	return (!has_vhe() && attr->exclude_host);
1344 }
1345 
1346 /* Flags for host debug state */
1347 void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
1348 void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
1349 
1350 #ifdef CONFIG_KVM
1351 void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
1352 void kvm_clr_pmu_events(u64 clr);
1353 bool kvm_set_pmuserenr(u64 val);
1354 #else
1355 static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
1356 static inline void kvm_clr_pmu_events(u64 clr) {}
1357 static inline bool kvm_set_pmuserenr(u64 val)
1358 {
1359 	return false;
1360 }
1361 #endif
1362 
1363 void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
1364 void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
1365 
1366 int __init kvm_set_ipa_limit(void);
1367 u32 kvm_get_pa_bits(struct kvm *kvm);
1368 
1369 #define __KVM_HAVE_ARCH_VM_ALLOC
1370 struct kvm *kvm_arch_alloc_vm(void);
1371 
1372 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1373 
1374 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1375 
1376 #define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
1377 
1378 #define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)
1379 
1380 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
1381 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
1382 
1383 #define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
1384 
1385 #define kvm_has_mte(kvm)					\
1386 	(system_supports_mte() &&				\
1387 	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
1388 
1389 #define kvm_supports_32bit_el0()				\
1390 	(system_supports_32bit_el0() &&				\
1391 	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))
1392 
1393 #define kvm_vm_has_ran_once(kvm)					\
1394 	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
1395 
1396 static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
1397 {
1398 	return test_bit(feature, ka->vcpu_features);
1399 }
1400 
1401 #define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))
1402 
1403 #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
1404 
1405 int kvm_trng_call(struct kvm_vcpu *vcpu);
1406 #ifdef CONFIG_KVM
1407 extern phys_addr_t hyp_mem_base;
1408 extern phys_addr_t hyp_mem_size;
1409 void __init kvm_hyp_reserve(void);
1410 #else
1411 static inline void kvm_hyp_reserve(void) { }
1412 #endif
1413 
1414 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
1415 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
1416 
1417 static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
1418 {
1419 	switch (reg) {
1420 	case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
1421 		return &ka->id_regs[IDREG_IDX(reg)];
1422 	case SYS_CTR_EL0:
1423 		return &ka->ctr_el0;
1424 	default:
1425 		WARN_ON_ONCE(1);
1426 		return NULL;
1427 	}
1428 }
1429 
1430 #define kvm_read_vm_id_reg(kvm, reg)					\
1431 	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
1432 
1433 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
1434 
1435 #define __expand_field_sign_unsigned(id, fld, val)			\
1436 	((u64)SYS_FIELD_VALUE(id, fld, val))
1437 
1438 #define __expand_field_sign_signed(id, fld, val)			\
1439 	({								\
1440 		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
1441 		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
1442 	})
1443 
1444 #define expand_field_sign(id, fld, val)					\
1445 	(id##_##fld##_SIGNED ?						\
1446 	 __expand_field_sign_signed(id, fld, val) :			\
1447 	 __expand_field_sign_unsigned(id, fld, val))
1448 
1449 #define get_idreg_field_unsigned(kvm, id, fld)				\
1450 	({								\
1451 		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
1452 		FIELD_GET(id##_##fld##_MASK, __val);			\
1453 	})
1454 
1455 #define get_idreg_field_signed(kvm, id, fld)				\
1456 	({								\
1457 		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
1458 		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
1459 	})
1460 
1461 #define get_idreg_field_enum(kvm, id, fld)				\
1462 	get_idreg_field_unsigned(kvm, id, fld)
1463 
1464 #define get_idreg_field(kvm, id, fld)					\
1465 	(id##_##fld##_SIGNED ?						\
1466 	 get_idreg_field_signed(kvm, id, fld) :				\
1467 	 get_idreg_field_unsigned(kvm, id, fld))
1468 
1469 #define kvm_has_feat(kvm, id, fld, limit)				\
1470 	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
1471 
1472 #define kvm_has_feat_enum(kvm, id, fld, val)				\
1473 	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
1474 
1475 #define kvm_has_feat_range(kvm, id, fld, min, max)			\
1476 	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
1477 	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
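/*
 * Editor's worked example: kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP)
 * reads the VM's sanitised ID_AA64MMFR0_EL1, extracts the ECV field and
 * compares it against the encoding of IMP; ECV is unsigned, so this is a
 * plain >= test. Signed fields such as ID_AA64PFR0_EL1.FP (where 0b1111
 * means "not implemented") are sign-extended first, so the NI encoding
 * compares as -1 and correctly fails the >= IMP check.
 */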
1478 
1479 /* Check for a given level of PAuth support */
1480 #define kvm_has_pauth(k, l)						\
1481 	({								\
1482 		bool pa, pi, pa3;					\
1483 									\
1484 		pa  = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
1485 		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
1486 		pi  = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
1487 		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
1488 		pa3  = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
1489 		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
1490 									\
1491 		(pa + pi + pa3) == 1;					\
1492 	})
1493 
1494 #define kvm_has_fpmr(k)					\
1495 	(system_supports_fpmr() &&			\
1496 	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
1497 
1498 #endif /* __ARM64_KVM_HOST_H__ */
1499