/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/vncr_mapping.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING		KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL		KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4		KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU		KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND			KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0		KVM_ARCH_REQ(7)
#define KVM_REQ_NESTED_S2_UNMAP		KVM_ARCH_REQ(8)
#define KVM_REQ_GUEST_HYP_IRQ_PENDING	KVM_ARCH_REQ(9)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
	struct pkvm_mapping *mapping; /* only used from EL1 */
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head & PAGE_MASK);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
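
/*
 * The memcache is an intrusive LIFO: each stashed page's first word
 * stores the physical address of the next entry, so no bookkeeping
 * memory beyond the pages themselves is needed. A minimal usage sketch,
 * assuming hypothetical example_alloc()/example_to_pa() helpers (real
 * callers wire these up to GFP allocations on the kernel side, and to
 * the hyp allocator at EL2):
 *
 *	static void *example_alloc(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	static phys_addr_t example_to_pa(void *virt)
 *	{
 *		return virt_to_phys(virt);
 *	}
 *
 *	Then, to guarantee that at least 4 pages are stashed:
 *
 *	err = __topup_hyp_memcache(mc, 4, example_alloc, example_to_pa, NULL);
 */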

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/*
	 * VTCR value used on the host. For a non-NV guest (or a NV
	 * guest that runs in a context where its own S2 doesn't
	 * apply), its T0SZ value reflects that of the IPA size.
	 *
	 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
	 * the guest.
	 */
	u64	vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;

	/*
	 * For a shadow stage-2 MMU, the virtual vttbr used by the
	 * host to parse the guest S2.
	 * This either contains:
	 * - the virtual VTTBR programmed by the guest hypervisor with
	 *   CnP cleared
	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
	 *
	 * We also cache the full VTCR which gets used for TLB invalidation,
	 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
	 * to be cached in a TLB" to the letter.
	 */
	u64	tlb_vttbr;
	u64	tlb_vtcr;

	/*
	 * true when this represents a nested context where virtual
	 * HCR_EL2.VM == 1
	 */
	bool	nested_stage2_enabled;

	/*
	 * true when this MMU needs to be unmapped before being used for a new
	 * purpose.
	 */
	bool	pending_unmap;

	/*
	 *  0: Nobody is currently using this, check vttbr for validity
	 * >0: Somebody is actively using this.
	 */
	atomic_t refcnt;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
	bool enabled;
};

struct kvm_mpidr_data {
	u64			mpidr_mask;
	DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
};

static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{
	unsigned long index = 0, mask = data->mpidr_mask;
	unsigned long aff = mpidr & MPIDR_HWID_BITMASK;

	bitmap_gather(&index, &aff, &mask, fls(mask));

	return index;
}
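
/*
 * Worked example (illustrative): with mpidr_mask = 0x103 (bits 0, 1 and
 * 8 set), bitmap_gather() packs the masked affinity bits into a dense
 * index. An MPIDR with Aff0 = 0b10 and Aff1 = 0b1 has aff bits {1, 8}
 * set, which gather into index bits {1, 2}, i.e. index = 6. The mask is
 * built once the vcpu MPIDRs are known, from the affinity bits that
 * actually vary across them.
 */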

struct kvm_sysreg_masks;

enum fgt_group_id {
	__NO_FGT_GROUP__,
	HFGxTR_GROUP,
	HDFGRTR_GROUP,
	HDFGWTR_GROUP = HDFGRTR_GROUP,
	HFGITR_GROUP,
	HAFGRTR_GROUP,

	/* Must be last */
	__NR_FGT_GROUP_IDS__
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/*
	 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
	 * architecture. We track them globally, as we present the
	 * same feature-set to all vcpus.
	 *
	 * Index 0 is currently spare.
	 */
	u64 fgu[__NR_FGT_GROUP_IDS__];

	/*
	 * Stage 2 paging state for VMs with nested S2 using a virtual
	 * VMID.
	 */
	struct kvm_s2_mmu *nested_mmus;
	size_t nested_mmus_size;
	int nested_mmus_next;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		7
	/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED			8
	/* SVE exposed to guest */
#define KVM_ARCH_FLAG_GUEST_HAS_SVE			9
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/* MPIDR to vcpu index mapping, optional */
	struct kvm_mpidr_data *mpidr_data;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* PMCR_EL0.N value for the guest */
	u8 pmcr_n;

	/* Iterator for idreg debugfs */
	u8	idreg_debugfs_iter;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * Emulated CPU ID registers per VM
	 * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it
	 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
	 * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	u64 ctr_el0;

	/* Masks for VNCR-backed and general EL2 sysregs */
	struct kvm_sysreg_masks	*sysreg_masks;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};

struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * VNCR() places the VNCR-capable registers in the enum after
 * __VNCR_START__, and forces their value (after correction) to be an
 * 8-byte offset from the VNCR base. As we don't require the enum to be
 * otherwise ordered, we need the terrible hack below to ensure that we
 * correctly size the sys_regs array, no matter what.
 *
 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
 * treasure trove of bit hacks:
 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
 */
#define __MAX__(x,y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_ ## r) / 8),	\
	__after_##r = __MAX__(__before_##r - 1, r)

#define MARKER(m)				\
	m, __after_##m = m - 1
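
/*
 * As an illustration (the actual offset lives in vncr_mapping.h and is
 * only assumed here to be 0x10): VNCR(SCTLR_EL1) would expand to
 *
 *	__before_SCTLR_EL1,
 *	SCTLR_EL1 = __VNCR_START__ + 2,
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1)
 *
 * so the next enumerator resumes from whichever of the natural position
 * or the VNCR-derived position is larger, which is what keeps
 * NR_SYS_REGS big enough to cover every VNCR offset. MARKER() defines a
 * label without consuming an enum slot by the same __after_ trick.
 */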

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	POR_EL0,	/* Permission Overlay Register 0 (EL0) */

	/* FP/SIMD/SVE */
	SVCR,
	FPMR,

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	ZCR_EL2,	/* SVE Control Register (EL2) */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	PIRE0_EL2,	/* Permission Indirection Register 0 (EL2) */
	PIR_EL2,	/* Permission Indirection Register 1 (EL2) */
	POR_EL2,	/* Permission Overlay Register 2 (EL2) */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	/* Anything from this point on can be RES0/RES1 sanitised */
	MARKER(__SANITISED_REG_START__),
	TCR2_EL2,	/* Extended Translation Control Register (EL2) */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */

	/* Any VNCR-capable reg goes after this point */
	MARKER(__VNCR_START__),

	VNCR(SCTLR_EL1),/* System Control Register */
	VNCR(ACTLR_EL1),/* Auxiliary Control Register */
	VNCR(CPACR_EL1),/* Coprocessor Access Control */
	VNCR(ZCR_EL1),	/* SVE Control */
	VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
	VNCR(TCR_EL1),	/* Translation Control Register */
	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
	VNCR(ESR_EL1),	/* Exception Syndrome Register */
	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
	VNCR(FAR_EL1),	/* Fault Address Register */
	VNCR(MAIR_EL1),	/* Memory Attribute Indirection Register */
	VNCR(VBAR_EL1),	/* Vector Base Address Register */
	VNCR(CONTEXTIDR_EL1),	/* Context ID Register */
	VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
	VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
	VNCR(ELR_EL1),
	VNCR(SP_EL1),
	VNCR(SPSR_EL1),
	VNCR(TFSR_EL1),	/* Tag Fault Status Register (EL1) */
	VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
	VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
	VNCR(HCR_EL2),	/* Hypervisor Configuration Register */
	VNCR(HSTR_EL2),	/* Hypervisor System Trap Register */
	VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
	VNCR(VTCR_EL2),	/* Virtualization Translation Control Register */
	VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
	VNCR(HCRX_EL2),	/* Extended Hypervisor Configuration Register */

	/* Permission Indirection Extension registers */
	VNCR(PIR_EL1),	 /* Permission Indirection Register 1 (EL1) */
	VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */

	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */

	VNCR(HFGRTR_EL2),
	VNCR(HFGWTR_EL2),
	VNCR(HFGITR_EL2),
	VNCR(HDFGRTR_EL2),
	VNCR(HDFGWTR_EL2),
	VNCR(HAFGRTR_EL2),

	VNCR(CNTVOFF_EL2),
	VNCR(CNTV_CVAL_EL0),
	VNCR(CNTV_CTL_EL0),
	VNCR(CNTP_CVAL_EL0),
	VNCR(CNTP_CTL_EL0),

	VNCR(ICH_LR0_EL2),
	VNCR(ICH_LR1_EL2),
	VNCR(ICH_LR2_EL2),
	VNCR(ICH_LR3_EL2),
	VNCR(ICH_LR4_EL2),
	VNCR(ICH_LR5_EL2),
	VNCR(ICH_LR6_EL2),
	VNCR(ICH_LR7_EL2),
	VNCR(ICH_LR8_EL2),
	VNCR(ICH_LR9_EL2),
	VNCR(ICH_LR10_EL2),
	VNCR(ICH_LR11_EL2),
	VNCR(ICH_LR12_EL2),
	VNCR(ICH_LR13_EL2),
	VNCR(ICH_LR14_EL2),
	VNCR(ICH_LR15_EL2),

	VNCR(ICH_AP0R0_EL2),
	VNCR(ICH_AP0R1_EL2),
	VNCR(ICH_AP0R2_EL2),
	VNCR(ICH_AP0R3_EL2),
	VNCR(ICH_AP1R0_EL2),
	VNCR(ICH_AP1R1_EL2),
	VNCR(ICH_AP1R2_EL2),
	VNCR(ICH_AP1R3_EL2),
	VNCR(ICH_HCR_EL2),
	VNCR(ICH_VMCR_EL2),

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_sysreg_masks {
	struct {
		u64	res0;
		u64	res1;
	} mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
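
/*
 * A sketch of how these masks are meant to be consumed (the
 * authoritative logic lives in kvm_vcpu_apply_reg_masks()): RES0 bits
 * are forced to 0 and RES1 bits to 1, i.e. for a register r at or above
 * __SANITISED_REG_START__:
 *
 *	val &= ~masks->mask[r - __SANITISED_REG_START__].res0;
 *	val |=  masks->mask[r - __SANITISED_REG_START__].res1;
 */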

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;

	/* This pointer has to be 4kB aligned. */
	u64 *vncr_array;
};

struct cpu_sve_state {
	__u64 zcr_el1;

	/*
	 * Ordering is important since __sve_save_state/__sve_restore_state
	 * rely on it.
	 */
	__u32 fpsr;
	__u32 fpcr;

	/* Must be SVE_VQ_BYTES (128 bit) aligned. */
	__u8 sve_regs[];
};

/*
 * This structure is instantiated on a per-CPU basis, and contains
 * data that is:
 *
 * - tied to a single physical CPU, and
 * - either has a lifetime that does not extend past vcpu_put()
 * - or is an invariant for the lifetime of the system
 *
 * Use host_data_ptr(field) as a way to access a pointer to such a
 * field.
 */
struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_SPE			0
#define KVM_HOST_DATA_FLAG_HAS_TRBE			1
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	5
	unsigned long flags;

	struct kvm_cpu_context host_ctxt;

	/*
	 * Hyp VA.
	 * sve_state is only used in pKVM and if system_supports_sve().
	 */
	struct cpu_sve_state *sve_state;

	/* Used by pKVM only. */
	u64	fpmr;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_owner;

	/*
	 * host_debug_state contains the host registers which are
	 * saved and restored during world switches.
	 */
	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
		/* Values of trap registers for the host before guest entry. */
		u64 mdcr_el2;
	} host_debug_state;

	/* Guest trace filter value */
	u64 trfcr_while_in_guest;

	/* Number of programmable event counters (PMCR_EL0.N) for this CPU */
	unsigned int nr_event_counters;

	/* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
	unsigned int debug_brps;
	unsigned int debug_wrps;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE.  These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers.  When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state holds the debug
	 * registers of the vcpu as the guest sees them.
	 *
	 * external_debug_state contains the debug register values used to
	 * debug the guest from the host; it is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 */
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;
	u64 external_mdscr_el1;

	enum {
		VCPU_DEBUG_FREE,
		VCPU_DEBUG_HOST_OWNED,
		VCPU_DEBUG_GUEST_OWNED,
	} debug_owner;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Pages to top-up the pKVM/EL2 guest pool */
	struct kvm_hyp_memcache pkvm_memcache;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
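
/*
 * For example, vcpu_set_flag(vcpu, IN_WFIT) expands, via the
 * __vcpu_single_flag() triplet, to __vcpu_set_flag(vcpu, sflags, BIT(1),
 * BIT(1)): the build-time checks run, preemption is disabled, and BIT(1)
 * is OR-ed into vcpu->arch.sflags. For a multi-bit "flag" such as
 * EXCEPT_MASK below, the mask is cleared first so that stale field bits
 * cannot survive a new value.
 */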

/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)

/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(1))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(2))
/* Software step state is Active-pending for external debug */
#define HOST_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(3))
/* Software step state is Active-pending for guest debug */
#define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(6))
/* KVM is currently emulating a nested ERET */
#define IN_NESTED_ERET		__vcpu_single_flag(sflags, BIT(7))

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_zcr_elx(vcpu)						\
	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define kvm_has_sve(kvm)	(system_supports_sve() &&		\
				 test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))

#ifdef __KVM_NVHE_HYPERVISOR__
#define vcpu_has_sve(vcpu)	kvm_has_sve(kern_hyp_va((vcpu)->kvm))
#else
#define vcpu_has_sve(vcpu)	kvm_has_sve((vcpu)->kvm)
#endif

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||	\
	  vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 *
 * Don't bother with VNCR-based accesses in the nVHE code, it has no
 * business dealing with NV.
 */
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
{
#if !defined(__KVM_NVHE_HYPERVISOR__)
	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		     r >= __VNCR_START__ && ctxt->vncr_array))
		return &ctxt->vncr_array[r - __VNCR_START__];
#endif
	return (u64 *)&ctxt->sys_regs[r];
}

#define __ctxt_sys_reg(c,r)						\
	({								\
		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
			     (r) >= NR_SYS_REGS);			\
		___ctxt_sys_reg(c, r);					\
	})

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_sys_reg(v,r)						\
	(*({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
		__r;							\
	}))
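
/*
 * Typical use, as seen from an emulation path (illustrative only):
 *
 *	u64 spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
 *
 * This yields the memory-backed (possibly VNCR-backed) copy, with the
 * RES0/RES1 masks applied for NV guests. It deliberately does not look
 * at the registers currently loaded on the CPU; use vcpu_read_sys_reg()
 * below when the register may be live on the hardware.
 */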

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case TCR2_EL1:		*val = read_sysreg_s(SYS_TCR2_EL12);	break;
	case PIR_EL1:		*val = read_sysreg_s(SYS_PIR_EL12);	break;
	case PIRE0_EL1:		*val = read_sysreg_s(SYS_PIRE0_EL12);	break;
	case POR_EL1:		*val = read_sysreg_s(SYS_POR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
	default:		return false;
	}

	return true;
}
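
/*
 * Together, the two helpers above give the usual access pattern, which
 * is roughly what vcpu_read_sys_reg() does on VHE (a sketch, not a
 * verbatim copy of the implementation):
 *
 *	u64 val;
 *
 *	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 */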

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	(!!READ_ONCE((vcpu)->pid))

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
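
/*
 * Illustrative call sites (the real ones live in arch/arm64/kvm/),
 * one hypercall returning void and one returning a value:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	val = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
 *
 * On VHE this is a plain function call followed by an isb(); on nVHE it
 * becomes an HVC with the function ID derived by KVM_HOST_SMCCC_FUNC().
 */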

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
				  unsigned int idx);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

void kvm_calculate_traps(struct kvm_vcpu *vcpu);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

/*
 * How we access per-CPU host data depends on where we access it from,
 * and the mode we're in:
 *
 * - VHE and nVHE hypervisor bits use their locally defined instance
 *
 * - the rest of the kernel uses either the VHE or nVHE one, depending on
 *   the mode we're running in.
 *
 *   Unless we're in protected mode, fully deprivileged, and the nVHE
 *   per-CPU stuff is exclusively accessible to the protected EL2 code.
 *   In this case, the EL1 code uses the *VHE* data as its private state
 *   (which makes sense in a way as there shouldn't be any shared state
 *   between the host and the hypervisor).
 *
 * Yes, this is all totally trivial. Shoot me now.
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
#else
#define host_data_ptr(f)						\
	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
	 &this_cpu_ptr(&kvm_host_data)->f :				\
	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif

#define host_data_test_flag(flag)					\
	(test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
#define host_data_set_flag(flag)					\
	set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
#define host_data_clear_flag(flag)					\
	clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
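
/*
 * The token pasting keeps the call sites short. For example,
 *
 *	host_data_set_flag(TRBE_ENABLED);
 *
 * expands to set_bit(KVM_HOST_DATA_FLAG_TRBE_ENABLED,
 * host_data_ptr(flags)), and thus picks the per-CPU instance that is
 * correct for the current mode, as described above.
 */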

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
}

/* Check whether the FP regs are owned by the host */
static inline bool host_owns_fp_regs(void)
{
	return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
}

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

void kvm_init_host_debug_data(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

#define kvm_debug_regs_in_use(vcpu)		\
	((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
#define kvm_host_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
#define kvm_guest_owns_debug_regs(vcpu)		\
	((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
					struct reg_mask_range *range);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
void kvm_enable_trbe(void);
void kvm_disable_trbe(void);
void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
static inline void kvm_enable_trbe(void) {}
static inline void kvm_disable_trbe(void) {}
static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {}
#endif

void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);
u32 kvm_get_pa_bits(struct kvm *kvm);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define kvm_vm_is_protected(kvm)	(is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)

#define vcpu_is_protected(vcpu)		kvm_vm_is_protected((vcpu)->kvm)

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)					\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
{
	return test_bit(feature, ka->vcpu_features);
}

#define kvm_vcpu_has_feature(k, f)	__vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))

#define kvm_vcpu_initialized(v) vcpu_get_flag((v), VCPU_INITIALIZED)

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
{
	switch (reg) {
	case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
		return &ka->id_regs[IDREG_IDX(reg)];
	case SYS_CTR_EL0:
		return &ka->ctr_el0;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

#define kvm_read_vm_id_reg(kvm, reg)					\
	({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })

void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);

#define __expand_field_sign_unsigned(id, fld, val)			\
	((u64)SYS_FIELD_VALUE(id, fld, val))

#define __expand_field_sign_signed(id, fld, val)			\
	({								\
		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_unsigned(kvm, id, fld)				\
	({								\
		u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);	\
		FIELD_GET(id##_##fld##_MASK, __val);			\
	})

#define get_idreg_field_signed(kvm, id, fld)				\
	({								\
		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
	})

#define get_idreg_field_enum(kvm, id, fld)				\
	get_idreg_field_unsigned(kvm, id, fld)

#define kvm_cmp_feat_signed(kvm, id, fld, op, limit)			\
	(get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))

#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)			\
	(get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))

#define kvm_cmp_feat(kvm, id, fld, op, limit)				\
	(id##_##fld##_SIGNED ?						\
	 kvm_cmp_feat_signed(kvm, id, fld, op, limit) :			\
	 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))

#define kvm_has_feat(kvm, id, fld, limit)				\
	kvm_cmp_feat(kvm, id, fld, >=, limit)

#define kvm_has_feat_enum(kvm, id, fld, val)				\
	kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)

#define kvm_has_feat_range(kvm, id, fld, min, max)			\
	(kvm_cmp_feat(kvm, id, fld, >=, min) &&				\
	kvm_cmp_feat(kvm, id, fld, <=, max))
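
/*
 * Expansion example: kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP)
 * reads the VM's emulated ID_AA64MMFR3_EL1 via kvm_read_vm_id_reg(),
 * extracts the S1PIE field with ID_AA64MMFR3_EL1_S1PIE_MASK, and
 * compares it (signed or unsigned, as dictated by
 * ID_AA64MMFR3_EL1_S1PIE_SIGNED) against ID_AA64MMFR3_EL1_S1PIE_IMP
 * using >=.
 */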

/* Check for a given level of PAuth support */
#define kvm_has_pauth(k, l)						\
	({								\
		bool pa, pi, pa3;					\
									\
		pa  = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
		pi  = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
		pa3  = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
									\
		(pa + pi + pa3) == 1;					\
	})

#define kvm_has_fpmr(k)					\
	(system_supports_fpmr() &&			\
	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))

#define kvm_has_tcr2(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))

#define kvm_has_s1pie(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))

#define kvm_has_s1poe(k)				\
	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))

#endif /* __ARM64_KVM_HOST_H__ */