1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 *
6 * Derived from arch/arm/include/asm/kvm_host.h:
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
9 */
10
11 #ifndef __ARM64_KVM_HOST_H__
12 #define __ARM64_KVM_HOST_H__
13
14 #include <linux/arm-smccc.h>
15 #include <linux/bitmap.h>
16 #include <linux/types.h>
17 #include <linux/jump_label.h>
18 #include <linux/kvm_types.h>
19 #include <linux/maple_tree.h>
20 #include <linux/percpu.h>
21 #include <linux/psci.h>
22 #include <asm/arch_gicv3.h>
23 #include <asm/barrier.h>
24 #include <asm/cpufeature.h>
25 #include <asm/cputype.h>
26 #include <asm/daifflags.h>
27 #include <asm/fpsimd.h>
28 #include <asm/kvm.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/vncr_mapping.h>
31
32 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
33
34 #define KVM_HALT_POLL_NS_DEFAULT 500000
35
36 #include <kvm/arm_vgic.h>
37 #include <kvm/arm_arch_timer.h>
38 #include <kvm/arm_pmu.h>
39
40 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
41
42 #define KVM_VCPU_MAX_FEATURES 9
43 #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
44
45 #define KVM_REQ_SLEEP \
46 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
47 #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
48 #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
49 #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
50 #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
51 #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
52 #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
53 #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
54 #define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
55 #define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
56 #define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
57 #define KVM_REQ_VGIC_PROCESS_UPDATE KVM_ARCH_REQ(11)
58
59 #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
60 KVM_DIRTY_LOG_INITIALLY_SET)
61
62 #define KVM_HAVE_MMU_RWLOCK
63
64 /*
65 * Mode of operation configurable with kvm-arm.mode early param.
66 * See Documentation/admin-guide/kernel-parameters.txt for more information.
67 */
68 enum kvm_mode {
69 KVM_MODE_DEFAULT,
70 KVM_MODE_PROTECTED,
71 KVM_MODE_NV,
72 KVM_MODE_NONE,
73 };
74 #ifdef CONFIG_KVM
75 enum kvm_mode kvm_get_mode(void);
76 #else
77 static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
78 #endif
79
80 extern unsigned int __ro_after_init kvm_sve_max_vl;
81 extern unsigned int __ro_after_init kvm_host_sve_max_vl;
82 int __init kvm_arm_init_sve(void);
83
84 u32 __attribute_const__ kvm_target_cpu(void);
85 void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
86 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
87
88 struct kvm_hyp_memcache {
89 phys_addr_t head;
90 unsigned long nr_pages;
91 struct pkvm_mapping *mapping; /* only used from EL1 */
92
93 #define HYP_MEMCACHE_ACCOUNT_STAGE2 BIT(1)
94 unsigned long flags;
95 };
96
97 static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
98 phys_addr_t *p,
99 phys_addr_t (*to_pa)(void *virt))
100 {
101 *p = mc->head;
102 mc->head = to_pa(p);
103 mc->nr_pages++;
104 }
105
106 static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
107 void *(*to_va)(phys_addr_t phys))
108 {
109 phys_addr_t *p = to_va(mc->head & PAGE_MASK);
110
111 if (!mc->nr_pages)
112 return NULL;
113
114 mc->head = *p;
115 mc->nr_pages--;
116
117 return p;
118 }
119
120 static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
121 unsigned long min_pages,
122 void *(*alloc_fn)(void *arg),
123 phys_addr_t (*to_pa)(void *virt),
124 void *arg)
125 {
126 while (mc->nr_pages < min_pages) {
127 phys_addr_t *p = alloc_fn(arg);
128
129 if (!p)
130 return -ENOMEM;
131 push_hyp_memcache(mc, p, to_pa);
132 }
133
134 return 0;
135 }
136
137 static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
138 void (*free_fn)(void *virt, void *arg),
139 void *(*to_va)(phys_addr_t phys),
140 void *arg)
141 {
142 while (mc->nr_pages)
143 free_fn(pop_hyp_memcache(mc, to_va), arg);
144 }
145
146 void free_hyp_memcache(struct kvm_hyp_memcache *mc);
147 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
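/*
 * Illustrative usage (a hedged sketch, not part of the original header):
 * a caller that will need hyp/stage-2 pages in a non-sleeping context
 * tops up the memcache first and frees it once it is no longer needed.
 * Real call sites have more involved error handling.
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (topup_hyp_memcache(&mc, 4))
 *		return -ENOMEM;
 *	...
 *	free_hyp_memcache(&mc);
 */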
148
149 struct kvm_vmid {
150 atomic64_t id;
151 };
152
153 struct kvm_s2_mmu {
154 struct kvm_vmid vmid;
155
156 /*
157 * stage2 entry level table
158 *
159 * Two kvm_s2_mmu structures in the same VM can point to the same
160 * pgd here. This happens when running a guest using a
161 * translation regime that isn't affected by its own stage-2
162 * translation, such as a non-VHE hypervisor running at vEL2, or
163 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
164 * canonical stage-2 page tables.
165 */
166 phys_addr_t pgd_phys;
167 struct kvm_pgtable *pgt;
168
169 /*
170 * VTCR value used on the host. For a non-NV guest (or a NV
171 * guest that runs in a context where its own S2 doesn't
172 * apply), its T0SZ value reflects that of the IPA size.
173 *
174 * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
175 * the guest.
176 */
177 u64 vtcr;
178
179 /* The last vcpu id that ran on each physical CPU */
180 int __percpu *last_vcpu_ran;
181
182 #define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
183 /*
184 * Memory cache used to split
185 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
186 * is used to allocate stage2 page tables while splitting huge
187 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
188 * influences both the capacity of the split page cache, and
189 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
190 * too high.
191 *
192 * Protected by kvm->slots_lock.
193 */
194 struct kvm_mmu_memory_cache split_page_cache;
195 uint64_t split_page_chunk_size;
196
197 struct kvm_arch *arch;
198
199 /*
200 * For a shadow stage-2 MMU, the virtual vttbr used by the
201 * host to parse the guest S2.
202 * This either contains:
203 * - the virtual VTTBR programmed by the guest hypervisor with
204 * CnP cleared
205 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
206 *
207 * We also cache the full VTCR which gets used for TLB invalidation,
208 * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
209 * to be cached in a TLB" to the letter.
210 */
211 u64 tlb_vttbr;
212 u64 tlb_vtcr;
213
214 /*
215 * true when this represents a nested context where virtual
216 * HCR_EL2.VM == 1
217 */
218 bool nested_stage2_enabled;
219
220 #ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
221 struct dentry *shadow_pt_debugfs_dentry;
222 #endif
223
224 /*
225 * true when this MMU needs to be unmapped before being used for a new
226 * purpose.
227 */
228 bool pending_unmap;
229
230 /*
231 * 0: Nobody is currently using this, check vttbr for validity
232 * >0: Somebody is actively using this.
233 */
234 atomic_t refcnt;
235 };
236
237 struct kvm_arch_memory_slot {
238 };
239
240 /**
241 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
242 *
243 * @std_bmap: Bitmap of standard secure service calls
244 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
245 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
246 */
247 struct kvm_smccc_features {
248 unsigned long std_bmap;
249 unsigned long std_hyp_bmap;
250 unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
251 unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
252 };
253
254 typedef u16 pkvm_handle_t;
255
256 struct kvm_protected_vm {
257 pkvm_handle_t handle;
258 struct kvm_hyp_memcache teardown_mc;
259 struct kvm_hyp_memcache stage2_teardown_mc;
260 bool is_protected;
261 bool is_created;
262
263 /*
264 * True when the guest is being torn down. When in this state, the
265 * guest's vCPUs can't be loaded anymore, but its pages can be
266 * reclaimed by the host.
267 */
268 bool is_dying;
269 };
270
271 struct kvm_mpidr_data {
272 u64 mpidr_mask;
273 DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
274 };
275
276 static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
277 {
278 unsigned long index = 0, mask = data->mpidr_mask;
279 unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
280
281 bitmap_gather(&index, &aff, &mask, fls(mask));
282
283 return index;
284 }
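/*
 * Worked example (illustrative only): with mpidr_mask == 0x0000ff00
 * (i.e. only Aff1 varies across vCPUs), an MPIDR with Aff1 == 3 gives
 * aff == 0x300; bitmap_gather() compresses the masked bits into the
 * low-order bits of the result, so kvm_mpidr_index() returns 3.
 */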
285
286 struct kvm_sysreg_masks;
287
288 enum fgt_group_id {
289 __NO_FGT_GROUP__,
290 HFGRTR_GROUP,
291 HFGWTR_GROUP = HFGRTR_GROUP,
292 HDFGRTR_GROUP,
293 HDFGWTR_GROUP = HDFGRTR_GROUP,
294 HFGITR_GROUP,
295 HAFGRTR_GROUP,
296 HFGRTR2_GROUP,
297 HFGWTR2_GROUP = HFGRTR2_GROUP,
298 HDFGRTR2_GROUP,
299 HDFGWTR2_GROUP = HDFGRTR2_GROUP,
300 HFGITR2_GROUP,
301 ICH_HFGRTR_GROUP,
302 ICH_HFGWTR_GROUP = ICH_HFGRTR_GROUP,
303 ICH_HFGITR_GROUP,
304
305 /* Must be last */
306 __NR_FGT_GROUP_IDS__
307 };
308
309 struct kvm_arch {
310 struct kvm_s2_mmu mmu;
311
312 /*
313 * Fine-Grained UNDEF, mimicking the FGT layout defined by the
314 * architecture. We track them globally, as we present the
315 * same feature-set to all vcpus.
316 *
317 * Index 0 is currently spare.
318 */
319 u64 fgu[__NR_FGT_GROUP_IDS__];
320
321 /*
322 * Stage 2 paging state for VMs with nested S2 using a virtual
323 * VMID.
324 */
325 struct kvm_s2_mmu *nested_mmus;
326 size_t nested_mmus_size;
327 int nested_mmus_next;
328
329 /* Interrupt controller */
330 struct vgic_dist vgic;
331
332 /* Timers */
333 struct arch_timer_vm_data timer_data;
334
335 /* Mandated version of PSCI */
336 u32 psci_version;
337
338 /* Protects VM-scoped configuration data */
339 struct mutex config_lock;
340
341 /*
342 * If we encounter a data abort without valid instruction syndrome
343 * information, report this to user space. User space can (and
344 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
345 * supported.
346 */
347 #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
348 /* Memory Tagging Extension enabled for the guest */
349 #define KVM_ARCH_FLAG_MTE_ENABLED 1
350 /* At least one vCPU has run in the VM */
351 #define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
352 /* The vCPU feature set for the VM is configured */
353 #define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
354 /* PSCI SYSTEM_SUSPEND enabled for the guest */
355 #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
356 /* VM counter offset */
357 #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
358 /* Timer PPIs made immutable */
359 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
360 /* Initial ID reg values loaded */
361 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
362 /* Fine-Grained UNDEF initialised */
363 #define KVM_ARCH_FLAG_FGU_INITIALIZED 8
364 /* SVE exposed to guest */
365 #define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
366 /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
367 #define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
368 /* Unhandled SEAs are taken to userspace */
369 #define KVM_ARCH_FLAG_EXIT_SEA 11
370 unsigned long flags;
371
372 /* VM-wide vCPU feature set */
373 DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
374
375 /* MPIDR to vcpu index mapping, optional */
376 struct kvm_mpidr_data *mpidr_data;
377
378 /*
379 * VM-wide PMU filter, implemented as a bitmap and big enough for
380 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
381 */
382 unsigned long *pmu_filter;
383 struct arm_pmu *arm_pmu;
384
385 cpumask_var_t supported_cpus;
386
387 /* Maximum number of counters for the guest */
388 u8 nr_pmu_counters;
389
390 /* Hypercall features firmware registers' descriptor */
391 struct kvm_smccc_features smccc_feat;
392 struct maple_tree smccc_filter;
393
394 /*
395 * Emulated CPU ID registers per VM.
396 * The (Op0, Op1, CRn, CRm, Op2) encoding of an ID register saved here
397 * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
398 *
399 * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
400 * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
401 */
402 #define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
403 #define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
404 u64 id_regs[KVM_ARM_ID_REG_NUM];
405
406 u64 midr_el1;
407 u64 revidr_el1;
408 u64 aidr_el1;
409 u64 ctr_el0;
410
411 /* Masks for VNCR-backed and general EL2 sysregs */
412 struct kvm_sysreg_masks *sysreg_masks;
413
414 /* Count the number of VNCR_EL2 currently mapped */
415 atomic_t vncr_map_count;
416
417 /*
418 * For an untrusted host VM, 'pkvm.handle' is used to look up
419 * the associated pKVM instance in the hypervisor.
420 */
421 struct kvm_protected_vm pkvm;
422
423 #ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
424 /* Nested virtualization info */
425 struct dentry *debugfs_nv_dentry;
426 #endif
427 };
428
429 struct kvm_vcpu_fault_info {
430 u64 esr_el2; /* Hyp Syndrome Register */
431 u64 far_el2; /* Hyp Fault Address Register */
432 u64 hpfar_el2; /* Hyp IPA Fault Address Register */
433 u64 disr_el1; /* Deferred [SError] Status Register */
434 };
435
436 /*
437 * VNCR() just places the VNCR_capable registers in the enum after
438 * __VNCR_START__, with the value (after correction) being an 8-byte offset
439 * from the VNCR base. As we don't require the enum to be otherwise ordered,
440 * we need the terrible hack below to ensure that we correctly size the
441 * sys_regs array, no matter what.
442 *
443 * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
444 * treasure trove of bit hacks:
445 * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
446 */
447 #define __MAX__(x,y) ((x) ^ (((x) ^ (y)) & -((x) < (y))))
448 #define VNCR(r) \
449 __before_##r, \
450 r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
451 __after_##r = __MAX__(__before_##r - 1, r)
452
453 #define MARKER(m) \
454 m, __after_##m = m - 1
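/*
 * Expansion sketch (illustrative): for a VNCR-backed register,
 * VNCR(SCTLR_EL1) adds three enumerators:
 *
 *	__before_SCTLR_EL1,
 *	SCTLR_EL1 = __VNCR_START__ + (VNCR_SCTLR_EL1 / 8),
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1)
 *
 * so the next enumerator continues from whichever is larger: the slot
 * the register would otherwise have occupied, or its VNCR-derived value.
 * This is what keeps NR_SYS_REGS large enough regardless of ordering.
 */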
455
456 enum vcpu_sysreg {
457 __INVALID_SYSREG__, /* 0 is reserved as an invalid value */
458 MPIDR_EL1, /* MultiProcessor Affinity Register */
459 CLIDR_EL1, /* Cache Level ID Register */
460 CSSELR_EL1, /* Cache Size Selection Register */
461 TPIDR_EL0, /* Thread ID, User R/W */
462 TPIDRRO_EL0, /* Thread ID, User R/O */
463 TPIDR_EL1, /* Thread ID, Privileged */
464 CNTKCTL_EL1, /* Timer Control Register (EL1) */
465 PAR_EL1, /* Physical Address Register */
466 MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
467 OSLSR_EL1, /* OS Lock Status Register */
468 DISR_EL1, /* Deferred Interrupt Status Register */
469
470 /* Performance Monitors Registers */
471 PMCR_EL0, /* Control Register */
472 PMSELR_EL0, /* Event Counter Selection Register */
473 PMEVCNTR0_EL0, /* Event Counter Register (0-30) */
474 PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
475 PMCCNTR_EL0, /* Cycle Counter Register */
476 PMEVTYPER0_EL0, /* Event Type Register (0-30) */
477 PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
478 PMCCFILTR_EL0, /* Cycle Count Filter Register */
479 PMCNTENSET_EL0, /* Count Enable Set Register */
480 PMINTENSET_EL1, /* Interrupt Enable Set Register */
481 PMOVSSET_EL0, /* Overflow Flag Status Set Register */
482 PMUSERENR_EL0, /* User Enable Register */
483
484 /* Pointer Authentication Registers in a strict increasing order. */
485 APIAKEYLO_EL1,
486 APIAKEYHI_EL1,
487 APIBKEYLO_EL1,
488 APIBKEYHI_EL1,
489 APDAKEYLO_EL1,
490 APDAKEYHI_EL1,
491 APDBKEYLO_EL1,
492 APDBKEYHI_EL1,
493 APGAKEYLO_EL1,
494 APGAKEYHI_EL1,
495
496 /* Memory Tagging Extension registers */
497 RGSR_EL1, /* Random Allocation Tag Seed Register */
498 GCR_EL1, /* Tag Control Register */
499 TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
500
501 POR_EL0, /* Permission Overlay Register 0 (EL0) */
502
503 /* FP/SIMD/SVE */
504 SVCR,
505 FPMR,
506
507 /* 32bit specific registers. */
508 DACR32_EL2, /* Domain Access Control Register */
509 IFSR32_EL2, /* Instruction Fault Status Register */
510 FPEXC32_EL2, /* Floating-Point Exception Control Register */
511 DBGVCR32_EL2, /* Debug Vector Catch Register */
512
513 /* EL2 registers */
514 ACTLR_EL2, /* Auxiliary Control Register (EL2) */
515 CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
516 HACR_EL2, /* Hypervisor Auxiliary Control Register */
517 ZCR_EL2, /* SVE Control Register (EL2) */
518 TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
519 TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
520 TCR_EL2, /* Translation Control Register (EL2) */
521 PIRE0_EL2, /* Permission Indirection Register 0 (EL2) */
522 PIR_EL2, /* Permission Indirection Register 1 (EL2) */
523 POR_EL2, /* Permission Overlay Register 2 (EL2) */
524 SPSR_EL2, /* EL2 saved program status register */
525 ELR_EL2, /* EL2 exception link register */
526 AFSR0_EL2, /* Auxiliary Fault Status Register 0 (EL2) */
527 AFSR1_EL2, /* Auxiliary Fault Status Register 1 (EL2) */
528 ESR_EL2, /* Exception Syndrome Register (EL2) */
529 FAR_EL2, /* Fault Address Register (EL2) */
530 HPFAR_EL2, /* Hypervisor IPA Fault Address Register */
531 MAIR_EL2, /* Memory Attribute Indirection Register (EL2) */
532 AMAIR_EL2, /* Auxiliary Memory Attribute Indirection Register (EL2) */
533 VBAR_EL2, /* Vector Base Address Register (EL2) */
534 RVBAR_EL2, /* Reset Vector Base Address Register */
535 CONTEXTIDR_EL2, /* Context ID Register (EL2) */
536 SP_EL2, /* EL2 Stack Pointer */
537 CNTHP_CTL_EL2,
538 CNTHP_CVAL_EL2,
539 CNTHV_CTL_EL2,
540 CNTHV_CVAL_EL2,
541
542 /* Anything from this can be RES0/RES1 sanitised */
543 MARKER(__SANITISED_REG_START__),
544 SCTLR_EL2, /* System Control Register (EL2) */
545 TCR2_EL2, /* Extended Translation Control Register (EL2) */
546 SCTLR2_EL2, /* System Control Register 2 (EL2) */
547 MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
548 CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
549
550 /* Any VNCR-capable reg goes after this point */
551 MARKER(__VNCR_START__),
552
553 VNCR(SCTLR_EL1),/* System Control Register */
554 VNCR(ACTLR_EL1),/* Auxiliary Control Register */
555 VNCR(CPACR_EL1),/* Coprocessor Access Control */
556 VNCR(ZCR_EL1), /* SVE Control */
557 VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
558 VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
559 VNCR(TCR_EL1), /* Translation Control Register */
560 VNCR(TCR2_EL1), /* Extended Translation Control Register */
561 VNCR(SCTLR2_EL1), /* System Control Register 2 */
562 VNCR(ESR_EL1), /* Exception Syndrome Register */
563 VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
564 VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
565 VNCR(FAR_EL1), /* Fault Address Register */
566 VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */
567 VNCR(VBAR_EL1), /* Vector Base Address Register */
568 VNCR(CONTEXTIDR_EL1), /* Context ID Register */
569 VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
570 VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
571 VNCR(ELR_EL1),
572 VNCR(SP_EL1),
573 VNCR(SPSR_EL1),
574 VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */
575 VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
576 VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
577 VNCR(HCR_EL2), /* Hypervisor Configuration Register */
578 VNCR(HSTR_EL2), /* Hypervisor System Trap Register */
579 VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
580 VNCR(VTCR_EL2), /* Virtualization Translation Control Register */
581 VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
582 VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */
583
584 /* Permission Indirection Extension registers */
585 VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */
586 VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */
587
588 VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
589
590 /* FEAT_RAS registers */
591 VNCR(VDISR_EL2),
592 VNCR(VSESR_EL2),
593
594 VNCR(HFGRTR_EL2),
595 VNCR(HFGWTR_EL2),
596 VNCR(HFGITR_EL2),
597 VNCR(HDFGRTR_EL2),
598 VNCR(HDFGWTR_EL2),
599 VNCR(HAFGRTR_EL2),
600 VNCR(HFGRTR2_EL2),
601 VNCR(HFGWTR2_EL2),
602 VNCR(HFGITR2_EL2),
603 VNCR(HDFGRTR2_EL2),
604 VNCR(HDFGWTR2_EL2),
605
606 VNCR(VNCR_EL2),
607
608 VNCR(CNTVOFF_EL2),
609 VNCR(CNTV_CVAL_EL0),
610 VNCR(CNTV_CTL_EL0),
611 VNCR(CNTP_CVAL_EL0),
612 VNCR(CNTP_CTL_EL0),
613
614 VNCR(ICH_LR0_EL2),
615 VNCR(ICH_LR1_EL2),
616 VNCR(ICH_LR2_EL2),
617 VNCR(ICH_LR3_EL2),
618 VNCR(ICH_LR4_EL2),
619 VNCR(ICH_LR5_EL2),
620 VNCR(ICH_LR6_EL2),
621 VNCR(ICH_LR7_EL2),
622 VNCR(ICH_LR8_EL2),
623 VNCR(ICH_LR9_EL2),
624 VNCR(ICH_LR10_EL2),
625 VNCR(ICH_LR11_EL2),
626 VNCR(ICH_LR12_EL2),
627 VNCR(ICH_LR13_EL2),
628 VNCR(ICH_LR14_EL2),
629 VNCR(ICH_LR15_EL2),
630
631 VNCR(ICH_AP0R0_EL2),
632 VNCR(ICH_AP0R1_EL2),
633 VNCR(ICH_AP0R2_EL2),
634 VNCR(ICH_AP0R3_EL2),
635 VNCR(ICH_AP1R0_EL2),
636 VNCR(ICH_AP1R1_EL2),
637 VNCR(ICH_AP1R2_EL2),
638 VNCR(ICH_AP1R3_EL2),
639 VNCR(ICH_HCR_EL2),
640 VNCR(ICH_VMCR_EL2),
641
642 VNCR(ICH_HFGRTR_EL2),
643 VNCR(ICH_HFGWTR_EL2),
644 VNCR(ICH_HFGITR_EL2),
645
646 NR_SYS_REGS /* Nothing after this line! */
647 };
648
649 struct resx {
650 u64 res0;
651 u64 res1;
652 };
653
654 struct kvm_sysreg_masks {
655 struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__];
656 };
657
658 static inline struct resx __kvm_get_sysreg_resx(struct kvm_arch *arch,
659 enum vcpu_sysreg sr)
660 {
661 struct kvm_sysreg_masks *masks;
662
663 masks = arch->sysreg_masks;
664 if (likely(masks &&
665 sr >= __SANITISED_REG_START__ && sr < NR_SYS_REGS))
666 return masks->mask[sr - __SANITISED_REG_START__];
667
668 return (struct resx){};
669 }
670
671 #define kvm_get_sysreg_resx(k, sr) __kvm_get_sysreg_resx(&(k)->arch, (sr))
672
673 static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch,
674 enum vcpu_sysreg sr, struct resx resx)
675 {
676 arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx;
677 }
678
679 #define kvm_set_sysreg_resx(k, sr, resx) \
680 __kvm_set_sysreg_resx(&(k)->arch, (sr), (resx))
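/*
 * Usage sketch (illustrative): RES0/RES1 masks are only meaningful for
 * registers at or after __SANITISED_REG_START__. A typical sanitisation
 * pattern clears the RES0 bits and sets the RES1 bits:
 *
 *	struct resx fixed = kvm_get_sysreg_resx(kvm, HCR_EL2);
 *
 *	val &= ~fixed.res0;
 *	val |= fixed.res1;
 */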
681
682 struct fgt_masks {
683 const char *str;
684 u64 mask;
685 u64 nmask;
686 u64 res0;
687 u64 res1;
688 };
689
690 extern struct fgt_masks hfgrtr_masks;
691 extern struct fgt_masks hfgwtr_masks;
692 extern struct fgt_masks hfgitr_masks;
693 extern struct fgt_masks hdfgrtr_masks;
694 extern struct fgt_masks hdfgwtr_masks;
695 extern struct fgt_masks hafgrtr_masks;
696 extern struct fgt_masks hfgrtr2_masks;
697 extern struct fgt_masks hfgwtr2_masks;
698 extern struct fgt_masks hfgitr2_masks;
699 extern struct fgt_masks hdfgrtr2_masks;
700 extern struct fgt_masks hdfgwtr2_masks;
701 extern struct fgt_masks ich_hfgrtr_masks;
702 extern struct fgt_masks ich_hfgwtr_masks;
703 extern struct fgt_masks ich_hfgitr_masks;
704
705 extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
706 extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
707 extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks);
708 extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks);
709 extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks);
710 extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks);
711 extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks);
712 extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
713 extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
714 extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
715 extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
716 extern struct fgt_masks kvm_nvhe_sym(ich_hfgrtr_masks);
717 extern struct fgt_masks kvm_nvhe_sym(ich_hfgwtr_masks);
718 extern struct fgt_masks kvm_nvhe_sym(ich_hfgitr_masks);
719
720 struct kvm_cpu_context {
721 struct user_pt_regs regs; /* sp = sp_el0 */
722
723 u64 spsr_abt;
724 u64 spsr_und;
725 u64 spsr_irq;
726 u64 spsr_fiq;
727
728 struct user_fpsimd_state fp_regs;
729
730 u64 sys_regs[NR_SYS_REGS];
731
732 struct kvm_vcpu *__hyp_running_vcpu;
733
734 /* This pointer has to be 4kB aligned. */
735 u64 *vncr_array;
736 };
737
738 struct cpu_sve_state {
739 __u64 zcr_el1;
740
741 /*
742 * Ordering is important since __sve_save_state/__sve_restore_state
743 * relies on it.
744 */
745 __u32 fpsr;
746 __u32 fpcr;
747
748 /* Must be SVE_VQ_BYTES (128 bit) aligned. */
749 __u8 sve_regs[];
750 };
751
752 /*
753 * This structure is instantiated on a per-CPU basis, and contains
754 * data that is:
755 *
756 * - tied to a single physical CPU, and
757 * - either has a lifetime that does not extend past vcpu_put()
758 * - or is an invariant for the lifetime of the system
759 *
760 * Use host_data_ptr(field) as a way to access a pointer to such a
761 * field.
762 */
763 struct kvm_host_data {
764 #define KVM_HOST_DATA_FLAG_HAS_SPE 0
765 #define KVM_HOST_DATA_FLAG_HAS_TRBE 1
766 #define KVM_HOST_DATA_FLAG_TRBE_ENABLED 2
767 #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 3
768 #define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 4
769 #define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 5
770 #define KVM_HOST_DATA_FLAG_HAS_BRBE 6
771 unsigned long flags;
772
773 struct kvm_cpu_context host_ctxt;
774
775 /*
776 * Hyp VA.
777 * sve_state is only used in pKVM and if system_supports_sve().
778 */
779 struct cpu_sve_state *sve_state;
780
781 /* Used by pKVM only. */
782 u64 fpmr;
783
784 /* Ownership of the FP regs */
785 enum {
786 FP_STATE_FREE,
787 FP_STATE_HOST_OWNED,
788 FP_STATE_GUEST_OWNED,
789 } fp_owner;
790
791 /*
792 * host_debug_state contains the host registers which are
793 * saved and restored during world switches.
794 */
795 struct {
796 /* {Break,watch}point registers */
797 struct kvm_guest_debug_arch regs;
798 /* Statistical profiling extension */
799 u64 pmscr_el1;
800 u64 pmblimitr_el1;
801 /* Self-hosted trace */
802 u64 trfcr_el1;
803 u64 trblimitr_el1;
804 /* Values of trap registers for the host before guest entry. */
805 u64 mdcr_el2;
806 u64 brbcr_el1;
807 } host_debug_state;
808
809 /* Guest trace filter value */
810 u64 trfcr_while_in_guest;
811
812 /* Number of programmable event counters (PMCR_EL0.N) for this CPU */
813 unsigned int nr_event_counters;
814
815 /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
816 unsigned int debug_brps;
817 unsigned int debug_wrps;
818
819 /* Last vgic_irq part of the AP list recorded in an LR */
820 struct vgic_irq *last_lr_irq;
821
822 /* PPI state tracking for GICv5-based guests */
823 struct {
824 DECLARE_BITMAP(pendr, VGIC_V5_NR_PRIVATE_IRQS);
825
826 /* The saved state of the regs when leaving the guest */
827 DECLARE_BITMAP(activer_exit, VGIC_V5_NR_PRIVATE_IRQS);
828 } vgic_v5_ppi_state;
829 };
830
831 struct kvm_host_psci_config {
832 /* PSCI version used by host. */
833 u32 version;
834 u32 smccc_version;
835
836 /* Function IDs used by host if version is v0.1. */
837 struct psci_0_1_function_ids function_ids_0_1;
838
839 bool psci_0_1_cpu_suspend_implemented;
840 bool psci_0_1_cpu_on_implemented;
841 bool psci_0_1_cpu_off_implemented;
842 bool psci_0_1_migrate_implemented;
843 };
844
845 extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
846 #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
847
848 extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
849 #define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
850
851 extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
852 #define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
853
854 struct vcpu_reset_state {
855 unsigned long pc;
856 unsigned long r0;
857 bool be;
858 bool reset;
859 };
860
861 struct vncr_tlb;
862
863 struct kvm_vcpu_arch {
864 struct kvm_cpu_context ctxt;
865
866 /*
867 * Guest floating point state
868 *
869 * The architecture has two main floating point extensions,
870 * the original FPSIMD and SVE. These have overlapping
871 * register views, with the FPSIMD V registers occupying the
872 * low 128 bits of the SVE Z registers. When the core
873 * floating point code saves the register state of a task it
874 * records which view it saved in fp_type.
875 */
876 void *sve_state;
877 enum fp_type fp_type;
878 unsigned int sve_max_vl;
879
880 /* Stage 2 paging state used by the hardware on next switch */
881 struct kvm_s2_mmu *hw_mmu;
882
883 /* Values of trap registers for the guest. */
884 u64 hcr_el2;
885 u64 hcrx_el2;
886 u64 mdcr_el2;
887
888 struct {
889 u64 r;
890 u64 w;
891 } fgt[__NR_FGT_GROUP_IDS__];
892
893 /* Exception Information */
894 struct kvm_vcpu_fault_info fault;
895
896 /* Configuration flags, set once and for all before the vcpu can run */
897 u8 cflags;
898
899 /* Input flags to the hypervisor code, potentially cleared after use */
900 u8 iflags;
901
902 /* State flags for kernel bookkeeping, unused by the hypervisor code */
903 u16 sflags;
904
905 /*
906 * Don't run the guest (internal implementation need).
907 *
908 * Contrary to the flags above, this is set/cleared outside of
909 * a vcpu context, and thus cannot be mixed with the flags
910 * themselves (or the flag accesses need to be made atomic).
911 */
912 bool pause;
913
914 /*
915 * We maintain more than a single set of debug registers to support
916 * debugging the guest from the host and to maintain separate host and
917 * guest state during world switches. vcpu_debug_state are the debug
918 * registers of the vcpu as the guest sees them.
919 *
920 * external_debug_state contains the debug values we want to use when
921 * debugging the guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
922 */
923 struct kvm_guest_debug_arch vcpu_debug_state;
924 struct kvm_guest_debug_arch external_debug_state;
925 u64 external_mdscr_el1;
926
927 enum {
928 VCPU_DEBUG_FREE,
929 VCPU_DEBUG_HOST_OWNED,
930 VCPU_DEBUG_GUEST_OWNED,
931 } debug_owner;
932
933 /* VGIC state */
934 struct vgic_cpu vgic_cpu;
935 struct arch_timer_cpu timer_cpu;
936 struct kvm_pmu pmu;
937
938 /* vcpu power state */
939 struct kvm_mp_state mp_state;
940 spinlock_t mp_state_lock;
941
942 /* Cache some mmu pages needed inside spinlock regions */
943 struct kvm_mmu_memory_cache mmu_page_cache;
944
945 /* Pages to top-up the pKVM/EL2 guest pool */
946 struct kvm_hyp_memcache pkvm_memcache;
947
948 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
949 u64 vsesr_el2;
950
951 /* Additional reset state */
952 struct vcpu_reset_state reset_state;
953
954 /* Guest PV state */
955 struct {
956 u64 last_steal;
957 gpa_t base;
958 } steal;
959
960 /* Per-vcpu CCSIDR override or NULL */
961 u32 *ccsidr;
962
963 /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */
964 struct vncr_tlb *vncr_tlb;
965
966 /* Hyp-readable copy of kvm_vcpu::pid */
967 pid_t pid;
968 };
969
970 /*
971 * Each 'flag' is composed of a comma-separated triplet:
972 *
973 * - the flag-set it belongs to in the vcpu->arch structure
974 * - the value for that flag
975 * - the mask for that flag
976 *
977 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
978 * unpack_vcpu_flag() extracts the flag value from the triplet for
979 * direct use outside of the flag accessors.
980 */
981 #define __vcpu_single_flag(_set, _f) _set, (_f), (_f)
982
983 #define __unpack_flag(_set, _f, _m) _f
984 #define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
985
986 #define __build_check_flag(v, flagset, f, m) \
987 do { \
988 typeof(v->arch.flagset) *_fset; \
989 \
990 /* Check that the flags fit in the mask */ \
991 BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
992 /* Check that the flags fit in the type */ \
993 BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \
994 } while (0)
995
996 #define __vcpu_get_flag(v, flagset, f, m) \
997 ({ \
998 __build_check_flag(v, flagset, f, m); \
999 \
1000 READ_ONCE(v->arch.flagset) & (m); \
1001 })
1002
1003 /*
1004 * Note that the set/clear accessors must be preempt-safe in order to
1005 * avoid nesting them with load/put which also manipulate flags...
1006 */
1007 #ifdef __KVM_NVHE_HYPERVISOR__
1008 /* the nVHE hypervisor is always non-preemptible */
1009 #define __vcpu_flags_preempt_disable()
1010 #define __vcpu_flags_preempt_enable()
1011 #else
1012 #define __vcpu_flags_preempt_disable() preempt_disable()
1013 #define __vcpu_flags_preempt_enable() preempt_enable()
1014 #endif
1015
1016 #define __vcpu_set_flag(v, flagset, f, m) \
1017 do { \
1018 typeof(v->arch.flagset) *fset; \
1019 \
1020 __build_check_flag(v, flagset, f, m); \
1021 \
1022 fset = &v->arch.flagset; \
1023 __vcpu_flags_preempt_disable(); \
1024 if (HWEIGHT(m) > 1) \
1025 *fset &= ~(m); \
1026 *fset |= (f); \
1027 __vcpu_flags_preempt_enable(); \
1028 } while (0)
1029
1030 #define __vcpu_clear_flag(v, flagset, f, m) \
1031 do { \
1032 typeof(v->arch.flagset) *fset; \
1033 \
1034 __build_check_flag(v, flagset, f, m); \
1035 \
1036 fset = &v->arch.flagset; \
1037 __vcpu_flags_preempt_disable(); \
1038 *fset &= ~(m); \
1039 __vcpu_flags_preempt_enable(); \
1040 } while (0)
1041
1042 #define __vcpu_test_and_clear_flag(v, flagset, f, m) \
1043 ({ \
1044 typeof(v->arch.flagset) set; \
1045 \
1046 set = __vcpu_get_flag(v, flagset, f, m); \
1047 __vcpu_clear_flag(v, flagset, f, m); \
1048 \
1049 set; \
1050 })
1051
1052 #define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
1053 #define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
1054 #define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
1055 #define vcpu_test_and_clear_flag(v, ...) \
1056 __vcpu_test_and_clear_flag((v), __VA_ARGS__)
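/*
 * Usage sketch (illustrative, using the flags defined below):
 *
 *	vcpu_set_flag(vcpu, IN_WFIT);
 *	if (vcpu_get_flag(vcpu, INCREMENT_PC))
 *		...;
 *	vcpu_clear_flag(vcpu, IN_WFIT);
 *
 * A flag name such as IN_WFIT expands to the (flagset, value, mask)
 * triplet built by __vcpu_single_flag(), e.g. (sflags, BIT(1), BIT(1)).
 */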
1057
1058 /* KVM_ARM_VCPU_INIT completed */
1059 #define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
1060 /* SVE config completed */
1061 #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
1062 /* pKVM VCPU setup completed */
1063 #define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
1064
1065 /* Exception pending */
1066 #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
1067 /*
1068 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
1069 * be set together with an exception...
1070 */
1071 #define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1))
1072 /* Target EL/MODE (not a single flag, but let's abuse the macro) */
1073 #define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1))
1074
1075 /* Helpers to encode exceptions with minimum fuss */
1076 #define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK)
1077 #define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL)
1078 #define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
1079
1080 /*
1081 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
1082 * values:
1083 *
1084 * For AArch32 EL1:
1085 */
1086 #define EXCEPT_AA32_UND __vcpu_except_flags(0)
1087 #define EXCEPT_AA32_IABT __vcpu_except_flags(1)
1088 #define EXCEPT_AA32_DABT __vcpu_except_flags(2)
1089 /* For AArch64: */
1090 #define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0)
1091 #define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1)
1092 #define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2)
1093 #define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3)
1094 /* For AArch64 with NV: */
1095 #define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4)
1096 #define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
1097 #define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
1098 #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
1099
1100 /* Physical CPU not in supported_cpus */
1101 #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0))
1102 /* WFIT instruction trapped */
1103 #define IN_WFIT __vcpu_single_flag(sflags, BIT(1))
1104 /* vcpu system registers loaded on physical CPU */
1105 #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2))
1106 /* Software step state is Active-pending for external debug */
1107 #define HOST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3))
1108 /* Software step state is Active-pending for guest debug */
1109 #define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4))
1110 /* PMUSERENR for the guest EL0 is on physical CPU */
1111 #define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5))
1112 /* WFI instruction trapped */
1113 #define IN_WFI __vcpu_single_flag(sflags, BIT(6))
1114 /* KVM is currently emulating a nested ERET */
1115 #define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7))
1116 /* SError pending for nested guest */
1117 #define NESTED_SERROR_PENDING __vcpu_single_flag(sflags, BIT(8))
1118
1119
1120 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
1121 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
1122 sve_ffr_offset((vcpu)->arch.sve_max_vl))
1123
1124 #define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
1125
1126 #define vcpu_sve_zcr_elx(vcpu) \
1127 (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
1128
1129 #define sve_state_size_from_vl(sve_max_vl) ({ \
1130 size_t __size_ret; \
1131 unsigned int __vq; \
1132 \
1133 if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
1134 __size_ret = 0; \
1135 } else { \
1136 __vq = sve_vq_from_vl(sve_max_vl); \
1137 __size_ret = SVE_SIG_REGS_SIZE(__vq); \
1138 } \
1139 \
1140 __size_ret; \
1141 })
1142
1143 #define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
1144
1145 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
1146 KVM_GUESTDBG_USE_SW_BP | \
1147 KVM_GUESTDBG_USE_HW | \
1148 KVM_GUESTDBG_SINGLESTEP)
1149
1150 #define kvm_has_sve(kvm) (system_supports_sve() && \
1151 test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))
1152
1153 #ifdef __KVM_NVHE_HYPERVISOR__
1154 #define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm))
1155 #else
1156 #define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
1157 #endif
1158
1159 #ifdef CONFIG_ARM64_PTR_AUTH
1160 #define vcpu_has_ptrauth(vcpu) \
1161 ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
1162 cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
1163 (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \
1164 vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
1165 #else
1166 #define vcpu_has_ptrauth(vcpu) false
1167 #endif
1168
1169 #define vcpu_on_unsupported_cpu(vcpu) \
1170 vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
1171
1172 #define vcpu_set_on_unsupported_cpu(vcpu) \
1173 vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
1174
1175 #define vcpu_clear_on_unsupported_cpu(vcpu) \
1176 vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
1177
1178 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
1179
1180 /*
1181 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
1182 * memory backed version of a register, and not the one most recently
1183 * accessed by a running VCPU. For example, for userspace access or
1184 * for system registers that are never context switched, but only
1185 * emulated.
1186 *
1187 * Don't bother with VNCR-based accesses in the nVHE code, it has no
1188 * business dealing with NV.
1189 */
1190 static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
1191 {
1192 #if !defined (__KVM_NVHE_HYPERVISOR__)
1193 if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
1194 r >= __VNCR_START__ && ctxt->vncr_array))
1195 return &ctxt->vncr_array[r - __VNCR_START__];
1196 #endif
1197 return (u64 *)&ctxt->sys_regs[r];
1198 }
1199
1200 #define __ctxt_sys_reg(c,r) \
1201 ({ \
1202 BUILD_BUG_ON(__builtin_constant_p(r) && \
1203 (r) >= NR_SYS_REGS); \
1204 ___ctxt_sys_reg(c, r); \
1205 })
1206
1207 #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
1208
1209 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
1210
1211 #define __vcpu_assign_sys_reg(v, r, val) \
1212 do { \
1213 const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
1214 u64 __v = (val); \
1215 if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
1216 __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
1217 \
1218 ctxt_sys_reg(ctxt, (r)) = __v; \
1219 } while (0)
1220
1221 #define __vcpu_rmw_sys_reg(v, r, op, val) \
1222 do { \
1223 const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
1224 u64 __v = ctxt_sys_reg(ctxt, (r)); \
1225 __v op (val); \
1226 if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
1227 __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
1228 \
1229 ctxt_sys_reg(ctxt, (r)) = __v; \
1230 } while (0)
1231
1232 #define __vcpu_sys_reg(v,r) \
1233 ({ \
1234 const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
1235 u64 __v = ctxt_sys_reg(ctxt, (r)); \
1236 if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
1237 __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
1238 __v; \
1239 })
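/*
 * Usage sketch (illustrative): the memory-backed accessors are used when
 * the register is known not to be (or not required to be) resident on the
 * physical CPU, e.g.:
 *
 *	u64 hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
 *	__vcpu_assign_sys_reg(vcpu, TTBR0_EL1, val);
 *	__vcpu_rmw_sys_reg(vcpu, HCR_EL2, |=, HCR_VSE);
 *
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() below should be preferred when
 * the register may currently be loaded on the physical CPU.
 */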
1240
1241 u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
1242 void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
1243
1244 struct kvm_vm_stat {
1245 struct kvm_vm_stat_generic generic;
1246 };
1247
1248 struct kvm_vcpu_stat {
1249 struct kvm_vcpu_stat_generic generic;
1250 u64 hvc_exit_stat;
1251 u64 wfe_exit_stat;
1252 u64 wfi_exit_stat;
1253 u64 mmio_exit_user;
1254 u64 mmio_exit_kernel;
1255 u64 signal_exits;
1256 u64 exits;
1257 };
1258
1259 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
1260 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
1261 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
1262 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
1263
1264 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
1265 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
1266
1267 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1268 struct kvm_vcpu_events *events);
1269
1270 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1271 struct kvm_vcpu_events *events);
1272
1273 void kvm_arm_halt_guest(struct kvm *kvm);
1274 void kvm_arm_resume_guest(struct kvm *kvm);
1275
1276 #define vcpu_has_run_once(vcpu) (!!READ_ONCE((vcpu)->pid))
1277
1278 #ifndef __KVM_NVHE_HYPERVISOR__
1279 #define kvm_call_hyp_nvhe(f, ...) \
1280 ({ \
1281 struct arm_smccc_res res; \
1282 \
1283 arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \
1284 ##__VA_ARGS__, &res); \
1285 WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \
1286 \
1287 res.a1; \
1288 })
1289
1290 /*
1291 * The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
1292 * where the eret to EL1 acts as a context synchronization event.
1293 */
1294 #define kvm_call_hyp(f, ...) \
1295 do { \
1296 if (has_vhe()) { \
1297 f(__VA_ARGS__); \
1298 isb(); \
1299 } else { \
1300 kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
1301 } \
1302 } while(0)
1303
1304 #define kvm_call_hyp_ret(f, ...) \
1305 ({ \
1306 typeof(f(__VA_ARGS__)) ret; \
1307 \
1308 if (has_vhe()) { \
1309 ret = f(__VA_ARGS__); \
1310 } else { \
1311 ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
1312 } \
1313 \
1314 ret; \
1315 })
1316 #else /* __KVM_NVHE_HYPERVISOR__ */
1317 #define kvm_call_hyp(f, ...) f(__VA_ARGS__)
1318 #define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
1319 #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
1320 #endif /* __KVM_NVHE_HYPERVISOR__ */
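/*
 * Call sketch (illustrative; the function names are examples of existing
 * hyp entry points, not an exhaustive list):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * From the host kernel, a VHE system calls the function directly, while
 * an nVHE system dispatches it via the HVC-based host SMCCC interface
 * defined above.
 */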
1321
1322 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
1323 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
1324
1325 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
1326 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
1327 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
1328 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
1329 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
1330 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
1331 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
1332
1333 void kvm_sys_regs_create_debugfs(struct kvm *kvm);
1334 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
1335
1336 int __init kvm_sys_reg_table_init(void);
1337 struct sys_reg_desc;
1338 int __init populate_sysreg_config(const struct sys_reg_desc *sr,
1339 unsigned int idx);
1340 int __init populate_nv_trap_config(void);
1341
1342 void kvm_calculate_traps(struct kvm_vcpu *vcpu);
1343
1344 /* MMIO helpers */
1345 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
1346 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
1347
1348 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
1349 int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
1350
1351 /*
1352 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
1353 * arrived in guest context. For arm64, any event that arrives while a vCPU is
1354 * loaded is considered to be "in guest".
1355 */
1356 static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
1357 {
1358 return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
1359 }
1360
1361 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
1362 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
1363 void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
1364
1365 bool kvm_arm_pvtime_supported(void);
1366 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
1367 struct kvm_device_attr *attr);
1368 int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
1369 struct kvm_device_attr *attr);
1370 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
1371 struct kvm_device_attr *attr);
1372
1373 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
1374 int __init kvm_arm_vmid_alloc_init(void);
1375 void __init kvm_arm_vmid_alloc_free(void);
1376 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
1377 void kvm_arm_vmid_clear_active(void);
1378
1379 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
1380 {
1381 vcpu_arch->steal.base = INVALID_GPA;
1382 }
1383
1384 static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
1385 {
1386 return (vcpu_arch->steal.base != INVALID_GPA);
1387 }
1388
1389 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
1390
1391 DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
1392
1393 /*
1394 * How we access per-CPU host data depends on where we access it from,
1395 * and the mode we're in:
1396 *
1397 * - VHE and nVHE hypervisor bits use their locally defined instance
1398 *
1399 * - the rest of the kernel uses either the VHE or nVHE one, depending on
1400 * the mode we're running in.
1401 *
1402 * Unless we're in protected mode, fully deprivileged, and the nVHE
1403 * per-CPU stuff is exclusively accessible to the protected EL2 code.
1404 * In this case, the EL1 code uses the *VHE* data as its private state
1405 * (which makes sense in a way as there shouldn't be any shared state
1406 * between the host and the hypervisor).
1407 *
1408 * Yes, this is all totally trivial. Shoot me now.
1409 */
1410 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
1411 #define host_data_ptr(f) (&this_cpu_ptr(&kvm_host_data)->f)
1412 #else
1413 #define host_data_ptr(f) \
1414 (static_branch_unlikely(&kvm_protected_mode_initialized) ? \
1415 &this_cpu_ptr(&kvm_host_data)->f : \
1416 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
1417 #endif
1418
1419 #define host_data_test_flag(flag) \
1420 (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
1421 #define host_data_set_flag(flag) \
1422 set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
1423 #define host_data_clear_flag(flag) \
1424 clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
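/*
 * Usage sketch (illustrative): host_data_ptr() and the flag helpers resolve
 * to the per-CPU instance appropriate for the current context, e.g.:
 *
 *	if (host_data_test_flag(HAS_SPE))
 *		*host_data_ptr(host_debug_state.pmscr_el1) = ...;
 */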
1425
1426 /* Check whether the FP regs are owned by the guest */
1427 static inline bool guest_owns_fp_regs(void)
1428 {
1429 return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
1430 }
1431
1432 /* Check whether the FP regs are owned by the host */
1433 static inline bool host_owns_fp_regs(void)
1434 {
1435 return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
1436 }
1437
1438 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
1439 {
1440 /* The host's MPIDR is immutable, so let's set it up at boot time */
1441 ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
1442 }
1443
1444 static inline bool kvm_system_needs_idmapped_vectors(void)
1445 {
1446 return cpus_have_final_cap(ARM64_SPECTRE_V3A);
1447 }
1448
1449 void kvm_init_host_debug_data(void);
1450 void kvm_debug_init_vhe(void);
1451 void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
1452 void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
1453 void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
1454 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);
1455
1456 #define kvm_vcpu_os_lock_enabled(vcpu) \
1457 (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
1458
1459 #define kvm_debug_regs_in_use(vcpu) \
1460 ((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
1461 #define kvm_host_owns_debug_regs(vcpu) \
1462 ((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
1463 #define kvm_guest_owns_debug_regs(vcpu) \
1464 ((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)
1465
1466 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
1467 struct kvm_device_attr *attr);
1468 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
1469 struct kvm_device_attr *attr);
1470 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
1471 struct kvm_device_attr *attr);
1472
1473 int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
1474 struct kvm_arm_copy_mte_tags *copy_tags);
1475 int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
1476 struct kvm_arm_counter_offset *offset);
1477 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
1478 struct reg_mask_range *range);
1479
1480 /* Guest/host FPSIMD coordination helpers */
1481 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
1482 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
1483 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
1484 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
1485
1486 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
1487 {
1488 return (!has_vhe() && attr->exclude_host);
1489 }
1490
1491 #ifdef CONFIG_KVM
1492 void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
1493 void kvm_clr_pmu_events(u64 clr);
1494 bool kvm_set_pmuserenr(u64 val);
1495 void kvm_enable_trbe(void);
1496 void kvm_disable_trbe(void);
1497 void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest);
1498 #else
1499 static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
1500 static inline void kvm_clr_pmu_events(u64 clr) {}
1501 static inline bool kvm_set_pmuserenr(u64 val)
1502 {
1503 return false;
1504 }
1505 static inline void kvm_enable_trbe(void) {}
1506 static inline void kvm_disable_trbe(void) {}
1507 static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {}
1508 #endif
1509
1510 void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
1511 void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
1512
1513 int __init kvm_set_ipa_limit(void);
1514 u32 kvm_get_pa_bits(struct kvm *kvm);
1515
1516 #define __KVM_HAVE_ARCH_VM_ALLOC
1517 struct kvm *kvm_arch_alloc_vm(void);
1518
1519 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1520
1521 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1522
1523 #define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)
1524
1525 #define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
1526
1527 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
1528 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
1529
1530 #define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
1531
1532 #define kvm_has_mte(kvm) \
1533 (system_supports_mte() && \
1534 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
1535
1536 #define kvm_supports_32bit_el0() \
1537 (system_supports_32bit_el0() && \
1538 !static_branch_unlikely(&arm64_mismatched_32bit_el0))
1539
1540 #define kvm_vm_has_ran_once(kvm) \
1541 (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
1542
1543 static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
1544 {
1545 return test_bit(feature, ka->vcpu_features);
1546 }
1547
1548 #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
1549 #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
1550
1551 #define kvm_vcpu_initialized(v) vcpu_get_flag((v), VCPU_INITIALIZED)
1552
1553 int kvm_trng_call(struct kvm_vcpu *vcpu);
1554 #ifdef CONFIG_KVM
1555 extern phys_addr_t hyp_mem_base;
1556 extern phys_addr_t hyp_mem_size;
1557 void __init kvm_hyp_reserve(void);
1558 #else
1559 static inline void kvm_hyp_reserve(void) { }
1560 #endif
1561
1562 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
1563 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
1564
1565 static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
1566 {
1567 switch (reg) {
1568 case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
1569 return &ka->id_regs[IDREG_IDX(reg)];
1570 case SYS_CTR_EL0:
1571 return &ka->ctr_el0;
1572 case SYS_MIDR_EL1:
1573 return &ka->midr_el1;
1574 case SYS_REVIDR_EL1:
1575 return &ka->revidr_el1;
1576 case SYS_AIDR_EL1:
1577 return &ka->aidr_el1;
1578 default:
1579 WARN_ON_ONCE(1);
1580 return NULL;
1581 }
1582 }
1583
1584 #define kvm_read_vm_id_reg(kvm, reg) \
1585 ({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
1586
1587 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
1588
1589 #define __expand_field_sign_unsigned(id, fld, val) \
1590 ((u64)SYS_FIELD_VALUE(id, fld, val))
1591
1592 #define __expand_field_sign_signed(id, fld, val) \
1593 ({ \
1594 u64 __val = SYS_FIELD_VALUE(id, fld, val); \
1595 sign_extend64(__val, id##_##fld##_WIDTH - 1); \
1596 })
1597
1598 #define get_idreg_field_unsigned(kvm, id, fld) \
1599 ({ \
1600 u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \
1601 FIELD_GET(id##_##fld##_MASK, __val); \
1602 })
1603
1604 #define get_idreg_field_signed(kvm, id, fld) \
1605 ({ \
1606 u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
1607 sign_extend64(__val, id##_##fld##_WIDTH - 1); \
1608 })
1609
1610 #define get_idreg_field_enum(kvm, id, fld) \
1611 get_idreg_field_unsigned(kvm, id, fld)
1612
1613 #define kvm_cmp_feat_signed(kvm, id, fld, op, limit) \
1614 (get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))
1615
1616 #define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit) \
1617 (get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))
1618
1619 #define kvm_cmp_feat(kvm, id, fld, op, limit) \
1620 (id##_##fld##_SIGNED ? \
1621 kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
1622 kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
1623
1624 #define __kvm_has_feat(kvm, id, fld, limit) \
1625 kvm_cmp_feat(kvm, id, fld, >=, limit)
1626
1627 #define kvm_has_feat(kvm, ...) __kvm_has_feat(kvm, __VA_ARGS__)
1628
1629 #define __kvm_has_feat_enum(kvm, id, fld, val) \
1630 kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
1631
1632 #define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__)
1633
1634 #define kvm_has_feat_range(kvm, id, fld, min, max) \
1635 (kvm_cmp_feat(kvm, id, fld, >=, min) && \
1636 kvm_cmp_feat(kvm, id, fld, <=, max))
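/*
 * Usage sketch (illustrative; exact field/value spellings follow the
 * generated sysreg definitions, and are assumptions here):
 *
 *	if (kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
 *		...;
 *	if (kvm_has_feat_range(kvm, ID_AA64DFR0_EL1, PMUVer, IMP, V3P5))
 *		...;
 *
 * Signed fields are compared after sign-extension, so IMP_DEF-style
 * encodings (0xf) do not satisfy a ">= IMP" check.
 */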
1637
1638 /* Check for a given level of PAuth support */
1639 #define kvm_has_pauth(k, l) \
1640 ({ \
1641 bool pa, pi, pa3; \
1642 \
1643 pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l); \
1644 pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP); \
1645 pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l); \
1646 pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP); \
1647 pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l); \
1648 pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP); \
1649 \
1650 (pa + pi + pa3) == 1; \
1651 })
1652
1653 #define kvm_has_fpmr(k) \
1654 (system_supports_fpmr() && \
1655 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
1656
1657 #define kvm_has_tcr2(k) \
1658 (kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))
1659
1660 #define kvm_has_s1pie(k) \
1661 (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
1662
1663 #define kvm_has_s1poe(k) \
1664 (system_supports_poe() && \
1665 kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
1666
1667 #define kvm_has_ras(k) \
1668 (kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
1669
1670 #define kvm_has_sctlr2(k) \
1671 (kvm_has_feat((k), ID_AA64MMFR3_EL1, SCTLRX, IMP))
1672
1673 static inline bool kvm_arch_has_irq_bypass(void)
1674 {
1675 return true;
1676 }
1677
1678 void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
1679 struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg);
1680 void check_feature_map(void);
1681 void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
1682
1683 static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
1684 {
1685 switch (reg) {
1686 case HFGRTR_EL2:
1687 case HFGWTR_EL2:
1688 return HFGRTR_GROUP;
1689 case HFGITR_EL2:
1690 return HFGITR_GROUP;
1691 case HDFGRTR_EL2:
1692 case HDFGWTR_EL2:
1693 return HDFGRTR_GROUP;
1694 case HAFGRTR_EL2:
1695 return HAFGRTR_GROUP;
1696 case HFGRTR2_EL2:
1697 case HFGWTR2_EL2:
1698 return HFGRTR2_GROUP;
1699 case HFGITR2_EL2:
1700 return HFGITR2_GROUP;
1701 case HDFGRTR2_EL2:
1702 case HDFGWTR2_EL2:
1703 return HDFGRTR2_GROUP;
1704 case ICH_HFGRTR_EL2:
1705 case ICH_HFGWTR_EL2:
1706 return ICH_HFGRTR_GROUP;
1707 case ICH_HFGITR_EL2:
1708 return ICH_HFGITR_GROUP;
1709 default:
1710 BUILD_BUG_ON(1);
1711 }
1712 }
1713
1714 #define vcpu_fgt(vcpu, reg) \
1715 ({ \
1716 enum fgt_group_id id = __fgt_reg_to_group_id(reg); \
1717 u64 *p; \
1718 switch (reg) { \
1719 case HFGWTR_EL2: \
1720 case HDFGWTR_EL2: \
1721 case HFGWTR2_EL2: \
1722 case HDFGWTR2_EL2: \
1723 case ICH_HFGWTR_EL2: \
1724 p = &(vcpu)->arch.fgt[id].w; \
1725 break; \
1726 default: \
1727 p = &(vcpu)->arch.fgt[id].r; \
1728 break; \
1729 } \
1730 \
1731 p; \
1732 })
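/*
 * Usage sketch (illustrative): vcpu_fgt() returns a pointer into the
 * per-vCPU fgt[] array, selecting the write half for *FGWTR* registers
 * and the read half otherwise, e.g.:
 *
 *	u64 *fgt = vcpu_fgt(vcpu, HFGWTR_EL2);	// &vcpu->arch.fgt[HFGRTR_GROUP].w
 */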
1733
1734 long kvm_get_cap_for_kvm_ioctl(unsigned int ioctl, long *ext);
1735
1736 #endif /* __ARM64_KVM_HOST_H__ */
1737