// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static inline bool kvm_pvm_ext_allowed(long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	default:
		return false;
	}
}
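
/*
 * Usage sketch (not part of this header's contract): the generic
 * KVM_CHECK_EXTENSION path is expected to consult this helper for
 * protected VMs and report unlisted capabilities as unsupported.
 */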

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
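
/*
 * Worked example (illustrative figures, not kernel constants): with
 * 4 KiB pages and a 64-byte vmemmap entry, a 2 MiB memblock covers 512
 * pages and thus needs 512 * 64 = 32 KiB of vmemmap, before start and
 * end are rounded out to page boundaries above.
 */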

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}
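
/*
 * With the default KVM_MAX_PVMS of 255 and 8-byte pointers, the VM table
 * occupies 2040 bytes, i.e. a single page at any supported page size.
 */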

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
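
/*
 * Worked example (assuming 4 KiB pages, PTRS_PER_PTE == 512 and four
 * lookup levels): mapping 1 GiB, i.e. 262144 pages, costs at most
 * 512 + 1 + 1 + 1 = 515 table pages.
 */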

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
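
/*
 * The extra 16 pages above match the architectural stage-2 limit of up to
 * 16 translation-table pages concatenated at the initial lookup level.
 */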

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
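
/*
 * Rough sizing, under illustrative assumptions (4 KiB pages,
 * SG_MAX_SEGMENTS == 2048, 16-byte address ranges): desc_max comes to a
 * little over 32 KiB, so this returns 2 mailbox pages plus 9 buffer pages.
 */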

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
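
/*
 * Sizing note: SVE_SIG_REGS_SIZE() grows linearly with the vector length,
 * covering 32 Z registers (16 bytes per 128-bit quadword each) plus 16
 * predicate registers and FFR (2 bytes per quadword each); size_add()
 * puts that on top of the fixed struct cpu_sve_state header.
 */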

struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last;	/* Internal member for interval tree */
};
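
/*
 * pkvm_mapping nodes live in an interval tree keyed on the guest frame
 * range [gfn, gfn + nr_pages - 1]; __subtree_last is maintained by the
 * interval tree helpers and must not be modified directly.
 */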

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif	/* __ARM64_KVM_PKVM_H__ */