// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

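/*
 * Host-side helpers for managing the EL2 ("hyp") copy of a VM under pKVM.
 * pkvm_init_host_vm() runs when the host VM is created; the hyp VM itself
 * is typically instantiated lazily before the first vCPU run and torn down
 * again when the host VM is destroyed.
 */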
int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * Check whether the given capability is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is why
 * this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ext_allowed(struct kvm *kvm, long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	case KVM_CAP_ARM_MTE:
		return false;
	default:
		return !kvm || !kvm_vm_is_protected(kvm);
	}
}
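
/*
 * Note: kvm may be NULL (e.g. for system-wide capability queries), in which
 * case anything not explicitly listed above is reported as allowed and must
 * be re-checked once the VM is known.
 */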

/*
 * Check whether the given KVM VM ioctl is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is why
 * this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ioctl_allowed(struct kvm *kvm, unsigned int ioctl)
{
	long ext;
	int r;

	r = kvm_get_cap_for_kvm_ioctl(ioctl, &ext);

	if (WARN_ON_ONCE(r < 0))
		return false;

	return kvm_pkvm_ext_allowed(kvm, ext);
}
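
/*
 * The lookup above derives the capability which gates a given ioctl; ioctls
 * for which no such mapping exists fail the lookup and are conservatively
 * rejected.
 */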

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
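
/*
 * Illustrative arithmetic (assuming 4 KiB pages and 4-byte vmemmap entries):
 * a 1 GiB region covers 262144 pages and so needs 262144 * 4 bytes = 1 MiB of
 * vmemmap, i.e. 256 pages, with the ALIGN_DOWN()/ALIGN() pair adding at most
 * one page at either end for regions that aren't naturally aligned.
 */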

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}
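
/*
 * With KVM_MAX_PVMS = 255 and 8-byte pointers, the VM table is 2040 bytes,
 * so this evaluates to a single page for any supported granule size.
 */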

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision for the worst-case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
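
/*
 * Example (assuming a 4 KiB granule, PTRS_PER_PTE = 512 and four lookup
 * levels): mapping 1 GiB (262144 pages) needs at most
 * 512 + 1 + 1 + 1 = 515 table pages.
 */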

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
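
/*
 * Stage-2 translation on arm64 may concatenate up to 16 tables at the
 * initial lookup level, which is what the extra 16 pages above account for.
 */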

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
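
/*
 * desc_max bounds a descriptor carrying a single attributes entry whose
 * composite region holds up to SG_MAX_SEGMENTS address ranges; the
 * DIV_ROUND_UP() rounds that buffer up to whole pages, on top of the one
 * page each for the RX and TX mailboxes.
 */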

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
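
/*
 * For instance, at the architectural maximum vector length of 2048 bits
 * (vq = 16), the SVE register dump alone (32 Z-regs of 256 bytes each,
 * 16 P-regs and FFR of 32 bytes each) is roughly 8.5 KiB.
 */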

struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last;	/* Internal member for interval tree */
};
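
/*
 * Under pKVM the hypervisor owns the guest stage-2 page tables, so the host
 * tracks its gfn -> pfn ranges in an interval tree of the nodes above rather
 * than by walking a kvm_pgtable.
 */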

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				       u64 addr, u64 size);
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif	/* __ARM64_KVM_PKVM_H__ */