// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static inline bool kvm_pvm_ext_allowed(long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	default:
		return false;
	}
}
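
/*
 * Illustrative sketch only: a VM-level capability check might consult the
 * allow-list above roughly as follows (kvm_vm_is_protected() is assumed to
 * be available to the caller; the real call site lives in
 * arch/arm64/kvm/arm.c):
 *
 *	if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
 *		return 0;
 */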

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
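
/*
 * Worked example (illustrative, assuming 4 KiB pages and a 64-byte vmemmap
 * entry): a 2 MiB memblock at base 0x80000000 covers 512 pages, so
 * start = 0x80000 * 64 = 0x2000000 and end = start + 512 * 64 = 0x2008000.
 * Both happen to be page-aligned already, leaving 0x8000 bytes, i.e. eight
 * 4 KiB pages of vmemmap for this memblock.
 */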

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}
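
/*
 * Note: vmemmap_entry_size is typically the size of the hypervisor's
 * per-page metadata structure, so this returns the number of pages needed
 * to back one metadata entry for every page of memory in hyp_memory[].
 */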

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
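
/*
 * Worked example (illustrative, assuming 4 KiB pages, PTRS_PER_PTE == 512
 * and four page-table levels): mapping 1 GiB (262144 pages) costs at most
 * 512 + 1 + 1 + 1 = 515 pages of page-table memory.
 */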

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];

		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
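
/*
 * The "+ 16" above matches the architectural maximum of 16 concatenated
 * translation tables at the initial stage-2 lookup level, so the estimate
 * remains a safe upper bound whatever IPA size the host stage-2 ends up
 * using.
 */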

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

#define KVM_FFA_MBOX_NR_PAGES 1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
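
/*
 * Rough sizing (illustrative, assuming 4 KiB pages, SG_MAX_SEGMENTS == 128
 * and 16-byte address ranges): desc_max comes to a little over 2 KiB, so
 * the descriptor buffer fits in one page and the proxy needs three pages in
 * total (RX mailbox + TX mailbox + descriptor buffer).
 */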

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
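
/*
 * Host-side record of one guest stage-2 mapping, kept in an interval tree
 * keyed by gfn (hence the __subtree_last bookkeeping member). For protected
 * VMs the hypervisor owns the real stage-2 page tables, so the host tracks
 * its view of the guest address space with these entries instead.
 */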
struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last;	/* Internal member for interval tree */
};

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */