// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

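/*
 * Size of the hyp_memory array below, i.e. the maximum number of memblock
 * regions that can be recorded for the hypervisor's use at boot.
 */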
#define HYP_MEMBLOCK_REGIONS 128

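/*
 * Host-side pKVM VM lifecycle: pkvm_init_host_vm() prepares the host's
 * per-VM state, pkvm_create_hyp_vm() instantiates the VM at EL2 (donating
 * the memory the hypervisor needs to track it), and pkvm_destroy_hyp_vm()
 * tears it down again so that memory can be reclaimed.
 */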
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

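/*
 * Layout of the memory regions available to the hypervisor, as recorded at
 * boot. kvm_nvhe_sym() expands to the __kvm_nvhe_-prefixed symbol name,
 * which is how the kernel refers to objects living in the nVHE hyp image.
 */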
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

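/*
 * Size, in bytes, of the slice of the hypervisor's vmemmap covering one
 * memblock region. Worked example with illustrative numbers (4 KiB pages,
 * a 32-byte vmemmap entry): a 512 MiB region holds 131072 pages, so its
 * slice spans 131072 * 32 = 4 MiB; ALIGN_DOWN/ALIGN then widen that to
 * page boundaries, adding at most one page at either end when the region
 * is not aligned such that its slice starts and ends on page boundaries.
 */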
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

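/*
 * Total number of pages the hypervisor needs for its vmemmap, i.e. one
 * vmemmap_entry_size-d entry per page of every region in hyp_memory.
 * Callers pass the size of the hypervisor's per-page metadata structure
 * (struct hyp_page in the nVHE code).
 */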
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

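/*
 * Pages needed for the hypervisor's table of VM handles: with
 * KVM_MAX_PVMS = 255 and 8-byte pointers that is 2040 bytes, which
 * PAGE_ALIGN rounds up to a single 4 KiB page.
 */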
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

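/*
 * Worst-case number of page-table pages needed to map nr_pages: every
 * level is sized as if no two mappings could share a table. Worked
 * example with 4 KiB pages (PTRS_PER_PTE = 512) and 4 levels, mapping
 * 1 GiB (262144 pages): 512 + 1 + 1 + 1 = 515 table pages.
 */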
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

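/*
 * Pages to provision for the hypervisor's stage-1 (EL2) page-table:
 * enough to map all of memory at page granularity, plus a 1 GiB window
 * for private mappings (per-CPU stacks and other hypervisor-internal
 * allocations; the exact consumers are a detail of the hyp code).
 */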
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds: the architecture allows stage-2 translation to
	 * concatenate up to 16 tables at the initial lookup level.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

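/*
 * Usage sketch (illustrative, not part of this header): the boot-time
 * reservation code sums these helpers to size the memory carveout handed
 * to the hypervisor, roughly:
 *
 *	hyp_mem_pages += hyp_s1_pgtable_pages();
 *	hyp_mem_pages += host_s2_pgtable_pages();
 *	hyp_mem_pages += hyp_vm_table_pages();
 *	hyp_mem_pages += hyp_vmemmap_pages(sizeof(struct hyp_page));
 *
 * See kvm_hyp_reserve() in arch/arm64/kvm/pkvm.c for the real logic; the
 * exact expression used there for the vmemmap entry size is an assumption
 * here.
 */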
#endif	/* __ARM64_KVM_PKVM_H__ */