// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>

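/*
 * Host memory map as mirrored for the hypervisor: hyp_memory[] holds up to
 * HYP_MEMBLOCK_REGIONS memblock regions, with hyp_memblock_nr entries in use.
 */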
#define HYP_MEMBLOCK_REGIONS 128

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

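/*
 * Worst-case number of page-table pages needed to map 'nr_pages' pages at
 * page granularity: each level needs DIV_ROUND_UP(entries, PTRS_PER_PTE)
 * table pages to hold the entries of the level below, summed over all
 * KVM_PGTABLE_MAX_LEVELS levels.
 *
 * For example, assuming 4 KiB pages (PTRS_PER_PTE == 512) and 4 levels,
 * mapping 1 GiB (262144 pages) costs at most 512 + 1 + 1 + 1 = 515 pages.
 */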
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

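/*
 * Worst-case number of page-table pages needed to map every hyp_memory
 * region at page granularity.
 */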
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];

		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

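/*
 * Pages to reserve for the hypervisor's own stage-1 page-table: enough to
 * map all of memory at page granularity, plus 1 GiB of private mappings.
 */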
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

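/*
 * Pages to reserve for the host stage-2 page-table: all of memory at page
 * granularity, 16 extra pages to bound concatenated pgds, and 1 GiB worth
 * of MMIO mappings.
 */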
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#endif	/* __ARM64_KVM_PKVM_H__ */