/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>

#define HYP_MEMBLOCK_REGIONS 128

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

/*
 * Number of vmemmap bytes (page-aligned) needed to cover a single memblock
 * region, given the size of one vmemmap entry.
 */
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

/* Number of pages needed for the hypervisor vmemmap covering all of memory. */
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

/* Worst-case number of page-table pages needed to map nr_pages pages. */
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

/* Pages needed for the hypervisor's stage-1 page-table. */
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

/* Pages needed for the host's stage-2 page-table. */
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#endif /* __ARM64_KVM_PKVM_H__ */
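
/*
 * Illustrative usage sketch, not part of the header above: one plausible way
 * a boot-time caller could combine the sizing helpers to compute a single
 * page reservation for the pKVM hypervisor.  The function name
 * example_hyp_reserve_pages() and its vmemmap_entry_size parameter are
 * assumptions made for illustration only.
 */
static inline unsigned long example_hyp_reserve_pages(size_t vmemmap_entry_size)
{
	unsigned long pages = 0;

	pages += hyp_s1_pgtable_pages();		/* hyp stage-1 page-tables  */
	pages += host_s2_pgtable_pages();		/* host stage-2 page-tables */
	pages += hyp_vmemmap_pages(vmemmap_entry_size);	/* hyp vmemmap              */

	return pages;
}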