xref: /linux/mm/hugetlb_vmemmap.h (revision 247dbcdbf790c52fc76cf8e327cd0a5778e41e66)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
9 #ifndef _LINUX_HUGETLB_VMEMMAP_H
10 #define _LINUX_HUGETLB_VMEMMAP_H
11 #include <linux/hugetlb.h>
12 
/*
 * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
 * Documentation/vm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
/* Number of struct pages covered by the single reserved vmemmap page. */
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
19 
20 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
/*
 * Re-establish the full vmemmap mapping for @head, i.e. undo the
 * optimization. Returns 0 on success, nonzero on failure —
 * NOTE(review): exact error codes are defined in hugetlb_vmemmap.c; confirm there.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
/* Free the optimizable portion of @head's vmemmap pages (apply HVO). */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
23 
24 static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
25 {
26 	return pages_per_huge_page(h) * sizeof(struct page);
27 }
28 
29 /*
30  * Return how many vmemmap size associated with a HugeTLB page that can be
31  * optimized and can be freed to the buddy allocator.
32  */
33 static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
34 {
35 	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
36 
37 	if (!is_power_of_2(sizeof(struct page)))
38 		return 0;
39 	return size > 0 ? size : 0;
40 }
41 #else
/* HVO compiled out: there is never anything to restore, so report success. */
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}
46 
/* HVO compiled out: optimizing is a no-op. */
static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}
50 
/* HVO compiled out: no vmemmap bytes are ever optimizable. */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
55 #endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
56 
57 static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
58 {
59 	return hugetlb_vmemmap_optimizable_size(h) != 0;
60 }
61 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
62