// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>
#include <linux/log2.h>		/* is_power_of_2() */

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/mm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
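
/*
 * Worked example (a sketch; exact figures depend on the architecture and
 * config): with 4 KiB base pages and a 64-byte struct page, a 2 MiB HugeTLB
 * page is described by 512 struct pages, i.e. 512 * 64 = 32 KiB (8 vmemmap
 * pages) of vmemmap. HVO keeps the one reserved vmemmap page (covering
 * HUGETLB_VMEMMAP_RESERVE_PAGES = 4096 / 64 = 64 struct pages) and can return
 * the remaining 7 vmemmap pages to the buddy allocator.
 */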

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
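/*
 * hugetlb_vmemmap_optimize_folio{,s}() remap the vmemmap of a HugeTLB folio
 * (or a list of folios) so that only the reserved vmemmap page keeps its own
 * backing memory, freeing the remaining vmemmap pages to the buddy allocator.
 * hugetlb_vmemmap_restore_folio{,s}() undo this by reallocating and
 * repopulating the full vmemmap before a folio is dissolved or freed. See
 * Documentation/mm/vmemmap_dedup.rst.
 */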
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}

/*
 * Return how much of the vmemmap associated with a HugeTLB page can be
 * optimized and freed back to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The optimization only works when sizeof(struct page) is a power of
	 * 2, so that a vmemmap page holds a whole number of struct pages and
	 * none straddles a page boundary.
	 */
	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
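
/*
 * Under the example assumptions above (2 MiB HugeTLB page, 4 KiB base pages,
 * 64-byte struct page), hugetlb_vmemmap_optimizable_size() evaluates to
 * 32 KiB - 4 KiB = 28 KiB of freeable vmemmap per HugeTLB page.
 */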
#else
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
	return 0;
}

static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}

static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}

static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
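
/*
 * Hypothetical usage sketch (not part of this header): a caller could check
 * the hstate before trying to optimize a freshly allocated folio, e.g.
 *
 *	if (hugetlb_vmemmap_optimizable(h))
 *		hugetlb_vmemmap_optimize_folio(h, folio);
 */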
#endif /* _LINUX_HUGETLB_VMEMMAP_H */