/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internal HugeTLB definitions.
 * (C) Nadia Yvette Chambers, April 2004
 */

#ifndef _LINUX_HUGETLB_INTERNAL_H
#define _LINUX_HUGETLB_INTERNAL_H

#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

/*
 * Check if the hstate represents gigantic pages but gigantic page
 * runtime support is not available. This is a common condition used to
 * skip operations that cannot be performed on gigantic pages when runtime
 * support is disabled.
 */
static inline bool hstate_is_gigantic_no_runtime(struct hstate *h)
{
	return hstate_is_gigantic(h) && !gigantic_page_runtime_supported();
}
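
/*
 * Typical use is as an early-exit guard in pool resizing paths; a hedged
 * sketch (not a verbatim caller -- the exact error handling in
 * mm/hugetlb.c may differ):
 *
 *	if (hstate_is_gigantic_no_runtime(h))
 *		return -EINVAL;
 */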

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static inline int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static inline int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}
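
/*
 * Illustrative wrap-around example: with nodes_allowed = {0,2},
 * next_node_allowed(0, ...) returns 2, next_node_allowed(2, ...) wraps
 * back to 0, and get_valid_node_allowed(1, ...) maps the disallowed
 * node 1 to 2.
 */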

/*
 * Return the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool, and advance the
 * next node from which to allocate, handling wrap at the end of the
 * node mask.
 */
static inline int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static inline int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)			\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_free(hs, mask)) || 1);		\
		nr_nodes--)
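
/*
 * Usage sketch: callers in mm/hugetlb.c drive these macros to spread pool
 * allocations (and frees) round-robin across the allowed nodes. A hedged
 * sketch of the alloc side -- alloc_one_folio() is a hypothetical
 * stand-in for the real per-node allocation helper:
 *
 *	struct folio *folio = NULL;
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
 *				    nodes_allowed) {
 *		folio = alloc_one_folio(h, node);
 *		if (folio)
 *			break;
 *	}
 *
 * Each iteration returns the saved "next" node and advances it, and the
 * loop gives up after visiting every allowed node once. The _to_free
 * variant works the same way for picking a node to free from.
 */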

extern void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
			bool adjust_surplus);
extern void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			bool adjust_surplus);
extern void init_new_hugetlb_folio(struct folio *folio);
extern void prep_and_add_allocated_folios(struct hstate *h,
			struct list_head *folio_list);
extern long demote_pool_huge_page(struct hstate *src,
			nodemask_t *nodes_allowed,
			unsigned long nr_to_demote);
extern ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
			struct hstate *h, int nid,
			unsigned long count, size_t len);

extern void hugetlb_sysfs_init(void) __init;

#ifdef CONFIG_SYSCTL
extern void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif

#endif /* _LINUX_HUGETLB_INTERNAL_H */