xref: /linux/mm/hugetlb_cma.c (revision 8804d970fab45726b3c7cd7f240b31122aa94219)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/cma.h>
#include <linux/compiler.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/setup.h>

#include <linux/hugetlb.h>
#include "internal.h"
#include "hugetlb_cma.h"


static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_only;
static unsigned long hugetlb_cma_size __initdata;

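/*
 * Free a hugetlb folio back into the CMA area of the node it was
 * allocated from. Warns once if the folio did not come from that area.
 */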
void hugetlb_cma_free_folio(struct folio *folio)
{
	int nid = folio_nid(folio);

	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}


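/*
 * Allocate a folio of @order from the hugetlb CMA area of @nid. If that
 * fails and __GFP_THISNODE is not set, fall back to the CMA areas of the
 * other nodes in @nodemask.
 */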
struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask)
{
	int node;
	struct folio *folio = NULL;

	if (hugetlb_cma[nid])
		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
		for_each_node_mask(node, *nodemask) {
			if (node == nid || !hugetlb_cma[node])
				continue;

			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
			if (folio)
				break;
		}
	}

	if (folio)
		folio_set_hugetlb_cma(folio);

	return folio;
}

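/*
 * Reserve memory for one huge page of hstate @h from the CMA area of
 * *@nid during early boot. If that node cannot satisfy the request and
 * @node_exact is false, try the remaining hugetlb_bootmem_nodes and
 * update *@nid to the node that succeeded.
 */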
struct huge_bootmem_page * __init
hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
{
	struct cma *cma;
	struct huge_bootmem_page *m;
	int node = *nid;

	cma = hugetlb_cma[*nid];
	m = cma_reserve_early(cma, huge_page_size(h));
	if (!m) {
		if (node_exact)
			return NULL;

		for_each_node_mask(node, hugetlb_bootmem_nodes) {
			cma = hugetlb_cma[node];
			if (!cma || node == *nid)
				continue;
			m = cma_reserve_early(cma, huge_page_size(h));
			if (m) {
				*nid = node;
				break;
			}
		}
	}

	if (m) {
		m->flags = HUGE_BOOTMEM_CMA;
		m->cma = cma;
	}

	return m;
}


static bool cma_reserve_called __initdata;

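/*
 * Parse the "hugetlb_cma=" early parameter. It accepts either a single
 * global size ("hugetlb_cma=<size>") or a comma-separated list of
 * per-node sizes ("hugetlb_cma=<nid>:<size>[,<nid>:<size>]").
 */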
static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if there is one; otherwise
			 * stop parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

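/*
 * Parse the boolean "hugetlb_cma_only=" early parameter. When true,
 * gigantic pages are only allocated from the hugetlb CMA areas.
 */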
static int __init cmdline_parse_hugetlb_cma_only(char *p)
{
	return kstrtobool(p, &hugetlb_cma_only);
}

early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);

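/*
 * hugetlb_cma_reserve() - reserve CMA areas for gigantic pages on the
 * nodes requested via the hugetlb_cma= parameter, either split evenly
 * across the boot-time hugetlb nodes or using the per-node sizes given
 * on the command line.
 */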
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages, which cannot be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	VM_WARN_ON(order <= MAX_PAGE_ORDER);
	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	hugetlb_bootmem_set_nodes();

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_isset(nid, hugetlb_bootmem_nodes)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes were specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
		 * allocate 1 GB on each of the first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size,
					nodes_weight(hugetlb_bootmem_nodes));
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_node_mask(nid, hugetlb_bootmem_nodes) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on the smallest size that
		 * may be returned to the CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
					HUGETLB_PAGE_ORDER, name,
					&hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}

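/*
 * Warn if hugetlb_cma= was specified but hugetlb_cma_reserve() was never
 * called, i.e. the architecture does not support the option.
 */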
void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

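/* Return true if hugetlb allocations must come exclusively from CMA. */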
bool hugetlb_cma_exclusive_alloc(void)
{
	return hugetlb_cma_only;
}

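/* Total CMA size requested via the hugetlb_cma= parameter, in bytes. */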
unsigned long __init hugetlb_cma_total_size(void)
{
	return hugetlb_cma_size;
}

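/* hugetlb_cma_only has no effect without a hugetlb_cma= reservation. */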
void __init hugetlb_cma_validate_params(void)
{
	if (!hugetlb_cma_size)
		hugetlb_cma_only = false;
}

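/*
 * Return true if boot-time allocation of gigantic pages for @h should come
 * from CMA: the architecture has no dedicated boot-time huge page
 * allocator, @h is gigantic, and hugetlb_cma_only is set.
 */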
bool __init hugetlb_early_cma(struct hstate *h)
{
	if (arch_has_huge_bootmem_alloc())
		return false;

	return hstate_is_gigantic(h) && hugetlb_cma_only;
}