// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
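/*
 * Rough idea of the savings (illustrative only, assuming 4 KiB base pages
 * and a 64-byte struct page): a 2 MiB HugeTLB page is described by 512
 * struct pages, i.e. 32 KiB or 8 vmemmap pages. HVO keeps
 * HUGETLB_VMEMMAP_RESERVE_SIZE of vmemmap (the page holding the head and
 * first tail struct pages), remaps the remaining vmemmap pages read-only
 * to it and frees them: roughly 7 pages saved per 2 MiB HugeTLB page and
 * roughly 4095 of 4096 vmemmap pages per 1 GiB HugeTLB page.
 */
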
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs walked.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed,
 *			or from which new mappings are populated.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};

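/*
 * Split a huge (PMD-leaf) vmemmap mapping into a PTE table that maps the
 * same PMD_SIZE range page by page, so that individual vmemmap pages can
 * later be remapped or freed. The new PTE table is only installed if the
 * PMD is still a leaf once init_mm.page_table_lock is held.
 */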
static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from the buddy allocator must be
		 * able to be treated as independent small pages (as they can
		 * be freed individually).
		 */
		if (!PageReserved(page))
			split_page(page, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

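/*
 * Split the vmemmap PMD if it is still mapped as a leaf. The unlocked
 * pmd_leaf() check below is only a fast path: if the PMD is split
 * concurrently, __split_vmemmap_huge_pmd() re-checks it under
 * init_mm.page_table_lock and simply frees the spare PTE table.
 */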
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}

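/*
 * Walk the PTEs covering [addr, end). The very first PTE of the whole walk
 * provides @walk->reuse_page and is skipped; every other entry is handed to
 * @walk->remap_pte.
 */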
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse page is found during the first step of the table walk,
	 * before we start remapping (i.e. before calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

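/*
 * Walk the kernel page tables over the vmemmap range [@start, @end),
 * splitting huge PMD mappings as needed and invoking @walk->remap_pte on
 * each PTE, then flush the TLB for the portion of the range that was
 * actually remapped (everything past the reuse page at @start).
 */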
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB for
	 * that range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

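/*
 * remap_pte callback used when freeing vmemmap: make the PTE point at the
 * shared reuse page (read-only) and queue the page it previously mapped on
 * @walk->vmemmap_pages so it can be freed once the walk is done.
 */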
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, its special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid values will
 * be caught by free_tail_pages_check(). To avoid the "corrupted mapping in
 * tail page" message, we need to reset at least 3 struct page structs (one
 * head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE		3

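/*
 * Called on each freshly restored vmemmap page, which vmemmap_restore_pte()
 * has just filled from the reuse page: its leading struct pages are copies
 * of the HugeTLB head (and first tail) entries and carry metadata that is
 * invalid for ordinary tail pages, so overwrite the first
 * NR_RESET_STRUCT_PAGE entries with the plain tail entries that follow them
 * in the same page.
 */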
static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}

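/*
 * remap_pte callback used when restoring vmemmap: take a pre-allocated page
 * off @walk->vmemmap_pages, fill it from the reuse page, reset the leading
 * struct pages and point the PTE back at it with normal (writable) kernel
 * permissions.
 */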
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * To make the remapping routine most efficient for huge pages, the
	 * vmemmap page table walk follows these rules (see vmemmap_pte_range()
	 * for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

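/*
 * Allocate one page for every vmemmap page in [@start, @end), preferably on
 * the node that the HugeTLB page itself resides on (derived from the struct
 * page at @start). On failure, free whatever was already allocated and
 * return -ENOMEM.
 */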
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to pages taken one by one from @vmemmap_pages.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);

/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, the previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}

/* Return true if the vmemmap of this HugeTLB page should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only a vmemmap page's own vmemmap page can be self-hosted.
		 * Walk the page tables to find the page that backs the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements, and since the vmemmap
		 * pages are placed at the start of the hotplugged memory region
		 * in the memory_hotplug.memmap_on_memory case, checking whether
		 * any one vmemmap page's vmemmap page is marked as
		 * VmemmapSelfHosted is sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}

/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}

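/*
 * vm.hugetlb_optimize_vmemmap: runtime toggle for the same variable that
 * backs the hugetlb_free_vmemmap= boot parameter; hugetlb_vmemmap_init()
 * registers it only when at least one hstate is optimizable.
 */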
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};

static int __init hugetlb_vmemmap_init(void)
{
	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	if (IS_ENABLED(CONFIG_PROC_SYSCTL)) {
		const struct hstate *h;

		for_each_hstate(h) {
			if (hugetlb_vmemmap_optimizable(h)) {
				register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
				break;
			}
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);