// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <linux/mmdebug.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"

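/*
 * A rough sense of the savings (a sketch, assuming x86-64 with 4K pages and
 * a 64-byte struct page; exact numbers depend on the architecture and
 * config): a 2MB HugeTLB page is described by 512 struct pages, i.e. 32KB
 * or 8 vmemmap pages. HVO keeps the first vmemmap page and remaps the
 * remaining 7 onto it read-only, freeing 28KB per 2MB HugeTLB page. For a
 * 1GB HugeTLB page the same scheme frees 4095 of 4096 vmemmap pages.
 */
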
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs walked.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed,
 *			or from which pages are mapped.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};

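/*
 * An illustrative sketch (mirroring vmemmap_remap_free() below) of how a
 * walk is configured: the caller picks a @remap_pte callback and a list to
 * collect pages on, then hands the walk to vmemmap_remap_range():
 *
 *	LIST_HEAD(vmemmap_pages);
 *	struct vmemmap_remap_walk walk = {
 *		.remap_pte	= vmemmap_remap_pte,
 *		.reuse_addr	= reuse,
 *		.vmemmap_pages	= &vmemmap_pages,
 *	};
 *	vmemmap_remap_range(reuse, end, &walk);
 */
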
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *head;
	pte_t *pgtable;

	spin_lock(&init_mm.page_table_lock);
	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
	spin_unlock(&init_mm.page_table_lock);

	if (!head)
		return 0;

	pgtable = pte_alloc_one_kernel(&init_mm);
	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(head + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from the buddy allocator must be
		 * able to be treated as independent small pages (as they can
		 * be freed individually).
		 */
		if (!PageReserved(head))
			split_page(head, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

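/*
 * A worked example of the split above (a sketch, assuming x86-64 where
 * PMD_SIZE is 2MB and PTRS_PER_PTE is 512): a leaf PMD mapping the 2MB
 * vmemmap region [start, start + 2MB) is replaced by a PTE table whose 512
 * entries map head + 0 .. head + 511, one PAGE_SIZE each, preserving the
 * existing virtual-to-physical layout so that individual vmemmap pages can
 * subsequently be remapped or freed on their own.
 */
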
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the page table walk, before we
	 * start remapping (i.e. before calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(ptep_get(pte));
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	flush_tlb_kernel_range(start, end);

	return 0;
}

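/*
 * Note on TLB flushing in the walk above: split_vmemmap_huge_pmd() flushes
 * its own PMD-sized range eagerly (the PMD entry itself changed), while the
 * PTE-level rewrites done via @remap_pte are flushed once, here, for the
 * whole [start, end) range after the walk completes.
 */
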
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator; free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru)
		free_vmemmap_page(page);
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	struct page *page = pte_page(ptep_get(pte));
	pte_t entry;

	/* Remapping the head page requires r/w */
	if (unlikely(addr == walk->reuse_addr)) {
		pgprot = PAGE_KERNEL;
		list_del(&walk->reuse_page->lru);

		/*
		 * Makes sure that preceding stores to the page contents from
		 * vmemmap_remap_free() become visible before the set_pte_at()
		 * write.
		 */
		smp_wmb();
	}

	entry = mk_pte(walk->reuse_page, pgprot);
	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

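/*
 * The resulting mapping, for a 2MB HugeTLB page on x86-64 (a sketch; the
 * page counts follow from the arithmetic at the top of this file):
 *
 *	before:	vmemmap PTEs -> [page0][page1][page2] ... [page7]  (8 pages, RW)
 *	after:	vmemmap PTEs -> [reuse][reuse][reuse] ... [reuse]  (1 page; all
 *		but the first PTE read-only)
 *
 * Pages 1..7 end up on @walk->vmemmap_pages to be freed, and any write
 * through a tail struct page now faults instead of corrupting shared data.
 */
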
/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid values will
 * be caught by free_tail_page_prepare(). To avoid the "corrupted mapping in
 * tail page" message, we need to reset at least 3 struct page structs (one
 * head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE		3

static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}

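/*
 * Why copying from @start + NR_RESET_STRUCT_PAGE works: struct pages 3 and
 * up hold plain tail-page contents, so overwriting entries 0..2 with
 * entries 3..5 wipes the stale head/compound metadata. The BUILD_BUG_ON
 * guarantees that entries 3..5 still live in the same (first) vmemmap page,
 * so @from never reads past the page being reset.
 */
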
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

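/*
 * Restore sketch: each call consumes one page from @walk->vmemmap_pages,
 * fills it with a copy of the shared reuse page, resets its first
 * NR_RESET_STRUCT_PAGE struct pages so the copy no longer carries the reuse
 * page's head/compound metadata, and finally points the PTE (read-write
 * again) at the new page.
 */
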
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};
	int nid = page_to_nid((struct page *)reuse);
	gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

	/*
	 * Allocate a new head vmemmap page to avoid breaking a contiguous
	 * block of struct page memory when freeing it back to the page
	 * allocator in free_vmemmap_page_list(). This will allow the likely
	 * contiguous struct page backing memory to be kept contiguous and
	 * allow for more hugepage allocations. Fall back to the currently
	 * mapped head page should the allocation fail.
	 */
	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
	if (walk.reuse_page) {
		copy_page(page_to_virt(walk.reuse_page),
			  (void *)walk.reuse_addr);
		list_add(&walk.reuse_page->lru, &vmemmap_pages);
	}

	/*
	 * To make the remapping routine most efficient for huge pages, the
	 * vmemmap page table walking routine follows these rules (see
	 * vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

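/*
 * A concrete reading of the BUG_ON above (a sketch, matching how the callers
 * below compute their arguments): hugetlb_vmemmap_optimize() passes
 * reuse == vmemmap_start and start == vmemmap_start +
 * HUGETLB_VMEMMAP_RESERVE_SIZE, so the check encodes the assumption that
 * exactly one vmemmap page (the head page) is reserved and reused.
 */
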
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   struct list_head *list)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_page(page);
	return -ENOMEM;
}

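/*
 * Note on the GFP choice above: __GFP_RETRY_MAYFAIL makes the allocator try
 * hard (restoring vmemmap is a prerequisite for freeing or dissolving the
 * HugeTLB page) while still allowing failure, which is reported back as
 * -ENOMEM rather than invoking the OOM killer.
 */
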
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range
 *			 [@start, @end) to newly allocated pages, one for each
 *			 vmemmap page in the range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);

/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	VM_WARN_ON_ONCE(!PageHuge(head));
	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}

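/*
 * Usage note (a sketch; the call sites live in mm/hugetlb.c): restore runs
 * before a HugeTLB page can be dissolved or returned to the buddy allocator,
 * because the buddy allocator needs every struct page in the range to be
 * real and writable again. The static_branch_dec() here pairs with the
 * static_branch_inc() in hugetlb_vmemmap_optimize().
 */
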
/* Return true iff a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walk the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements, and since the vmemmap
		 * pages are placed at the start of the hotplugged memory
		 * region in the memory_hotplug.memmap_on_memory case, checking
		 * whether any one vmemmap page's vmemmap page is marked
		 * VmemmapSelfHosted is sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}

/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	VM_WARN_ON_ONCE(!PageHuge(head));
	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}

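/*
 * A worked pass through the address arithmetic above (a sketch, assuming a
 * 2MB hugepage on x86-64 with 4K pages, so hugetlb_vmemmap_size() is 32KB
 * and HUGETLB_VMEMMAP_RESERVE_SIZE is one page):
 *
 *	vmemmap_start	= addr of head's struct page  (say V)
 *	vmemmap_end	= V + 32KB                    (8 vmemmap pages)
 *	vmemmap_reuse	= V                           (head vmemmap page)
 *	vmemmap_start	= V + 4KB                     (first tail vmemmap page)
 *
 * vmemmap_remap_free() then remaps [V + 4KB, V + 32KB) onto the page backing
 * V and frees the 7 pages that used to back that range.
 */
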
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(vmemmap_optimize_enabled),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};

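/*
 * How the knobs above are driven in practice (both interfaces are the
 * documented ones; see Documentation/admin-guide/sysctl/vm.rst and
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	hugetlb_free_vmemmap=on			(boot time, via core_param() above)
 *	sysctl vm.hugetlb_optimize_vmemmap=1	(runtime)
 *
 * Toggling the sysctl only affects HugeTLB pages allocated afterwards;
 * already-optimized pages keep their remapped vmemmap until restored.
 */
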
static int __init hugetlb_vmemmap_init(void)
{
	const struct hstate *h;

	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE > HUGETLB_VMEMMAP_RESERVE_PAGES);

	for_each_hstate(h) {
		if (hugetlb_vmemmap_optimizable(h)) {
			register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
			break;
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);