xref: /linux/mm/bootmem_info.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Bootmem core functions.
4  *
5  * Copyright (c) 2020, Bytedance.
6  *
7  *     Author: Muchun Song <songmuchun@bytedance.com>
8  *
9  */
10 #include <linux/mm.h>
11 #include <linux/compiler.h>
12 #include <linux/memblock.h>
13 #include <linux/bootmem_info.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/kmemleak.h>
16 
/*
 * get_page_bootmem - annotate a boot-time allocated page and pin it
 * @info: auxiliary value stored in the upper bits of page->private
 *        (a section number or node id at the call sites in this file)
 * @page: page to annotate
 * @type: bootmem type tag, packed into the low 4 bits of page->private
 *
 * Packs @type and @info into page->private, sets PG_private, and takes
 * an extra reference so the page stays around until a matching
 * put_page_bootmem() releases it.
 */
void get_page_bootmem(unsigned long info, struct page *page,
		enum bootmem_type type)
{
	/* @type must fit in 4 bits and @info in the remaining bits */
	BUG_ON(type > 0xf);
	BUG_ON(info > (ULONG_MAX >> 4));
	SetPagePrivate(page);
	set_page_private(page, info << 4 | type);
	page_ref_inc(page);
}
26 
/*
 * put_page_bootmem - drop a reference taken by get_page_bootmem()
 * @page: page previously annotated via get_page_bootmem()
 *
 * When the last extra reference is dropped (refcount falls back to 1),
 * clear the packed private data and hand the page back to the page
 * allocator via free_reserved_page().
 */
void put_page_bootmem(struct page *page)
{
	enum bootmem_type type = bootmem_type(page);

	/* page->private must carry a valid bootmem type tag */
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		/* tell kmemleak this region is gone before freeing it */
		kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
		free_reserved_page(page);
	}
}
42 
43 static void __init register_page_bootmem_info_section(unsigned long start_pfn)
44 {
45 	unsigned long mapsize, section_nr, i;
46 	struct mem_section *ms;
47 	struct mem_section_usage *usage;
48 	struct page *page;
49 
50 	start_pfn = SECTION_ALIGN_DOWN(start_pfn);
51 	section_nr = pfn_to_section_nr(start_pfn);
52 	ms = __nr_to_section(section_nr);
53 
54 	if (!preinited_vmemmap_section(ms))
55 		register_page_bootmem_memmap(section_nr, pfn_to_page(start_pfn),
56 					     PAGES_PER_SECTION);
57 
58 	usage = ms->usage;
59 	page = virt_to_page(usage);
60 
61 	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
62 
63 	for (i = 0; i < mapsize; i++, page++)
64 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
65 }
66 
67 void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
68 {
69 	unsigned long i, pfn, end_pfn, nr_pages;
70 	int node = pgdat->node_id;
71 	struct page *page;
72 
73 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
74 	page = virt_to_page(pgdat);
75 
76 	for (i = 0; i < nr_pages; i++, page++)
77 		get_page_bootmem(node, page, NODE_INFO);
78 
79 	pfn = pgdat->node_start_pfn;
80 	end_pfn = pgdat_end_pfn(pgdat);
81 
82 	/* register section info */
83 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
84 		/*
85 		 * Some platforms can assign the same pfn to multiple nodes - on
86 		 * node0 as well as nodeN.  To avoid registering a pfn against
87 		 * multiple nodes we check that this pfn does not already
88 		 * reside in some other nodes.
89 		 */
90 		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
91 			register_page_bootmem_info_section(pfn);
92 	}
93 }
94