/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>

#include <asm/tlbflush.h>

/* add this memory to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
			(unsigned long long)res->start,
			(unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
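
/*
 * Example (illustrative address range only): once
 * register_memory_resource() succeeds, the hot-added range shows up
 * in /proc/iomem as a "System RAM" entry, e.g.:
 *
 *	100000000-13fffffff : System RAM
 */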


#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	return 0;
}

static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during mem_map initialization, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
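
/*
 * Example (a minimal sketch, not any particular arch's code): an arch
 * supporting memory hotplug is expected to pick the target zone and
 * then hand the pfn range to __add_pages().  The ZONE_NORMAL choice
 * below is an assumption for illustration:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(zone, start_pfn, nr_pages);
 *	}
 */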

static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
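
/*
 * Worked example (illustrative numbers): a node spanning pfns
 * [0x10000, 0x18000) that hot-adds [0x18000, 0x1c000) keeps
 * node_start_pfn == 0x10000 and grows node_spanned_pages from
 * 0x8000 to max(0x18000, 0x1c000) - 0x10000 == 0xc000.
 */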

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because
	 * memory_block->state_sem is held.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in the zonelists and
	 * the page allocator ignores it, so the zonelists must be
	 * rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
			   ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* pages in this region have not been onlined yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();
	return 0;
}
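
/*
 * Example (a sketch of the caller side): the memory sysfs layer in
 * drivers/base/memory.c onlines one section at a time, roughly:
 *
 *	ret = online_pages(section_nr << PFN_SECTION_SHIFT,
 *			   PAGES_PER_SECTION);
 */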
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; there are no present pages */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}


int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat) {
			/* don't leak the iomem resource on failure */
			ret = -ENOMEM;
			goto error;
		}
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call the arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* online the node here; we cannot roll back from this point on */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node cannot be created,
		 * CPUs on that node cannot be hot-added.  There is no way
		 * to roll back here, so catch the failure with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
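
/*
 * Example (illustrative only; the node lookup is hypothetical): a
 * hot-add driver such as the ACPI memory driver discovers a new
 * range and its node, then calls:
 *
 *	nid = node_for_new_range(start);	(hypothetical helper)
 *	ret = add_memory(nid, start, size);
 *
 * The new sections are onlined afterwards via the memory sysfs
 * interface, which ends up in online_pages() above.
 */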