xref: /linux/arch/powerpc/platforms/pseries/hotplug-memory.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

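/*
 * Allocate a copy of @prop with room for @prop_size bytes of value,
 * duplicating the existing name and value.  The copy is flagged
 * OF_DYNAMIC so the OF core knows the memory was dynamically
 * allocated.  Returns NULL on allocation failure.
 */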
static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

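/*
 * Look for an associativity array in ibm,associativity-lookup-arrays
 * that matches @lmb_assoc.  If none matches, grow the property by one
 * array and append the new associativity.  On success *aa_index is set
 * to the matching (or newly added) array index and true is returned.
 */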
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	__be32 *assoc_arrays;
	u32 new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays,
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

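/*
 * Fetch the ibm,associativity of an LMB via configure-connector,
 * update the NUMA distance information for its node, and record the
 * matching index into ibm,associativity-lookup-arrays in
 * lmb->aa_index.
 */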
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

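/*
 * Return the memory_block device backing this LMB.  The lookup takes a
 * reference on the device; callers must drop it with put_device().
 */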
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));

	mem_block = find_memory_block(section_nr);
	return mem_block;
}

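/*
 * Resolve a (drc_index, n_lmbs) pair into a [start, end) range of
 * drmem_lmb entries, checking that the range does not run past the
 * end of the drmem LMB array.
 */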
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

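/*
 * Online or offline the memory block backing an LMB through the driver
 * core.  A request that matches the block's current state is a no-op.
 */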
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block) {
		pr_err("Failed memory block lookup for LMB 0x%x\n", lmb->drc_index);
		return -EINVAL;
	}

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

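/*
 * Remove a range of memory that was described by a memory@ device tree
 * node (as opposed to a drconf LMB): tear it down one memory section
 * at a time and then drop the range from the memblock allocator.
 */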
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
	unsigned long start_pfn;
	int sections_per_block;
	int i;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE;

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	int ret;
	struct resource res;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	pseries_remove_memblock(res.start, resource_size(&res));
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
		!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

static int dlpar_add_lmb(struct drmem_lmb *);

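/*
 * Hot-remove a single LMB: offline its memory block, remove the memory
 * from the kernel and from memblock, and clear the LMB's associativity
 * index and ASSIGNED flag.  The DRC itself is released by the caller.
 */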
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	__remove_memory(lmb->base_addr, memory_block_size);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, memory_block_size);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

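/*
 * Hot-remove @lmbs_to_remove LMBs, chosen from wherever removable LMBs
 * can be found.  If fewer LMBs than requested end up being removed,
 * every LMB removed so far is added back and the request fails as a
 * whole.
 */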
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_reserved = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_remove)
			break;
	}

	if (lmbs_reserved != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

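/*
 * Hot-remove the single LMB identified by @drc_index and release its
 * DRC back to the hypervisor.
 */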
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found) {
		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
		rc = -EINVAL;
	} else if (rc) {
		pr_debug("Failed to hot-remove memory at %llx\n",
			 lmb->base_addr);
	} else {
		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
	}

	return rc;
}

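/*
 * Hot-remove a contiguous range of @lmbs_to_remove LMBs starting at
 * @drc_index.  If removing any LMB in the range fails, the LMBs
 * removed so far are added back and the request fails as a whole.
 */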
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/*
	 * Validate that all LMBs in range are not reserved. Note that it
	 * is ok if they are !ASSIGNED since our goal here is to remove the
	 * LMB range, regardless of whether some LMBs were already removed
	 * for any other reason.
	 *
	 * This is in contrast to what is done in remove_by_count(), where we
	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
	 * because we want to remove a fixed number of LMBs in that function.
	 */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/*
		 * dlpar_remove_lmb() will error out if the LMB is already
		 * !ASSIGNED, but this case is a no-op for us.
		 */
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the LMB removal failed.
			 */
			dlpar_unisolate_drc(lmb->drc_index);

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

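/*
 * Hot-add a single LMB: configure its associativity, add the memory to
 * a suitable NUMA node and online it.  DRCONF_MEM_ASSIGNED is set only
 * if every step succeeds.
 */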
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		pr_err("Failed to configure LMB 0x%x\n", lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
	if (rc) {
		pr_err("Failed to add LMB 0x%x to node %u", lmb->drc_index, nid);
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		pr_err("Failed to online LMB 0x%x on node %u\n", lmb->drc_index, nid);
		__remove_memory(lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

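/*
 * Hot-add @lmbs_to_add LMBs, picked from whatever unassigned LMBs are
 * available.  If the full count cannot be added, everything added so
 * far is removed again and the request fails as a whole.
 */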
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_reserved = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			continue;

		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);
		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_add)
			break;
	}

	if (lmbs_reserved != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

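/*
 * Hot-add the single LMB identified by @drc_index, acquiring its DRC
 * first and releasing it again if the add fails.
 */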
static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

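/*
 * Hot-add a contiguous range of @lmbs_to_add LMBs starting at
 * @drc_index.  The request is all-or-nothing: on any failure the LMBs
 * added so far are removed again.
 */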
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Fail immediately if the whole range can't be hot-added */
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
					lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

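/*
 * Entry point for memory DLPAR requests arriving via the pseries
 * hotplug error log.  Dispatches on the action (add/remove) and the
 * DRC identifier type (count, index, or indexed-count), and updates
 * the dynamic memory device tree property on success.
 */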
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = be32_to_cpu(hp_elog->_drc_u.drc_count);
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = be32_to_cpu(hp_elog->_drc_u.ic.count);
			drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = be32_to_cpu(hp_elog->_drc_u.drc_count);
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = be32_to_cpu(hp_elog->_drc_u.ic.count);
			drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	int ret;
	struct resource res;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(res.start, resource_size(&res));
	return (ret < 0) ? -EINVAL : 0;
}

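/*
 * Device tree reconfiguration notifier: keep memblock and the drmem
 * LMB array in sync when memory nodes are attached or detached, or
 * when the ibm,dynamic-reconfiguration-memory node is updated.
 */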
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->dn->name,
			    "ibm,dynamic-reconfiguration-memory"))
			drmem_update_lmbs(rd->prop);
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);