xref: /linux/arch/powerpc/platforms/pseries/hotplug-memory.c (revision 09275d717d1b2d7d5ed91f2140bb34246514a1b4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pseries Memory Hotplug infrastructure.
4  *
5  * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
6  */
7 
8 #define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt
9 
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
16 
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/prom.h>
20 #include <asm/sparsemem.h>
21 #include <asm/fadump.h>
22 #include <asm/drmem.h>
23 #include "pseries.h"
24 
25 unsigned long pseries_memory_block_size(void)
26 {
27 	struct device_node *np;
28 	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
29 	struct resource r;
30 
31 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
32 	if (np) {
33 		const __be64 *size;
34 
35 		size = of_get_property(np, "ibm,lmb-size", NULL);
36 		if (size)
37 			memblock_size = be64_to_cpup(size);
38 		of_node_put(np);
39 	} else  if (machine_is(pseries)) {
40 		/* This fallback really only applies to pseries */
41 		unsigned int memzero_size = 0;
42 
43 		np = of_find_node_by_path("/memory@0");
44 		if (np) {
45 			if (!of_address_to_resource(np, 0, &r))
46 				memzero_size = resource_size(&r);
47 			of_node_put(np);
48 		}
49 
50 		if (memzero_size) {
51 			/* We now know the size of memory@0, use this to find
52 			 * the first memoryblock and get its size.
53 			 */
54 			char buf[64];
55 
56 			sprintf(buf, "/memory@%x", memzero_size);
57 			np = of_find_node_by_path(buf);
58 			if (np) {
59 				if (!of_address_to_resource(np, 0, &r))
60 					memblock_size = resource_size(&r);
61 				of_node_put(np);
62 			}
63 		}
64 	}
65 	return memblock_size;
66 }
67 
68 static void dlpar_free_property(struct property *prop)
69 {
70 	kfree(prop->name);
71 	kfree(prop->value);
72 	kfree(prop);
73 }
74 
/*
 * Allocate a deep copy of @prop whose value buffer is @prop_size bytes.
 *
 * The value buffer is zero-filled beyond the prop->length bytes copied
 * from the original, so callers may pass prop_size > prop->length to
 * grow the value (find_aa_index() relies on this to append an array).
 *
 * NOTE(review): assumes prop_size >= prop->length; a smaller size would
 * overflow the new buffer in the memcpy() below -- confirm all callers.
 *
 * Returns the new property, flagged OF_DYNAMIC so the OF core will
 * free it on release, or NULL on allocation failure.
 */
static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		/* frees whichever of name/value was allocated */
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}
97 
/*
 * Find (or create) the entry in @dr_node's
 * ibm,associativity-lookup-arrays property (@ala_prop) matching the
 * LMB associativity @lmb_assoc, returning its index through @aa_index.
 *
 * lmb_assoc[0] is the associativity list length, so comparisons start
 * at lmb_assoc[1].  When no existing array matches, the property is
 * grown by one array via a cloned property handed to
 * of_update_property(), which takes ownership of the clone.
 *
 * Returns true on success, false if the grown property could not be
 * allocated.
 */
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		/* both sides are big-endian, so a raw memcmp suffices */
		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	/* no match: grow the property by one array and append ours */
	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	/* NOTE(review): of_update_property() can fail; return is ignored */
	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}
153 
/*
 * Resolve and store the associativity-lookup-array index for @lmb.
 *
 * Fetches the LMB's device tree representation from firmware with
 * configure-connector, reads its ibm,associativity property, and maps
 * it to an index in /ibm,dynamic-reconfiguration-memory's
 * ibm,associativity-lookup-arrays, appending a new entry if needed.
 *
 * On success lmb->aa_index is set and 0 returned; otherwise a negative
 * errno (or -1 when the lookup-array entry could not be created).
 */
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	/* ask firmware for the device tree nodes describing this LMB */
	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	/* lmb_assoc points into lmb_node's properties; done with both now */
	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}
205 
206 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
207 {
208 	unsigned long section_nr;
209 	struct mem_section *mem_sect;
210 	struct memory_block *mem_block;
211 
212 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
213 	mem_sect = __nr_to_section(section_nr);
214 
215 	mem_block = find_memory_block(mem_sect);
216 	return mem_block;
217 }
218 
219 static int get_lmb_range(u32 drc_index, int n_lmbs,
220 			 struct drmem_lmb **start_lmb,
221 			 struct drmem_lmb **end_lmb)
222 {
223 	struct drmem_lmb *lmb, *start, *end;
224 	struct drmem_lmb *limit;
225 
226 	start = NULL;
227 	for_each_drmem_lmb(lmb) {
228 		if (lmb->drc_index == drc_index) {
229 			start = lmb;
230 			break;
231 		}
232 	}
233 
234 	if (!start)
235 		return -EINVAL;
236 
237 	end = &start[n_lmbs];
238 
239 	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
240 	if (end > limit)
241 		return -EINVAL;
242 
243 	*start_lmb = start;
244 	*end_lmb = end;
245 	return 0;
246 }
247 
248 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
249 {
250 	struct memory_block *mem_block;
251 	int rc;
252 
253 	mem_block = lmb_to_memblock(lmb);
254 	if (!mem_block)
255 		return -EINVAL;
256 
257 	if (online && mem_block->dev.offline)
258 		rc = device_online(&mem_block->dev);
259 	else if (!online && !mem_block->dev.offline)
260 		rc = device_offline(&mem_block->dev);
261 	else
262 		rc = 0;
263 
264 	put_device(&mem_block->dev);
265 
266 	return rc;
267 }
268 
/* Online the memory block backing @lmb (no-op if already online). */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
273 
274 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Offline the memory block backing @lmb (no-op if already offline). */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
279 
280 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
281 {
282 	unsigned long block_sz, start_pfn;
283 	int sections_per_block;
284 	int i, nid;
285 
286 	start_pfn = base >> PAGE_SHIFT;
287 
288 	lock_device_hotplug();
289 
290 	if (!pfn_valid(start_pfn))
291 		goto out;
292 
293 	block_sz = pseries_memory_block_size();
294 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
295 	nid = memory_add_physaddr_to_nid(base);
296 
297 	for (i = 0; i < sections_per_block; i++) {
298 		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
299 		base += MIN_MEMORY_BLOCK_SIZE;
300 	}
301 
302 out:
303 	/* Update memory regions for memory remove */
304 	memblock_remove(base, memblock_size);
305 	unlock_device_hotplug();
306 	return 0;
307 }
308 
309 static int pseries_remove_mem_node(struct device_node *np)
310 {
311 	const __be32 *regs;
312 	unsigned long base;
313 	unsigned int lmb_size;
314 	int ret = -EINVAL;
315 
316 	/*
317 	 * Check to see if we are actually removing memory
318 	 */
319 	if (!of_node_is_type(np, "memory"))
320 		return 0;
321 
322 	/*
323 	 * Find the base address and size of the memblock
324 	 */
325 	regs = of_get_property(np, "reg", NULL);
326 	if (!regs)
327 		return ret;
328 
329 	base = be64_to_cpu(*(unsigned long *)regs);
330 	lmb_size = be32_to_cpu(regs[3]);
331 
332 	pseries_remove_memblock(base, lmb_size);
333 	return 0;
334 }
335 
/*
 * Cheap pre-checks for whether @lmb may be hot-removed: it must be
 * assigned, and (with fadump) must not overlap the fadump regions.
 * This is advisory only -- the authoritative answer comes from the
 * later device_offline() attempt.
 */
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}
352 
353 static int dlpar_add_lmb(struct drmem_lmb *);
354 
/*
 * Offline and remove the memory backing @lmb.
 *
 * Ordering matters: the block must be offlined before __remove_memory()
 * tears it down, and the memory_block reference from lmb_to_memblock()
 * must be dropped on every path.  On success the LMB's associativity
 * index is invalidated and DRCONF_MEM_ASSIGNED cleared; releasing the
 * DRC remains the caller's responsibility.
 *
 * Returns 0 on success or a negative errno (including when the block
 * cannot be offlined).
 */
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	/* mem_block->nid must be read before the block is torn down */
	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}
387 
/*
 * Hot-remove any @lmbs_to_remove LMBs.
 *
 * Protocol: first a best-effort count confirms enough removable LMBs
 * exist; then LMBs are removed one by one, each successful removal
 * marked "reserved" so it can be identified afterwards.  If the full
 * count could not be removed, every reserved LMB is added back
 * (rollback); otherwise each reserved LMB's DRC is returned to
 * firmware.  Reservations are cleared in both paths.
 *
 * Returns 0 when exactly @lmbs_to_remove LMBs were removed, -EINVAL
 * otherwise (including after a successful rollback).
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		/* failures are tolerated here; we only need the total count */
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
462 
463 static int dlpar_memory_remove_by_index(u32 drc_index)
464 {
465 	struct drmem_lmb *lmb;
466 	int lmb_found;
467 	int rc;
468 
469 	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
470 
471 	lmb_found = 0;
472 	for_each_drmem_lmb(lmb) {
473 		if (lmb->drc_index == drc_index) {
474 			lmb_found = 1;
475 			rc = dlpar_remove_lmb(lmb);
476 			if (!rc)
477 				dlpar_release_drc(lmb->drc_index);
478 
479 			break;
480 		}
481 	}
482 
483 	if (!lmb_found)
484 		rc = -EINVAL;
485 
486 	if (rc)
487 		pr_info("Failed to hot-remove memory at %llx\n",
488 			lmb->base_addr);
489 	else
490 		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
491 
492 	return rc;
493 }
494 
/*
 * Hot-remove the contiguous range of @lmbs_to_remove LMBs beginning at
 * DRC index @drc_index ("indexed-count" request).
 *
 * Same reserve/rollback protocol as dlpar_memory_remove_by_count(),
 * but over an explicit range, and any single failure aborts and rolls
 * back the whole request.
 *
 * NOTE(review): the validation loop only rejects DRCONF_MEM_RESERVED
 * LMBs, while the removal loop skips unassigned ones -- so the
 * "enough LMBs" check can pass on LMBs that are then never removed;
 * confirm whether that is the intended semantic.
 */
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		/* any failure aborts the range; rollback happens below */
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");


		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
564 
#else
/*
 * CONFIG_MEMORY_HOTREMOVE is disabled: removal requests fail with
 * -EOPNOTSUPP, while node-detach notifications succeed as no-ops so
 * the OF reconfig path keeps working.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
597 
/*
 * Add the memory backing @lmb to the kernel and online it.
 *
 * On success DRCONF_MEM_ASSIGNED is set.  On failure all partial work
 * is unwound (memory removed again, associativity index invalidated).
 *
 * NOTE(review): on update_lmb_associativity_index() failure this
 * function releases the DRC itself, yet callers such as
 * dlpar_memory_add_by_count() also release the DRC whenever
 * dlpar_add_lmb() fails -- that looks like a possible double release;
 * confirm against the callers' error paths.
 */
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address. */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		/* onlining failed: unwind the __add_memory() above */
		__remove_memory(nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
634 
/*
 * Hot-add any @lmbs_to_add currently-unassigned LMBs.
 *
 * Mirror of dlpar_memory_remove_by_count(): validate availability,
 * acquire a DRC and add each LMB (marking successes "reserved"), and
 * if the full count could not be added, remove every reserved LMB
 * again and release its DRC.  Reservations are cleared in both paths.
 *
 * Returns 0 when exactly @lmbs_to_add LMBs were added, -EINVAL
 * otherwise (including after rollback).
 */
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		/* per-LMB failures are tolerated; try the next one */
		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
714 
715 static int dlpar_memory_add_by_index(u32 drc_index)
716 {
717 	struct drmem_lmb *lmb;
718 	int rc, lmb_found;
719 
720 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
721 
722 	lmb_found = 0;
723 	for_each_drmem_lmb(lmb) {
724 		if (lmb->drc_index == drc_index) {
725 			lmb_found = 1;
726 			rc = dlpar_acquire_drc(lmb->drc_index);
727 			if (!rc) {
728 				rc = dlpar_add_lmb(lmb);
729 				if (rc)
730 					dlpar_release_drc(lmb->drc_index);
731 			}
732 
733 			break;
734 		}
735 	}
736 
737 	if (!lmb_found)
738 		rc = -EINVAL;
739 
740 	if (rc)
741 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
742 	else
743 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
744 			lmb->base_addr, drc_index);
745 
746 	return rc;
747 }
748 
/*
 * Hot-add the contiguous range of @lmbs_to_add LMBs beginning at DRC
 * index @drc_index ("indexed-count" request).
 *
 * Same reserve/rollback protocol as dlpar_memory_add_by_count(), but
 * over an explicit range, and any single failure aborts and rolls back
 * the whole request.
 */
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* already-assigned LMBs in the range are left alone */
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
823 
/*
 * Entry point for DLPAR memory hotplug requests from the pseries
 * hotplug error-log interface.  Dispatches on the action (add/remove)
 * and the identifier type (count, DRC index, or indexed-count), all
 * under the device hotplug lock.  On success the in-kernel drmem
 * device tree property is rewritten to match the new state.
 *
 * Returns 0 on success or a negative errno from the handler or from
 * drmem_update_dt().
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	/* sync the device tree's dynamic-memory property with reality */
	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}
886 
887 static int pseries_add_mem_node(struct device_node *np)
888 {
889 	const __be32 *regs;
890 	unsigned long base;
891 	unsigned int lmb_size;
892 	int ret = -EINVAL;
893 
894 	/*
895 	 * Check to see if we are actually adding memory
896 	 */
897 	if (!of_node_is_type(np, "memory"))
898 		return 0;
899 
900 	/*
901 	 * Find the base and size of the memblock
902 	 */
903 	regs = of_get_property(np, "reg", NULL);
904 	if (!regs)
905 		return ret;
906 
907 	base = be64_to_cpu(*(unsigned long *)regs);
908 	lmb_size = be32_to_cpu(regs[3]);
909 
910 	/*
911 	 * Update memory region to represent the memory add
912 	 */
913 	ret = memblock_add(base, lmb_size);
914 	return (ret < 0) ? -EINVAL : 0;
915 }
916 
917 static int pseries_memory_notifier(struct notifier_block *nb,
918 				   unsigned long action, void *data)
919 {
920 	struct of_reconfig_data *rd = data;
921 	int err = 0;
922 
923 	switch (action) {
924 	case OF_RECONFIG_ATTACH_NODE:
925 		err = pseries_add_mem_node(rd->dn);
926 		break;
927 	case OF_RECONFIG_DETACH_NODE:
928 		err = pseries_remove_mem_node(rd->dn);
929 		break;
930 	}
931 	return notifier_from_errno(err);
932 }
933 
/* Receives OF reconfig events so memblock tracks memory node changes */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
937 
938 static int __init pseries_memory_hotplug_init(void)
939 {
940 	if (firmware_has_feature(FW_FEATURE_LPAR))
941 		of_reconfig_notifier_register(&pseries_mem_nb);
942 
943 	return 0;
944 }
945 machine_device_initcall(pseries, pseries_memory_hotplug_init);
946