xref: /linux/arch/powerpc/platforms/pseries/hotplug-cpu.c (revision 74205b3fc2effde821b219d955c70e727dc43cc6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pseries CPU Hotplug infrastructure.
4  *
5  * Split out from arch/powerpc/platforms/pseries/setup.c
6  *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
7  *
8  * Peter Bergner, IBM	March 2001.
9  * Copyright (C) 2001 IBM.
10  * Dave Engebretsen, Peter Bergner, and
11  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12  * Plus various changes from other IBM teams...
13  *
14  * Copyright (C) 2006 Michael Ellerman, IBM Corporation
15  */
16 
17 #define pr_fmt(fmt)     "pseries-hotplug-cpu: " fmt
18 
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>	/* for idle_task_exit */
23 #include <linux/sched/hotplug.h>
24 #include <linux/cpu.h>
25 #include <linux/of.h>
26 #include <linux/slab.h>
27 #include <asm/prom.h>
28 #include <asm/rtas.h>
29 #include <asm/firmware.h>
30 #include <asm/machdep.h>
31 #include <asm/vdso_datapage.h>
32 #include <asm/xics.h>
33 #include <asm/xive.h>
34 #include <asm/plpar_wrappers.h>
35 #include <asm/topology.h>
36 
37 #include "pseries.h"
38 
39 /* This version can't take the spinlock, because it never returns */
40 static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
41 
42 static void rtas_stop_self(void)
43 {
44 	static struct rtas_args args;
45 
46 	local_irq_disable();
47 
48 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
49 
50 	printk("cpu %u (hwid %u) Ready to die...\n",
51 	       smp_processor_id(), hard_smp_processor_id());
52 
53 	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
54 
55 	panic("Alas, I survived.\n");
56 }
57 
/*
 * Final act of a CPU taking itself offline; runs on the dying CPU.
 *
 * Disables interrupts, exits the idle task, tears down the interrupt
 * controller state (XIVE or XICS, whichever is active), unregisters the
 * SLB shadow buffer for this hardware thread, and finally calls
 * rtas_stop_self(), which does not return.
 */
static void pseries_cpu_offline_self(void)
{
	unsigned int hwcpu = hard_smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}
76 
/*
 * Platform smp_ops->cpu_disable hook, run on the CPU being offlined.
 *
 * Clears the CPU from the online mask, updates the vdso processor
 * count, migrates boot_cpuid away if this was the boot CPU, moves
 * interrupts off this CPU (XIVE or XICS), and cleans up MMU context
 * state.  Always returns 0.
 */
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/*fix boot_cpuid here*/
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();

	cleanup_cpu_mmu_context();

	return 0;
}
98 
/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);

	/*
	 * Poll firmware until the hardware thread reports stopped (or a
	 * hardware error).  There is no overall deadline: a warning is
	 * printed every 120 seconds and polling continues.
	 */
	while (true) {
		cpu_status = smp_query_cpu_stopped(pcpu);
		if (cpu_status == QCSS_STOPPED ||
		    cpu_status == QCSS_HARDWARE_ERROR)
			break;

		if (time_after(jiffies, timeout)) {
			pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
				cpu, pcpu);
			/* re-arm for the next 120-second warning interval */
			timeout = jiffies + msecs_to_jiffies(120000);
		}

		cond_resched();
	}

	if (cpu_status == QCSS_HARDWARE_ERROR) {
		pr_warn("CPU %i (hwid %i) reported error while dying\n",
			cpu, pcpu);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	/*
	 * NOTE(review): clearing cpu_start presumably parks the thread in
	 * the secondary holding loop if it is later restarted — confirm
	 * against the secondary entry code.
	 */
	paca_ptrs[cpu]->cpu_start = 0;
}
144 
145 /*
146  * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
147  * here is that a cpu device node may represent up to two logical cpus
148  * in the SMT case.  We must honor the assumption in other code that
149  * the logical ids for sibling SMT threads x and y are adjacent, such
150  * that x^1 == y and y^1 == x.
151  */
152 static int pseries_add_processor(struct device_node *np)
153 {
154 	unsigned int cpu;
155 	cpumask_var_t candidate_mask, tmp;
156 	int err = -ENOSPC, len, nthreads, i;
157 	const __be32 *intserv;
158 
159 	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
160 	if (!intserv)
161 		return 0;
162 
163 	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
164 	zalloc_cpumask_var(&tmp, GFP_KERNEL);
165 
166 	nthreads = len / sizeof(u32);
167 	for (i = 0; i < nthreads; i++)
168 		cpumask_set_cpu(i, tmp);
169 
170 	cpu_maps_update_begin();
171 
172 	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
173 
174 	/* Get a bitmap of unoccupied slots. */
175 	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
176 	if (cpumask_empty(candidate_mask)) {
177 		/* If we get here, it most likely means that NR_CPUS is
178 		 * less than the partition's max processors setting.
179 		 */
180 		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
181 		       " supports %d logical cpus.\n", np,
182 		       num_possible_cpus());
183 		goto out_unlock;
184 	}
185 
186 	while (!cpumask_empty(tmp))
187 		if (cpumask_subset(tmp, candidate_mask))
188 			/* Found a range where we can insert the new cpu(s) */
189 			break;
190 		else
191 			cpumask_shift_left(tmp, tmp, nthreads);
192 
193 	if (cpumask_empty(tmp)) {
194 		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
195 		       " processor %pOFn with %d thread(s)\n", np,
196 		       nthreads);
197 		goto out_unlock;
198 	}
199 
200 	for_each_cpu(cpu, tmp) {
201 		BUG_ON(cpu_present(cpu));
202 		set_cpu_present(cpu, true);
203 		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
204 	}
205 	err = 0;
206 out_unlock:
207 	cpu_maps_update_done();
208 	free_cpumask_var(candidate_mask);
209 	free_cpumask_var(tmp);
210 	return err;
211 }
212 
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			/* the thread must have been offlined already */
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		/* for_each_present_cpu() exhausts with cpu >= nr_cpu_ids */
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}
249 
250 static int dlpar_offline_cpu(struct device_node *dn)
251 {
252 	int rc = 0;
253 	unsigned int cpu;
254 	int len, nthreads, i;
255 	const __be32 *intserv;
256 	u32 thread;
257 
258 	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
259 	if (!intserv)
260 		return -EINVAL;
261 
262 	nthreads = len / sizeof(u32);
263 
264 	cpu_maps_update_begin();
265 	for (i = 0; i < nthreads; i++) {
266 		thread = be32_to_cpu(intserv[i]);
267 		for_each_present_cpu(cpu) {
268 			if (get_hard_smp_processor_id(cpu) != thread)
269 				continue;
270 
271 			if (!cpu_online(cpu))
272 				break;
273 
274 			/*
275 			 * device_offline() will return -EBUSY (via cpu_down()) if there
276 			 * is only one CPU left. Check it here to fail earlier and with a
277 			 * more informative error message, while also retaining the
278 			 * cpu_add_remove_lock to be sure that no CPUs are being
279 			 * online/offlined during this check.
280 			 */
281 			if (num_online_cpus() == 1) {
282 				pr_warn("Unable to remove last online CPU %pOFn\n", dn);
283 				rc = -EBUSY;
284 				goto out_unlock;
285 			}
286 
287 			cpu_maps_update_done();
288 			rc = device_offline(get_cpu_device(cpu));
289 			if (rc)
290 				goto out;
291 			cpu_maps_update_begin();
292 			break;
293 		}
294 		if (cpu == num_possible_cpus()) {
295 			pr_warn("Could not find cpu to offline with physical id 0x%x\n",
296 				thread);
297 		}
298 	}
299 out_unlock:
300 	cpu_maps_update_done();
301 
302 out:
303 	return rc;
304 }
305 
306 static int dlpar_online_cpu(struct device_node *dn)
307 {
308 	int rc = 0;
309 	unsigned int cpu;
310 	int len, nthreads, i;
311 	const __be32 *intserv;
312 	u32 thread;
313 
314 	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
315 	if (!intserv)
316 		return -EINVAL;
317 
318 	nthreads = len / sizeof(u32);
319 
320 	cpu_maps_update_begin();
321 	for (i = 0; i < nthreads; i++) {
322 		thread = be32_to_cpu(intserv[i]);
323 		for_each_present_cpu(cpu) {
324 			if (get_hard_smp_processor_id(cpu) != thread)
325 				continue;
326 			cpu_maps_update_done();
327 			find_and_online_cpu_nid(cpu);
328 			rc = device_online(get_cpu_device(cpu));
329 			if (rc) {
330 				dlpar_offline_cpu(dn);
331 				goto out;
332 			}
333 			cpu_maps_update_begin();
334 
335 			break;
336 		}
337 		if (cpu == num_possible_cpus())
338 			printk(KERN_WARNING "Could not find cpu to online "
339 			       "with physical id 0x%x\n", thread);
340 	}
341 	cpu_maps_update_done();
342 
343 out:
344 	return rc;
345 
346 }
347 
348 static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
349 {
350 	struct device_node *child = NULL;
351 	u32 my_drc_index;
352 	bool found;
353 	int rc;
354 
355 	/* Assume cpu doesn't exist */
356 	found = false;
357 
358 	for_each_child_of_node(parent, child) {
359 		rc = of_property_read_u32(child, "ibm,my-drc-index",
360 					  &my_drc_index);
361 		if (rc)
362 			continue;
363 
364 		if (my_drc_index == drc_index) {
365 			of_node_put(child);
366 			found = true;
367 			break;
368 		}
369 	}
370 
371 	return found;
372 }
373 
/*
 * drc_info_valid_index - validate @drc_index against "ibm,drc-info".
 *
 * Each drc-info record describes a run of sequential drc indexes
 * (start index, element count, increment).  Returns true if @drc_index
 * lands exactly on one of the values in a "CPU"-type record of
 * @parent's "ibm,drc-info" property, false otherwise.
 */
static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
	struct property *info;
	struct of_drc_info drc;
	const __be32 *value;
	u32 index;
	int count, i, j;

	info = of_find_property(parent, "ibm,drc-info", NULL);
	if (!info)
		return false;

	value = of_prop_next_u32(info, NULL, &count);

	/* First value of ibm,drc-info is number of drc-info records */
	if (value)
		value++;
	else
		return false;

	for (i = 0; i < count; i++) {
		if (of_read_drc_info_cell(&info, &value, &drc))
			return false;

		/* stop at the first non-CPU record; presumably CPU
		 * records come first — confirm against PAPR */
		if (strncmp(drc.drc_type, "CPU", 3))
			break;

		/* records cover ascending ranges; skip until in range */
		if (drc_index > drc.last_drc_index)
			continue;

		index = drc.drc_index_start;
		for (j = 0; j < drc.num_sequential_elems; j++) {
			if (drc_index == index)
				return true;

			index += drc.sequential_inc;
		}
	}

	return false;
}
415 
416 static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
417 {
418 	bool found = false;
419 	int rc, index;
420 
421 	if (of_find_property(parent, "ibm,drc-info", NULL))
422 		return drc_info_valid_index(parent, drc_index);
423 
424 	/* Note that the format of the ibm,drc-indexes array is
425 	 * the number of entries in the array followed by the array
426 	 * of drc values so we start looking at index = 1.
427 	 */
428 	index = 1;
429 	while (!found) {
430 		u32 drc;
431 
432 		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
433 						index++, &drc);
434 
435 		if (rc)
436 			break;
437 
438 		if (drc == drc_index)
439 			found = true;
440 	}
441 
442 	return found;
443 }
444 
/*
 * dlpar_cpu_add - hot-add the CPU with DRC index @drc_index.
 *
 * Validates that the CPU does not already exist and the drc index is
 * advertised by firmware, then: acquires the DRC, builds the device
 * tree node via configure-connector, attaches it, and onlines its
 * threads.  Each failure path unwinds the steps already taken and
 * returns the first error encountered.
 *
 * Returns 0 on success or a negative errno.
 */
static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn, parent);

	/* Regardless we are done with parent now */
	of_node_put(parent);

	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		/* only free the cc nodes if the DRC release succeeded */
		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
			dn, rc, drc_index);

		/* only release the DRC if the node detach succeeded */
		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
		 drc_index);
	return rc;
}
521 
522 static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
523 {
524 	int rc;
525 
526 	pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
527 		 dn, drc_index);
528 
529 	rc = dlpar_offline_cpu(dn);
530 	if (rc) {
531 		pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
532 		return -EINVAL;
533 	}
534 
535 	rc = dlpar_release_drc(drc_index);
536 	if (rc) {
537 		pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
538 			drc_index, dn, rc);
539 		dlpar_online_cpu(dn);
540 		return rc;
541 	}
542 
543 	rc = dlpar_detach_node(dn);
544 	if (rc) {
545 		int saved_rc = rc;
546 
547 		pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc);
548 
549 		rc = dlpar_acquire_drc(drc_index);
550 		if (!rc)
551 			dlpar_online_cpu(dn);
552 
553 		return saved_rc;
554 	}
555 
556 	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
557 	return 0;
558 }
559 
560 static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
561 {
562 	struct device_node *dn;
563 	u32 my_index;
564 	int rc;
565 
566 	for_each_node_by_type(dn, "cpu") {
567 		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
568 		if (rc)
569 			continue;
570 
571 		if (my_index == drc_index)
572 			break;
573 	}
574 
575 	return dn;
576 }
577 
578 static int dlpar_cpu_remove_by_index(u32 drc_index)
579 {
580 	struct device_node *dn;
581 	int rc;
582 
583 	dn = cpu_drc_index_to_dn(drc_index);
584 	if (!dn) {
585 		pr_warn("Cannot find CPU (drc index %x) to remove\n",
586 			drc_index);
587 		return -ENODEV;
588 	}
589 
590 	rc = dlpar_cpu_remove(dn, drc_index);
591 	of_node_put(dn);
592 	return rc;
593 }
594 
/*
 * find_dlpar_cpus_to_remove - collect drc indexes of removal candidates.
 * @cpu_drcs: output array with room for @cpus_to_remove entries.
 * @cpus_to_remove: number of CPUs the caller wants to remove.
 *
 * Walks the device tree cpu nodes recording one drc index per node.
 * Returns the number of CPUs found (capped at cpus_to_remove + 1 so the
 * caller can tell whether removal would take out the last CPU), or -1
 * if a node lacks a readable "ibm,my-drc-index" property.
 */
static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			/* drop the iterator's reference before bailing */
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %pOFn\n",
				dn);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}
634 
635 static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
636 {
637 	u32 *cpu_drcs;
638 	int cpus_found;
639 	int cpus_removed = 0;
640 	int i, rc;
641 
642 	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);
643 
644 	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
645 	if (!cpu_drcs)
646 		return -EINVAL;
647 
648 	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
649 	if (cpus_found <= cpus_to_remove) {
650 		kfree(cpu_drcs);
651 		return -EINVAL;
652 	}
653 
654 	for (i = 0; i < cpus_to_remove; i++) {
655 		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
656 		if (rc)
657 			break;
658 
659 		cpus_removed++;
660 	}
661 
662 	if (cpus_removed != cpus_to_remove) {
663 		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
664 
665 		for (i = 0; i < cpus_removed; i++)
666 			dlpar_cpu_add(cpu_drcs[i]);
667 
668 		rc = -EINVAL;
669 	} else {
670 		rc = 0;
671 	}
672 
673 	kfree(cpu_drcs);
674 	return rc;
675 }
676 
677 static int find_drc_info_cpus_to_add(struct device_node *cpus,
678 				     struct property *info,
679 				     u32 *cpu_drcs, u32 cpus_to_add)
680 {
681 	struct of_drc_info drc;
682 	const __be32 *value;
683 	u32 count, drc_index;
684 	int cpus_found = 0;
685 	int i, j;
686 
687 	if (!info)
688 		return -1;
689 
690 	value = of_prop_next_u32(info, NULL, &count);
691 	if (value)
692 		value++;
693 
694 	for (i = 0; i < count; i++) {
695 		of_read_drc_info_cell(&info, &value, &drc);
696 		if (strncmp(drc.drc_type, "CPU", 3))
697 			break;
698 
699 		drc_index = drc.drc_index_start;
700 		for (j = 0; j < drc.num_sequential_elems; j++) {
701 			if (dlpar_cpu_exists(cpus, drc_index))
702 				continue;
703 
704 			cpu_drcs[cpus_found++] = drc_index;
705 
706 			if (cpus_found == cpus_to_add)
707 				return cpus_found;
708 
709 			drc_index += drc.sequential_inc;
710 		}
711 	}
712 
713 	return cpus_found;
714 }
715 
716 static int find_drc_index_cpus_to_add(struct device_node *cpus,
717 				      u32 *cpu_drcs, u32 cpus_to_add)
718 {
719 	int cpus_found = 0;
720 	int index, rc;
721 	u32 drc_index;
722 
723 	/* Search the ibm,drc-indexes array for possible CPU drcs to
724 	 * add. Note that the format of the ibm,drc-indexes array is
725 	 * the number of entries in the array followed by the array
726 	 * of drc values so we start looking at index = 1.
727 	 */
728 	index = 1;
729 	while (cpus_found < cpus_to_add) {
730 		rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
731 						index++, &drc_index);
732 
733 		if (rc)
734 			break;
735 
736 		if (dlpar_cpu_exists(cpus, drc_index))
737 			continue;
738 
739 		cpu_drcs[cpus_found++] = drc_index;
740 	}
741 
742 	return cpus_found;
743 }
744 
745 static int dlpar_cpu_add_by_count(u32 cpus_to_add)
746 {
747 	struct device_node *parent;
748 	struct property *info;
749 	u32 *cpu_drcs;
750 	int cpus_added = 0;
751 	int cpus_found;
752 	int i, rc;
753 
754 	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);
755 
756 	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
757 	if (!cpu_drcs)
758 		return -EINVAL;
759 
760 	parent = of_find_node_by_path("/cpus");
761 	if (!parent) {
762 		pr_warn("Could not find CPU root node in device tree\n");
763 		kfree(cpu_drcs);
764 		return -1;
765 	}
766 
767 	info = of_find_property(parent, "ibm,drc-info", NULL);
768 	if (info)
769 		cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
770 	else
771 		cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);
772 
773 	of_node_put(parent);
774 
775 	if (cpus_found < cpus_to_add) {
776 		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
777 			cpus_found, cpus_to_add);
778 		kfree(cpu_drcs);
779 		return -EINVAL;
780 	}
781 
782 	for (i = 0; i < cpus_to_add; i++) {
783 		rc = dlpar_cpu_add(cpu_drcs[i]);
784 		if (rc)
785 			break;
786 
787 		cpus_added++;
788 	}
789 
790 	if (cpus_added < cpus_to_add) {
791 		pr_warn("CPU hot-add failed, removing any added CPUs\n");
792 
793 		for (i = 0; i < cpus_added; i++)
794 			dlpar_cpu_remove_by_index(cpu_drcs[i]);
795 
796 		rc = -EINVAL;
797 	} else {
798 		rc = 0;
799 	}
800 
801 	kfree(cpu_drcs);
802 	return rc;
803 }
804 
805 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
806 {
807 	u32 count, drc_index;
808 	int rc;
809 
810 	count = hp_elog->_drc_u.drc_count;
811 	drc_index = hp_elog->_drc_u.drc_index;
812 
813 	lock_device_hotplug();
814 
815 	switch (hp_elog->action) {
816 	case PSERIES_HP_ELOG_ACTION_REMOVE:
817 		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
818 			rc = dlpar_cpu_remove_by_count(count);
819 		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
820 			rc = dlpar_cpu_remove_by_index(drc_index);
821 		else
822 			rc = -EINVAL;
823 		break;
824 	case PSERIES_HP_ELOG_ACTION_ADD:
825 		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
826 			rc = dlpar_cpu_add_by_count(count);
827 		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
828 			rc = dlpar_cpu_add(drc_index);
829 		else
830 			rc = -EINVAL;
831 		break;
832 	default:
833 		pr_err("Invalid action (%d) specified\n", hp_elog->action);
834 		rc = -EINVAL;
835 		break;
836 	}
837 
838 	unlock_device_hotplug();
839 	return rc;
840 }
841 
842 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
843 
844 static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
845 {
846 	u32 drc_index;
847 	int rc;
848 
849 	rc = kstrtou32(buf, 0, &drc_index);
850 	if (rc)
851 		return -EINVAL;
852 
853 	rc = dlpar_cpu_add(drc_index);
854 
855 	return rc ? rc : count;
856 }
857 
858 static ssize_t dlpar_cpu_release(const char *buf, size_t count)
859 {
860 	struct device_node *dn;
861 	u32 drc_index;
862 	int rc;
863 
864 	dn = of_find_node_by_path(buf);
865 	if (!dn)
866 		return -EINVAL;
867 
868 	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
869 	if (rc) {
870 		of_node_put(dn);
871 		return -EINVAL;
872 	}
873 
874 	rc = dlpar_cpu_remove(dn, drc_index);
875 	of_node_put(dn);
876 
877 	return rc ? rc : count;
878 }
879 
880 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
881 
882 static int pseries_smp_notifier(struct notifier_block *nb,
883 				unsigned long action, void *data)
884 {
885 	struct of_reconfig_data *rd = data;
886 	int err = 0;
887 
888 	switch (action) {
889 	case OF_RECONFIG_ATTACH_NODE:
890 		err = pseries_add_processor(rd->dn);
891 		break;
892 	case OF_RECONFIG_DETACH_NODE:
893 		pseries_remove_processor(rd->dn);
894 		break;
895 	}
896 	return notifier_from_errno(err);
897 }
898 
899 static struct notifier_block pseries_smp_nb = {
900 	.notifier_call = pseries_smp_notifier,
901 };
902 
903 static int __init pseries_cpu_hotplug_init(void)
904 {
905 	int qcss_tok;
906 
907 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
908 	ppc_md.cpu_probe = dlpar_cpu_probe;
909 	ppc_md.cpu_release = dlpar_cpu_release;
910 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
911 
912 	rtas_stop_self_token = rtas_token("stop-self");
913 	qcss_tok = rtas_token("query-cpu-stopped-state");
914 
915 	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
916 			qcss_tok == RTAS_UNKNOWN_SERVICE) {
917 		printk(KERN_INFO "CPU Hotplug not supported by firmware "
918 				"- disabling.\n");
919 		return 0;
920 	}
921 
922 	smp_ops->cpu_offline_self = pseries_cpu_offline_self;
923 	smp_ops->cpu_disable = pseries_cpu_disable;
924 	smp_ops->cpu_die = pseries_cpu_die;
925 
926 	/* Processors can be added/removed only on LPAR */
927 	if (firmware_has_feature(FW_FEATURE_LPAR))
928 		of_reconfig_notifier_register(&pseries_smp_nb);
929 
930 	return 0;
931 }
932 machine_arch_initcall(pseries, pseries_cpu_hotplug_init);
933