xref: /linux/arch/powerpc/platforms/pseries/dlpar.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>
#include <asm/prom.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

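/*
 * Layout of the 4K work area passed to the ibm,configure-connector RTAS
 * call. name_offset and prop_offset are byte offsets from the start of
 * the work area to the returned node name and property value.
 */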
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

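/*
 * Build a struct property from the name/value data that the last
 * ibm,configure-connector call placed in the work area. The caller
 * owns the result and frees it with dlpar_free_cc_property().
 */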
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

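/*
 * Allocate a bare, dynamic device_node named after the string in the
 * work area. The node is not yet attached to the device tree; its
 * parent/child/sibling links are filled in by the caller.
 */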
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

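/* Return status values from the ibm,configure-connector RTAS call. */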
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define ERR_CFG_USE     -9003

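/*
 * Repeatedly call ibm,configure-connector for @drc_index and assemble
 * the returned nodes and properties into a detached device subtree
 * rooted under @parent. Returns the first new node on success, or NULL
 * on failure (any partially built subtree is freed).
 */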
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	struct rtas_work_area *work_area;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	work_area = rtas_work_area_alloc(SZ_4K);
	data_buf = rtas_work_area_raw_buf(work_area);

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		do {
			rc = rtas_call(cc_token, 2, 1, NULL,
				       rtas_work_area_phys(work_area), NULL);
		} while (rtas_busy_delay(rc));

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	rtas_work_area_free(work_area);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	for_each_child_of_node(dn, child)
		dlpar_detach_node(child);

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

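/*
 * Queue attach operations for an entire configure-connector subtree
 * (node, then children, then siblings) on an of_changeset.
 */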
static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs,
					struct device_node *dn)
{
	int rc;

	rc = of_changeset_attach_node(ocs, dn);

	if (!rc && dn->child)
		rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child);
	if (!rc && dn->sibling)
		rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling);

	return rc;
}

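/*
 * Sensor and indicator tokens used with rtas_get_sensor() and
 * rtas_set_indicator() to query and change the state of a DR connector.
 */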
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

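/*
 * Acquire a DRC for use by the OS: the connector must currently be
 * unusable; mark its allocation state usable and then unisolate it.
 * The allocation state is rolled back if unisolation fails.
 */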
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

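/*
 * Release a DRC back to the platform: the connector must currently be
 * present; isolate it and then mark its allocation state unusable.
 * The connector is unisolated again if the allocation change fails.
 */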
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

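/*
 * Find the device node whose "ibm,my-drc-index" property matches
 * @index. Returns the node with a reference held, or NULL.
 */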
static struct device_node *
get_device_node_with_drc_index(u32 index)
{
	struct device_node *np = NULL;
	u32 node_index;
	int rc;

	for_each_node_with_property(np, "ibm,my-drc-index") {
		rc = of_property_read_u32(np, "ibm,my-drc-index",
					     &node_index);
		if (rc) {
			pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
			       __func__, np, "ibm,my-drc-index", rc);
			of_node_put(np);
			return NULL;
		}

		if (index == node_index)
			break;
	}

	return np;
}

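/*
 * Find the device node whose "ibm,drc-info" property covers @index.
 * The property is a count followed by that many DRC info entries, each
 * describing a range of sequentially assigned DRC indexes. Returns the
 * node with a reference held, or NULL.
 */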
static struct device_node *
get_device_node_with_drc_info(u32 index)
{
	struct device_node *np = NULL;
	struct of_drc_info drc;
	struct property *info;
	const __be32 *value;
	u32 node_index;
	int i, j, count;

	for_each_node_with_property(np, "ibm,drc-info") {
		info = of_find_property(np, "ibm,drc-info", NULL);
		if (info == NULL) {
			/* XXX can this happen? */
			of_node_put(np);
			return NULL;
		}
		value = of_prop_next_u32(info, NULL, &count);
		if (value == NULL)
			continue;
		value++;
		for (i = 0; i < count; i++) {
			if (of_read_drc_info_cell(&info, &value, &drc))
				break;
			if (index > drc.last_drc_index)
				continue;
			node_index = drc.drc_index_start;
			for (j = 0; j < drc.num_sequential_elems; j++) {
				if (index == node_index)
					return np;
				node_index += drc.sequential_inc;
			}
		}
	}

	return NULL;
}

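/*
 * Add the device subtree for DRC index @index: configure the connector
 * under its parent node and graft the resulting nodes onto the device
 * tree with an of_changeset. Fails if a node with that index already
 * exists.
 */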
static int dlpar_hp_dt_add(u32 index)
{
	struct device_node *np, *nodes;
	struct of_changeset ocs;
	int rc;

	/*
	 * Do not add the device node(s) if they already exist in the
	 * device tree.
	 */
	np = get_device_node_with_drc_index(index);
	if (np) {
		pr_err("%s: Adding device node for index (%d), but "
				"already exists in the device tree\n",
				__func__, index);
		rc = -EINVAL;
		goto out;
	}

	np = get_device_node_with_drc_info(index);

	if (!np)
		return -EIO;

	/* Next, configure the connector. */
	nodes = dlpar_configure_connector(cpu_to_be32(index), np);
	if (!nodes) {
		rc = -EIO;
		goto out;
	}

	/*
	 * Add the new nodes from dlpar_configure_connector() onto
	 * the device-tree.
	 */
	of_changeset_init(&ocs);
	rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes);

	if (!rc)
		rc = of_changeset_apply(&ocs);
	else
		dlpar_free_cc_nodes(nodes);

	of_changeset_destroy(&ocs);

out:
	of_node_put(np);
	return rc;
}

static int changeset_detach_node_recursive(struct of_changeset *ocs,
					struct device_node *node)
{
	struct device_node *child;
	int rc;

	for_each_child_of_node(node, child) {
		rc = changeset_detach_node_recursive(ocs, child);
		if (rc) {
			of_node_put(child);
			return rc;
		}
	}

	return of_changeset_detach_node(ocs, node);
}

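/*
 * Remove every device tree node whose "ibm,my-drc-index" matches
 * @drc_index, detaching each matching subtree (children first) through
 * an of_changeset and applying the changeset in one go.
 */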
static int dlpar_hp_dt_remove(u32 drc_index)
{
	struct device_node *np;
	struct of_changeset ocs;
	u32 index;
	int rc = 0;

	/*
	 * Prune all nodes with a matching index.
	 */
	of_changeset_init(&ocs);

	for_each_node_with_property(np, "ibm,my-drc-index") {
		rc = of_property_read_u32(np, "ibm,my-drc-index", &index);
		if (rc) {
			pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
				__func__, np, "ibm,my-drc-index", rc);
			of_node_put(np);
			goto out;
		}

		if (index == drc_index) {
			rc = changeset_detach_node_recursive(&ocs, np);
			if (rc) {
				of_node_put(np);
				goto out;
			}
		}
	}

	rc = of_changeset_apply(&ocs);

out:
	of_changeset_destroy(&ocs);
	return rc;
}

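/*
 * Handle a PSERIES_HP_ELOG_RESOURCE_DT hotplug request: add or remove
 * the device tree nodes identified by the DRC index in the error log.
 */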
static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe)
{
	u32 drc_index;
	int rc;

	if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX)
		return -EINVAL;

	drc_index = be32_to_cpu(phpe->_drc_u.drc_index);

	lock_device_hotplug();

	switch (phpe->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		rc = dlpar_hp_dt_add(drc_index);
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		rc = dlpar_hp_dt_remove(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", phpe->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();

	return rc;
}

int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_DT:
		rc = dlpar_hp_dt(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree(work);
}

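/*
 * Queue a hotplug error log for processing on the ordered DLPAR
 * workqueue. The log is copied with GFP_ATOMIC so this may be called
 * from atomic context; if either allocation fails the event is
 * silently dropped.
 */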
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else if (sysfs_streq(arg, "dt")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

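/*
 * Handle a DLPAR request written to /sys/kernel/dlpar in the form
 * "<resource> <action> <id_type> <id>", for example (illustrative
 * values only):
 *
 *	echo "dt add index 0x9001" > /sys/kernel/dlpar
 */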
static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user; it will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu,dt");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);