xref: /linux/drivers/s390/char/sclp_cmd.c (revision 603d6637aeb9a14cd0087d7c24c3777bfa51fcbf)
1 /*
2  * Copyright IBM Corp. 2007, 2009
3  *
4  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5  *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "sclp_cmd"
9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10 
11 #include <linux/completion.h>
12 #include <linux/init.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/mm.h>
18 #include <linux/mmzone.h>
19 #include <linux/memory.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <asm/chpid.h>
23 #include <asm/sclp.h>
24 #include <asm/setup.h>
25 #include <asm/ctl_reg.h>
26 
27 #include "sclp.h"
28 
#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

/* Response block for the READ SCP INFO sclp command (one 4K page). */
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_reserved3[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
50 
/* Statically allocated sccb for the early (pre-allocator) READ SCP INFO. */
static struct read_info_sccb __initdata early_read_info_sccb;
/* Non-zero once early_read_info_sccb holds a valid response. */
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;			/* facility bits from READ SCP INFO */
static u8 sclp_fac84;
static u8 sclp_fac85;
static unsigned long long rzm;		/* storage increment size in bytes */
static unsigned long long rnmax;	/* highest storage increment number */
59 
/*
 * Synchronously execute an sclp command before the interrupt machinery
 * is up: enable the service-signal subclass (control register 0, bit 9),
 * issue the service call and wait for its external interrupt by loading
 * an enabled wait PSW.
 */
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	/* Wait for the service-signal external interrupt. */
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
77 
78 static void __init sclp_read_info_early(void)
79 {
80 	int rc;
81 	int i;
82 	struct read_info_sccb *sccb;
83 	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
84 				  SCLP_CMDW_READ_SCP_INFO};
85 
86 	sccb = &early_read_info_sccb;
87 	for (i = 0; i < ARRAY_SIZE(commands); i++) {
88 		do {
89 			memset(sccb, 0, sizeof(*sccb));
90 			sccb->header.length = sizeof(*sccb);
91 			sccb->header.function_code = 0x80;
92 			sccb->header.control_mask[2] = 0x80;
93 			rc = sclp_cmd_sync_early(commands[i], sccb);
94 		} while (rc == -EBUSY);
95 
96 		if (rc)
97 			break;
98 		if (sccb->header.response_code == 0x10) {
99 			early_read_info_sccb_valid = 1;
100 			break;
101 		}
102 		if (sccb->header.response_code != 0x1f0)
103 			break;
104 	}
105 }
106 
107 void __init sclp_facilities_detect(void)
108 {
109 	struct read_info_sccb *sccb;
110 
111 	sclp_read_info_early();
112 	if (!early_read_info_sccb_valid)
113 		return;
114 
115 	sccb = &early_read_info_sccb;
116 	sclp_facilities = sccb->facilities;
117 	sclp_fac84 = sccb->fac84;
118 	sclp_fac85 = sccb->fac85;
119 	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
120 	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
121 	rzm <<= 20;
122 }
123 
/* Return the highest storage increment number reported by READ SCP INFO. */
unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}
128 
/* Return the storage increment size in bytes. */
unsigned long long sclp_get_rzm(void)
{
	return rzm;
}
133 
/* Return facility byte 85 of the READ SCP INFO response. */
u8 sclp_get_fac85(void)
{
	return sclp_fac85;
}
EXPORT_SYMBOL_GPL(sclp_get_fac85);
139 
/*
 * Copy IPL-relevant data (dump flag, loadparm) from the early READ SCP
 * INFO response into @info. This function will be called after
 * sclp_facilities_detect(), which gets called from early.c code.
 * Therefore the sccb should have valid contents. If the early read
 * failed, @info->is_valid stays untouched (caller-initialized).
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	/* Flag bit 0x2 indicates dump support (byte 91 of the response). */
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}
156 
/* Request callback: wake up the task sleeping in do_sync_request(). */
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}
163 
164 static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
165 {
166 	struct completion completion;
167 	struct sclp_req *request;
168 	int rc;
169 
170 	request = kzalloc(sizeof(*request), GFP_KERNEL);
171 	if (!request)
172 		return -ENOMEM;
173 	request->command = cmd;
174 	request->sccb = sccb;
175 	request->status = SCLP_REQ_FILLED;
176 	request->callback = sclp_sync_callback;
177 	request->callback_data = &completion;
178 	init_completion(&completion);
179 
180 	/* Perform sclp request. */
181 	rc = sclp_add_request(request);
182 	if (rc)
183 		goto out;
184 	wait_for_completion(&completion);
185 
186 	/* Check response. */
187 	if (request->status != SCLP_REQ_DONE) {
188 		pr_warning("sync request failed (cmd=0x%08x, "
189 			   "status=0x%02x)\n", cmd, request->status);
190 		rc = -EIO;
191 	}
192 out:
193 	kfree(request);
194 	return rc;
195 }
196 
197 /*
198  * CPU configuration related functions.
199  */
200 
#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

/* Response block for READ CPU INFO (one 4K page). */
struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;		/* number of configured cpus */
	u16	offset_configured;	/* byte offset of their entry array */
	u16	nr_standby;		/* number of standby cpus */
	u16	offset_standby;		/* byte offset of their entry array */
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
213 
214 static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
215 			       struct read_cpu_info_sccb *sccb)
216 {
217 	char *page = (char *) sccb;
218 
219 	memset(info, 0, sizeof(*info));
220 	info->configured = sccb->nr_configured;
221 	info->standby = sccb->nr_standby;
222 	info->combined = sccb->nr_configured + sccb->nr_standby;
223 	info->has_cpu_type = sclp_fac84 & 0x1;
224 	memcpy(&info->cpu, page + sccb->offset_configured,
225 	       info->combined * sizeof(struct sclp_cpu_entry));
226 }
227 
228 int sclp_get_cpu_info(struct sclp_cpu_info *info)
229 {
230 	int rc;
231 	struct read_cpu_info_sccb *sccb;
232 
233 	if (!SCLP_HAS_CPU_INFO)
234 		return -EOPNOTSUPP;
235 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
236 	if (!sccb)
237 		return -ENOMEM;
238 	sccb->header.length = sizeof(*sccb);
239 	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
240 	if (rc)
241 		goto out;
242 	if (sccb->header.response_code != 0x0010) {
243 		pr_warning("readcpuinfo failed (response=0x%04x)\n",
244 			   sccb->header.response_code);
245 		rc = -EIO;
246 		goto out;
247 	}
248 	sclp_fill_cpu_info(info, sccb);
249 out:
250 	free_page((unsigned long) sccb);
251 	return rc;
252 }
253 
/* Minimal sccb (header only) for the (de)configure cpu commands. */
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
257 
258 static int do_cpu_configure(sclp_cmdw_t cmd)
259 {
260 	struct cpu_configure_sccb *sccb;
261 	int rc;
262 
263 	if (!SCLP_HAS_CPU_RECONFIG)
264 		return -EOPNOTSUPP;
265 	/*
266 	 * This is not going to cross a page boundary since we force
267 	 * kmalloc to have a minimum alignment of 8 bytes on s390.
268 	 */
269 	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
270 	if (!sccb)
271 		return -ENOMEM;
272 	sccb->header.length = sizeof(*sccb);
273 	rc = do_sync_request(cmd, sccb);
274 	if (rc)
275 		goto out;
276 	switch (sccb->header.response_code) {
277 	case 0x0020:
278 	case 0x0120:
279 		break;
280 	default:
281 		pr_warning("configure cpu failed (cmd=0x%08x, "
282 			   "response=0x%04x)\n", cmd,
283 			   sccb->header.response_code);
284 		rc = -EIO;
285 		break;
286 	}
287 out:
288 	kfree(sccb);
289 	return rc;
290 }
291 
/* Configure cpu @cpu; the cpu number is encoded in bits 16-23 of the cmd. */
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
296 
/* Deconfigure cpu @cpu; the cpu number is encoded in bits 16-23 of the cmd. */
int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
301 
302 #ifdef CONFIG_MEMORY_HOTPLUG
303 
/* Serializes memory hotplug notifications and storage attachment. */
static DEFINE_MUTEX(sclp_mem_mutex);
/* List of memory_increment entries (kept sorted by insert_increment). */
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
/* One bit per storage element id (0-255); set once the element is attached. */
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
/* Set after a successful hotplug state change; checked on suspend/freeze. */
static int sclp_mem_state_changed;

/* One storage increment of rzm bytes, identified by its number rn. */
struct memory_increment {
	struct list_head list;
	u16 rn;		/* increment number (1-based) */
	int standby;	/* non-zero for standby (initially offline) memory */
	int usecount;	/* online ranges currently using this increment */
};

/* Sccb for the assign/unassign storage commands. */
struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;
321 
/*
 * Map a pfn to its physical memory device id: the index of the storage
 * increment containing it (rzm is a power of two here, see ilog2 use).
 */
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(rzm);
}
328 
/* Convert a 1-based storage increment number to its start address. */
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}
333 
334 static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
335 {
336 	struct assign_storage_sccb *sccb;
337 	int rc;
338 
339 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
340 	if (!sccb)
341 		return -ENOMEM;
342 	sccb->header.length = PAGE_SIZE;
343 	sccb->rn = rn;
344 	rc = do_sync_request(cmd, sccb);
345 	if (rc)
346 		goto out;
347 	switch (sccb->header.response_code) {
348 	case 0x0020:
349 	case 0x0120:
350 		break;
351 	default:
352 		pr_warning("assign storage failed (cmd=0x%08x, "
353 			   "response=0x%04x, rn=0x%04x)\n", cmd,
354 			   sccb->header.response_code, rn);
355 		rc = -EIO;
356 		break;
357 	}
358 out:
359 	free_page((unsigned long) sccb);
360 	return rc;
361 }
362 
363 static int sclp_assign_storage(u16 rn)
364 {
365 	unsigned long long start, address;
366 	int rc;
367 
368 	rc = do_assign_storage(0x000d0001, rn);
369 	if (rc)
370 		goto out;
371 	start = address = rn2addr(rn);
372 	for (; address < start + rzm; address += PAGE_SIZE)
373 		page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
374 out:
375 	return rc;
376 }
377 
/* Unassign storage increment @rn (sclp command 0x000c0001). */
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
382 
/* Response block for the attach storage element command. */
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;	/* number of entries in entries[] */
	u32 :32;
	u32 entries[0];	/* rn in the upper halfword of each entry */
} __packed;
390 
/*
 * Attach storage element @id (command 0x00080001 with the id in bits
 * 16-23) and unassign every increment the response reports as assigned,
 * so assignment is subsequently driven only by memory online requests.
 */
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			/* Upper halfword of each entry is the rn. */
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
420 
/*
 * Assign (@online != 0) or unassign the storage increments overlapping
 * [start, start + size). usecount tracks how many online ranges use an
 * increment, so it is assigned on first use and unassigned when the
 * last user goes away. Returns 0 on success, -EIO if any assign failed.
 */
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		/* List is sorted by rn: increment starts past the range. */
		if (start + size - 1 < istart)
			break;
		/* Increment ends before the range starts. */
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}
452 
/*
 * Memory hotplug notifier: assign storage before a range goes online
 * (MEM_GOING_ONLINE) and unassign it when onlining is cancelled or the
 * range goes offline. Any not-yet-attached storage elements are
 * attached first. Returns NOTIFY_BAD to veto the transition on error.
 */
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	/* Attach every storage element not yet marked in sclp_storage_ids. */
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		/* Nothing to do; state was changed at GOING_ONLINE/OFFLINE. */
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
494 
/*
 * Accumulate consecutive increment numbers and register each maximal
 * contiguous run as one memory block via add_memory(). The static
 * first_rn/num pair holds the pending run; calling with rn == 0 flushes
 * the final run. Ranges beyond VMEM_MAX_PHYS or memory_end are clipped
 * or skipped.
 */
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	/* Extend the pending run if rn is the next consecutive increment. */
	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long ) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	/* Start a new run with the current increment. */
	first_rn = rn;
	num = 1;
}
521 
522 static void __init sclp_add_standby_memory(void)
523 {
524 	struct memory_increment *incr;
525 
526 	list_for_each_entry(incr, &sclp_mem_list, list)
527 		if (incr->standby)
528 			add_memory_merged(incr->rn);
529 	add_memory_merged(0);
530 }
531 
/*
 * Insert a memory_increment into the sorted sclp_mem_list. For an
 * assigned increment (@assigned != 0) the given @rn is inserted in rn
 * order; for an unassigned one the first gap in the numbering is used
 * and @rn is ignored. Increments beyond rnmax are dropped. Allocation
 * failure is silently ignored (best effort during init).
 */
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	/* Assigned, non-standby memory counts as in use from the start. */
	if (!standby)
		new_incr->usecount = 1;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		/* Unassigned: stop at the first hole in the rn sequence. */
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
563 
564 static int sclp_mem_freeze(struct device *dev)
565 {
566 	if (!sclp_mem_state_changed)
567 		return 0;
568 	pr_err("Memory hotplug state changed, suspend refused.\n");
569 	return -EPERM;
570 }
571 
/* Response block for the read storage information command. */
struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;	/* highest storage element id */
	u16 assigned;	/* number of entries in entries[] */
	u16 standby;
	u16 :16;
	u32 entries[0];	/* rn in the upper halfword of each entry */
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,	/* veto suspend after hotplug */
};

/* Platform driver whose only purpose is the PM freeze hook above. */
static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
591 
/*
 * Initcall: detect standby memory by reading storage information for
 * every storage element (command 0x00040001, id in bits 16-23), build
 * sclp_mem_list, register the hotplug notifier plus the sclp_mem
 * platform driver/device, and finally add the standby memory blocks.
 * Requires a valid early READ SCP INFO response and facility bits
 * 0xe00000000000 to be set.
 */
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	/* sclp_max_storage_id is raised from the responses as we go. */
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			/* Element attached: entries are assigned memory. */
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			/* Nothing to insert for this element. */
			break;
		case 0x0410:
			/* Element not attached: entries are standby memory. */
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	/* Fill the gaps up to rnmax with unassigned standby increments. */
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);
663 
664 #endif /* CONFIG_MEMORY_HOTPLUG */
665 
666 /*
667  * Channel path configuration related functions.
668  */
669 
#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

/* Sccb for the (de)configure channel-path commands. */
struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
680 
681 static int do_chp_configure(sclp_cmdw_t cmd)
682 {
683 	struct chp_cfg_sccb *sccb;
684 	int rc;
685 
686 	if (!SCLP_HAS_CHP_RECONFIG)
687 		return -EOPNOTSUPP;
688 	/* Prepare sccb. */
689 	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
690 	if (!sccb)
691 		return -ENOMEM;
692 	sccb->header.length = sizeof(*sccb);
693 	rc = do_sync_request(cmd, sccb);
694 	if (rc)
695 		goto out;
696 	switch (sccb->header.response_code) {
697 	case 0x0020:
698 	case 0x0120:
699 	case 0x0440:
700 	case 0x0450:
701 		break;
702 	default:
703 		pr_warning("configure channel-path failed "
704 			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
705 			   sccb->header.response_code);
706 		rc = -EIO;
707 		break;
708 	}
709 out:
710 	free_page((unsigned long) sccb);
711 	return rc;
712 }
713 
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID (encoded in bits 16-23 of the command word)
 *
 * Perform configure channel-path command sclp command for specified chpid
 * and wait for completion (the request is synchronous and may sleep).
 * Return 0 after command successfully finished, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
725 
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID (encoded in bits 16-23 of the command word)
 *
 * Perform deconfigure channel-path command sclp command for specified chpid
 * and wait for completion (the request is synchronous and may sleep).
 * On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
737 
/* Response block for the read channel-path information command. */
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];	/* recognized chpid bitmask */
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];	/* standby chpid bitmask */
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];	/* configured chpid bitmask */
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
747 
748 /**
749  * sclp_chp_read_info - perform read channel-path information sclp command
750  * @info: resulting channel-path information data
751  *
752  * Perform read channel-path information sclp command and wait for completion.
753  * On success, store channel-path information in @info and return 0. Return
754  * non-zero otherwise.
755  */
756 int sclp_chp_read_info(struct sclp_chp_info *info)
757 {
758 	struct chp_info_sccb *sccb;
759 	int rc;
760 
761 	if (!SCLP_HAS_CHP_INFO)
762 		return -EOPNOTSUPP;
763 	/* Prepare sccb. */
764 	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
765 	if (!sccb)
766 		return -ENOMEM;
767 	sccb->header.length = sizeof(*sccb);
768 	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
769 	if (rc)
770 		goto out;
771 	if (sccb->header.response_code != 0x0010) {
772 		pr_warning("read channel-path info failed "
773 			   "(response=0x%04x)\n", sccb->header.response_code);
774 		rc = -EIO;
775 		goto out;
776 	}
777 	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
778 	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
779 	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
780 out:
781 	free_page((unsigned long) sccb);
782 	return rc;
783 }
784