xref: /linux/drivers/s390/cio/chsc.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
1 /*
2  *  drivers/s390/cio/chsc.c
3  *   S/390 common I/O routines -- channel subsystem call
4  *
5  *    Copyright IBM Corp. 1999,2008
6  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
7  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
8  *		 Arnd Bergmann (arndb@de.ibm.com)
9  */
10 
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/device.h>
15 
16 #include <asm/cio.h>
17 #include <asm/chpid.h>
18 #include <asm/chsc.h>
19 
20 #include "../s390mach.h"
21 #include "css.h"
22 #include "cio.h"
23 #include "cio_debug.h"
24 #include "ioasm.h"
25 #include "chp.h"
26 #include "chsc.h"
27 
28 static void *sei_page;
29 
30 /**
31  * chsc_error_from_response() - convert a chsc response to an error
32  * @response: chsc response code
33  *
34  * Returns an appropriate Linux error code for @response.
35  */
int chsc_error_from_response(int response)
{
	/* 0x0001 is the only success indication. */
	if (response == 0x0001)
		return 0;
	/* Facility or command not provided by this machine. */
	if (response == 0x0004)
		return -EOPNOTSUPP;
	/* Malformed request block or invalid operand. */
	if (response == 0x0002 || response == 0x0003 ||
	    response == 0x0006 || response == 0x0007 ||
	    response == 0x0008 || response == 0x000a)
		return -EINVAL;
	/* Anything else is an unexpected failure. */
	return -EIO;
}
54 EXPORT_SYMBOL_GPL(chsc_error_from_response);
55 
/* Request/response block of the store-subchannel-description (ssd) command.
 * The layout is architected hardware format - do not reorder fields. */
struct chsc_ssd_area {
	struct chsc_header request;	/* command request header */
	u16 :10;
	u16 ssid:2;	  /* subchannel-set id to query */
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;	/* command response header */
	u32 :32;
	u8 sch_valid : 1;	/* subchannel description is valid */
	u8 dev_valid : 1;	/* devno field is valid */
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;	  /* mask of installed channel paths */
	u8 fla_valid_mask;	/* mask of valid entries in fla[] */
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
79 
80 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
81 {
82 	unsigned long page;
83 	struct chsc_ssd_area *ssd_area;
84 	int ccode;
85 	int ret;
86 	int i;
87 	int mask;
88 
89 	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
90 	if (!page)
91 		return -ENOMEM;
92 	ssd_area = (struct chsc_ssd_area *) page;
93 	ssd_area->request.length = 0x0010;
94 	ssd_area->request.code = 0x0004;
95 	ssd_area->ssid = schid.ssid;
96 	ssd_area->f_sch = schid.sch_no;
97 	ssd_area->l_sch = schid.sch_no;
98 
99 	ccode = chsc(ssd_area);
100 	/* Check response. */
101 	if (ccode > 0) {
102 		ret = (ccode == 3) ? -ENODEV : -EBUSY;
103 		goto out_free;
104 	}
105 	ret = chsc_error_from_response(ssd_area->response.code);
106 	if (ret != 0) {
107 		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
108 			      schid.ssid, schid.sch_no,
109 			      ssd_area->response.code);
110 		goto out_free;
111 	}
112 	if (!ssd_area->sch_valid) {
113 		ret = -ENODEV;
114 		goto out_free;
115 	}
116 	/* Copy data */
117 	ret = 0;
118 	memset(ssd, 0, sizeof(struct chsc_ssd_info));
119 	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
120 	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
121 		goto out_free;
122 	ssd->path_mask = ssd_area->path_mask;
123 	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
124 	for (i = 0; i < 8; i++) {
125 		mask = 0x80 >> i;
126 		if (ssd_area->path_mask & mask) {
127 			chp_id_init(&ssd->chpid[i]);
128 			ssd->chpid[i].id = ssd_area->chpid[i];
129 		}
130 		if (ssd_area->fla_valid_mask & mask)
131 			ssd->fla[i] = ssd_area->fla[i];
132 	}
133 out_free:
134 	free_page(page);
135 	return ret;
136 }
137 
138 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
139 {
140 	spin_lock_irq(sch->lock);
141 	if (sch->driver && sch->driver->chp_event)
142 		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
143 			goto out_unreg;
144 	spin_unlock_irq(sch->lock);
145 	return 0;
146 
147 out_unreg:
148 	sch->lpm = 0;
149 	spin_unlock_irq(sch->lock);
150 	css_schedule_eval(sch->schid);
151 	return 0;
152 }
153 
154 void chsc_chp_offline(struct chp_id chpid)
155 {
156 	char dbf_txt[15];
157 	struct chp_link link;
158 
159 	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
160 	CIO_TRACE_EVENT(2, dbf_txt);
161 
162 	if (chp_get_status(chpid) <= 0)
163 		return;
164 	memset(&link, 0, sizeof(struct chp_link));
165 	link.chpid = chpid;
166 	/* Wait until previous actions have settled. */
167 	css_wait_for_slow_path();
168 	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
169 }
170 
171 static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
172 {
173 	struct schib schib;
174 	/*
175 	 * We don't know the device yet, but since a path
176 	 * may be available now to the device we'll have
177 	 * to do recognition again.
178 	 * Since we don't have any idea about which chpid
179 	 * that beast may be on we'll have to do a stsch
180 	 * on all devices, grr...
181 	 */
182 	if (stsch_err(schid, &schib))
183 		/* We're through */
184 		return -ENXIO;
185 
186 	/* Put it on the slow path. */
187 	css_schedule_eval(schid);
188 	return 0;
189 }
190 
191 static int __s390_process_res_acc(struct subchannel *sch, void *data)
192 {
193 	spin_lock_irq(sch->lock);
194 	if (sch->driver && sch->driver->chp_event)
195 		sch->driver->chp_event(sch, data, CHP_ONLINE);
196 	spin_unlock_irq(sch->lock);
197 
198 	return 0;
199 }
200 
201 static void s390_process_res_acc(struct chp_link *link)
202 {
203 	char dbf_txt[15];
204 
205 	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
206 		link->chpid.id);
207 	CIO_TRACE_EVENT( 2, dbf_txt);
208 	if (link->fla != 0) {
209 		sprintf(dbf_txt, "fla%x", link->fla);
210 		CIO_TRACE_EVENT( 2, dbf_txt);
211 	}
212 	/* Wait until previous actions have settled. */
213 	css_wait_for_slow_path();
214 	/*
215 	 * I/O resources may have become accessible.
216 	 * Scan through all subchannels that may be concerned and
217 	 * do a validation on those.
218 	 * The more information we have (info), the less scanning
219 	 * will we have to do.
220 	 */
221 	for_each_subchannel_staged(__s390_process_res_acc,
222 				   s390_process_res_acc_new_sch, link);
223 }
224 
225 static int
226 __get_chpid_from_lir(void *data)
227 {
228 	struct lir {
229 		u8  iq;
230 		u8  ic;
231 		u16 sci;
232 		/* incident-node descriptor */
233 		u32 indesc[28];
234 		/* attached-node descriptor */
235 		u32 andesc[28];
236 		/* incident-specific information */
237 		u32 isinfo[28];
238 	} __attribute__ ((packed)) *lir;
239 
240 	lir = data;
241 	if (!(lir->iq&0x80))
242 		/* NULL link incident record */
243 		return -EINVAL;
244 	if (!(lir->indesc[0]&0xc0000000))
245 		/* node descriptor not valid */
246 		return -EINVAL;
247 	if (!(lir->indesc[0]&0x10000000))
248 		/* don't handle device-type nodes - FIXME */
249 		return -EINVAL;
250 	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
251 
252 	return (u16) (lir->indesc[0]&0x000000ff);
253 }
254 
/* Request/response block of the store-event-information (sei) command. */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;	/* 0x80: more event info pending, 0x40: overflow */
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
273 
274 static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
275 {
276 	struct chp_id chpid;
277 	int id;
278 
279 	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
280 		      sei_area->rs, sei_area->rsid);
281 	if (sei_area->rs != 4)
282 		return;
283 	id = __get_chpid_from_lir(sei_area->ccdf);
284 	if (id < 0)
285 		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
286 	else {
287 		chp_id_init(&chpid);
288 		chpid.id = id;
289 		chsc_chp_offline(chpid);
290 	}
291 }
292 
293 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
294 {
295 	struct chp_link link;
296 	struct chp_id chpid;
297 	int status;
298 
299 	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
300 		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
301 	if (sei_area->rs != 4)
302 		return;
303 	chp_id_init(&chpid);
304 	chpid.id = sei_area->rsid;
305 	/* allocate a new channel path structure, if needed */
306 	status = chp_get_status(chpid);
307 	if (status < 0)
308 		chp_new(chpid);
309 	else if (!status)
310 		return;
311 	memset(&link, 0, sizeof(struct chp_link));
312 	link.chpid = chpid;
313 	if ((sei_area->vf & 0xc0) != 0) {
314 		link.fla = sei_area->fla;
315 		if ((sei_area->vf & 0xc0) == 0xc0)
316 			/* full link address */
317 			link.fla_mask = 0xffff;
318 		else
319 			/* link address */
320 			link.fla_mask = 0xff00;
321 	}
322 	s390_process_res_acc(&link);
323 }
324 
/* Content-code dependent field of a channel-path-configuration event. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* requested operation (0/1/2, see consumer below) */
	u8 pc;
};
330 
331 static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
332 {
333 	struct chp_config_data *data;
334 	struct chp_id chpid;
335 	int num;
336 
337 	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
338 	if (sei_area->rs != 0)
339 		return;
340 	data = (struct chp_config_data *) &(sei_area->ccdf);
341 	chp_id_init(&chpid);
342 	for (num = 0; num <= __MAX_CHPID; num++) {
343 		if (!chp_test_bit(data->map, num))
344 			continue;
345 		chpid.id = num;
346 		printk(KERN_WARNING "cio: processing configure event %d for "
347 		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
348 		switch (data->op) {
349 		case 0:
350 			chp_cfg_schedule(chpid, 1);
351 			break;
352 		case 1:
353 			chp_cfg_schedule(chpid, 0);
354 			break;
355 		case 2:
356 			chp_cfg_cancel_deconfigure(chpid);
357 			break;
358 		}
359 	}
360 }
361 
362 static void chsc_process_sei(struct chsc_sei_area *sei_area)
363 {
364 	/* Check if we might have lost some information. */
365 	if (sei_area->flags & 0x40) {
366 		CIO_CRW_EVENT(2, "chsc: event overflow\n");
367 		css_schedule_eval_all();
368 	}
369 	/* which kind of information was stored? */
370 	switch (sei_area->cc) {
371 	case 1: /* link incident*/
372 		chsc_process_sei_link_incident(sei_area);
373 		break;
374 	case 2: /* i/o resource accessibiliy */
375 		chsc_process_sei_res_acc(sei_area);
376 		break;
377 	case 8: /* channel-path-configuration notification */
378 		chsc_process_sei_chp_config(sei_area);
379 		break;
380 	default: /* other stuff */
381 		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
382 			      sei_area->cc);
383 		break;
384 	}
385 }
386 
/* CRW handler: drain pending chsc event information and process it. */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		/* CRWs were lost - reevaluate all subchannels. */
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		/* Issue a store-event-information (sei) command. */
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
		/* Flag 0x80 signals that more event information is pending. */
	} while (sei_area->flags & 0x80);
}
423 
424 void chsc_chp_online(struct chp_id chpid)
425 {
426 	char dbf_txt[15];
427 	struct chp_link link;
428 
429 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
430 	CIO_TRACE_EVENT(2, dbf_txt);
431 
432 	if (chp_get_status(chpid) != 0) {
433 		memset(&link, 0, sizeof(struct chp_link));
434 		link.chpid = chpid;
435 		/* Wait until previous actions have settled. */
436 		css_wait_for_slow_path();
437 		for_each_subchannel_staged(__s390_process_res_acc, NULL,
438 					   &link);
439 	}
440 }
441 
442 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
443 					 struct chp_id chpid, int on)
444 {
445 	unsigned long flags;
446 	struct chp_link link;
447 
448 	memset(&link, 0, sizeof(struct chp_link));
449 	link.chpid = chpid;
450 	spin_lock_irqsave(sch->lock, flags);
451 	if (sch->driver && sch->driver->chp_event)
452 		sch->driver->chp_event(sch, &link,
453 				       on ? CHP_VARY_ON : CHP_VARY_OFF);
454 	spin_unlock_irqrestore(sch->lock, flags);
455 }
456 
457 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
458 {
459 	struct chp_id *chpid = data;
460 
461 	__s390_subchannel_vary_chpid(sch, *chpid, 0);
462 	return 0;
463 }
464 
465 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
466 {
467 	struct chp_id *chpid = data;
468 
469 	__s390_subchannel_vary_chpid(sch, *chpid, 1);
470 	return 0;
471 }
472 
473 static int
474 __s390_vary_chpid_on(struct subchannel_id schid, void *data)
475 {
476 	struct schib schib;
477 
478 	if (stsch_err(schid, &schib))
479 		/* We're through */
480 		return -ENXIO;
481 	/* Put it on the slow path. */
482 	css_schedule_eval(schid);
483 	return 0;
484 }
485 
486 /**
487  * chsc_chp_vary - propagate channel-path vary operation to subchannels
488  * @chpid: channl-path ID
489  * @on: non-zero for vary online, zero for vary offline
490  */
491 int chsc_chp_vary(struct chp_id chpid, int on)
492 {
493 	struct chp_link link;
494 
495 	memset(&link, 0, sizeof(struct chp_link));
496 	link.chpid = chpid;
497 	/* Wait until previous actions have settled. */
498 	css_wait_for_slow_path();
499 	/*
500 	 * Redo PathVerification on the devices the chpid connects to
501 	 */
502 
503 	if (on)
504 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
505 					   __s390_vary_chpid_on, &link);
506 	else
507 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
508 					   NULL, &link);
509 
510 	return 0;
511 }
512 
513 static void
514 chsc_remove_cmg_attr(struct channel_subsystem *css)
515 {
516 	int i;
517 
518 	for (i = 0; i <= __MAX_CHPID; i++) {
519 		if (!css->chps[i])
520 			continue;
521 		chp_remove_cmg_attr(css->chps[i]);
522 	}
523 }
524 
525 static int
526 chsc_add_cmg_attr(struct channel_subsystem *css)
527 {
528 	int i, ret;
529 
530 	ret = 0;
531 	for (i = 0; i <= __MAX_CHPID; i++) {
532 		if (!css->chps[i])
533 			continue;
534 		ret = chp_add_cmg_attr(css->chps[i]);
535 		if (ret)
536 			goto cleanup;
537 	}
538 	return ret;
539 cleanup:
540 	for (--i; i >= 0; i--) {
541 		if (!css->chps[i])
542 			continue;
543 		chp_remove_cmg_attr(css->chps[i]);
544 	}
545 	return ret;
546 }
547 
548 static int
549 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
550 {
551 	struct {
552 		struct chsc_header request;
553 		u32 operation_code : 2;
554 		u32 : 30;
555 		u32 key : 4;
556 		u32 : 28;
557 		u32 zeroes1;
558 		u32 cub_addr1;
559 		u32 zeroes2;
560 		u32 cub_addr2;
561 		u32 reserved[13];
562 		struct chsc_header response;
563 		u32 status : 8;
564 		u32 : 4;
565 		u32 fmt : 4;
566 		u32 : 16;
567 	} __attribute__ ((packed)) *secm_area;
568 	int ret, ccode;
569 
570 	secm_area = page;
571 	secm_area->request.length = 0x0050;
572 	secm_area->request.code = 0x0016;
573 
574 	secm_area->key = PAGE_DEFAULT_KEY;
575 	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
576 	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
577 
578 	secm_area->operation_code = enable ? 0 : 1;
579 
580 	ccode = chsc(secm_area);
581 	if (ccode > 0)
582 		return (ccode == 3) ? -ENODEV : -EBUSY;
583 
584 	switch (secm_area->response.code) {
585 	case 0x0102:
586 	case 0x0103:
587 		ret = -EINVAL;
588 	default:
589 		ret = chsc_error_from_response(secm_area->response.code);
590 	}
591 	if (ret != 0)
592 		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
593 			      secm_area->response.code);
594 	return ret;
595 }
596 
/* Enable or disable channel measurement for @css, managing the
 * channel-utilization block (cub) pages and sysfs attributes. */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	/* Request block must be addressable by the channel subsystem. */
	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		/* First-time enable: allocate the two cub pages. */
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page(0) is a no-op, so partial allocation
			 * is cleaned up safely here. */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			/* Expose measurement attributes; on failure roll
			 * back by turning measurement off again. */
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		/* Measurement is off - the cub pages are no longer needed. */
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}
637 
/*
 * Issue a store-channel-path-description (scpd) command for @chpid and
 * copy the raw response into @resp.  @fmt/@rfmt select the command and
 * response formats; @c and @m are passed through to the request block.
 * Returns 0 on success or a negative errno.
 */
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m,
				     struct chsc_response_struct *resp)
{
	int ccode, ret;

	/* Architected scpd request/response block layout. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpd_area;

	/* The requested response format needs a matching css facility. */
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Query exactly one channel path. */
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		/* cc 3: not operational; cc 1/2: try again later. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(resp, &scpd_area->response, scpd_area->response.length);
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
698 
699 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
700 					  struct channel_path_desc *desc)
701 {
702 	struct chsc_response_struct *chsc_resp;
703 	int ret;
704 
705 	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
706 	if (!chsc_resp)
707 		return -ENOMEM;
708 	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
709 	if (ret)
710 		goto out_free;
711 	memcpy(desc, &chsc_resp->data, chsc_resp->length);
712 out_free:
713 	kfree(chsc_resp);
714 	return ret;
715 }
716 
717 static void
718 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
719 			  struct cmg_chars *chars)
720 {
721 	switch (chp->cmg) {
722 	case 2:
723 	case 3:
724 		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
725 					 GFP_KERNEL);
726 		if (chp->cmg_chars) {
727 			int i, mask;
728 			struct cmg_chars *cmg_chars;
729 
730 			cmg_chars = chp->cmg_chars;
731 			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
732 				mask = 0x80 >> (i + 3);
733 				if (cmcv & mask)
734 					cmg_chars->values[i] = chars->values[i];
735 				else
736 					cmg_chars->values[i] = 0;
737 			}
738 		}
739 		break;
740 	default:
741 		/* No cmg-dependent data. */
742 		break;
743 	}
744 }
745 
/* Issue a store-channel-path-measurement-characteristics (scmc) command
 * for @chp and store cmg, shared state and measurement characteristics
 * in the channel-path structure.  Returns 0 or a negative errno. */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	/* Architected scmc request/response block layout. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Query exactly one channel path. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		/* cc 3: not operational; cc 1/2: try again later. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			/* No measurement data available for this path. */
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
808 
809 int __init chsc_alloc_sei_area(void)
810 {
811 	int ret;
812 
813 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
814 	if (!sei_page) {
815 		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
816 			      "chsc machine checks!\n");
817 		return -ENOMEM;
818 	}
819 	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
820 	if (ret)
821 		kfree(sei_page);
822 	return ret;
823 }
824 
825 void __init chsc_free_sei_area(void)
826 {
827 	s390_unregister_crw_handler(CRW_RSC_CSS);
828 	kfree(sei_page);
829 }
830 
/* Issue a set-domain-attributes (sda) command to enable the facility
 * identified by @operation_code.  Returns 0 or a negative errno. */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	/* Architected sda request/response block layout. */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	/* chsc() returns the condition code of the instruction. */
	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		/* Facility not provided on this machine. */
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
 out:
	free_page((unsigned long)sda_area);
	return ret;
}
877 
/* Cached channel-subsystem characteristics, filled in once at init time
 * by chsc_determine_css_characteristics(). */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
880 
881 int __init
882 chsc_determine_css_characteristics(void)
883 {
884 	int result;
885 	struct {
886 		struct chsc_header request;
887 		u32 reserved1;
888 		u32 reserved2;
889 		u32 reserved3;
890 		struct chsc_header response;
891 		u32 reserved4;
892 		u32 general_char[510];
893 		u32 chsc_char[518];
894 	} __attribute__ ((packed)) *scsc_area;
895 
896 	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
897 	if (!scsc_area)
898 		return -ENOMEM;
899 
900 	scsc_area->request.length = 0x0010;
901 	scsc_area->request.code = 0x0010;
902 
903 	result = chsc(scsc_area);
904 	if (result) {
905 		result = (result == 3) ? -ENODEV : -EBUSY;
906 		goto exit;
907 	}
908 
909 	result = chsc_error_from_response(scsc_area->response.code);
910 	if (result == 0) {
911 		memcpy(&css_general_characteristics, scsc_area->general_char,
912 		       sizeof(css_general_characteristics));
913 		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
914 		       sizeof(css_chsc_characteristics));
915 	} else
916 		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
917 			      scsc_area->response.code);
918 exit:
919 	free_page ((unsigned long) scsc_area);
920 	return result;
921 }
922 
923 EXPORT_SYMBOL_GPL(css_general_characteristics);
924 EXPORT_SYMBOL_GPL(css_chsc_characteristics);
925 
926 int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
927 {
928 	struct {
929 		struct chsc_header request;
930 		unsigned int rsvd0;
931 		unsigned int op : 8;
932 		unsigned int rsvd1 : 8;
933 		unsigned int ctrl : 16;
934 		unsigned int rsvd2[5];
935 		struct chsc_header response;
936 		unsigned int rsvd3[7];
937 	} __attribute__ ((packed)) *rr;
938 	int rc;
939 
940 	memset(page, 0, PAGE_SIZE);
941 	rr = page;
942 	rr->request.length = 0x0020;
943 	rr->request.code = 0x0033;
944 	rr->op = op;
945 	rr->ctrl = ctrl;
946 	rc = chsc(rr);
947 	if (rc)
948 		return -EIO;
949 	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
950 	return rc;
951 }
952 
/* Issue a store-stp-information command and copy @size bytes of the
 * response data into @result.  Returns 0 or -EIO. */
int chsc_sstpi(void *page, void *result, size_t size)
{
	/* The trailing data area is sized by the caller at runtime
	 * (GCC variably-modified struct member extension). */
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* Copy the STP information out of the response area. */
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}
973 
974