/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

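/*
 * Request/response block for the subchannel-description-data chsc
 * (command code 0x0004, "ssd"). The layout mirrors the hardware-defined
 * format; on success the response part carries the path mask, chpids and
 * full link addresses of the requested subchannel.
 */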
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

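/*
 * chsc_get_ssd_info() - obtain subchannel description data via chsc
 *
 * Issues the ssd chsc for a single subchannel and, on success, copies the
 * path mask, fla validity mask, chpids and full link addresses into @ssd.
 * Returns 0 on success, -ENOMEM if no request page could be allocated,
 * -ENODEV or -EBUSY for adverse condition codes or an invalid subchannel,
 * and -EIO on an unexpected response code.
 */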
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	if (ssd_area->response.code != 0x0001) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		ret = -EIO;
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(sch);
}

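/*
 * Per-subchannel callback used by chsc_chp_offline(): if the subchannel
 * uses the channel path that went away, terminate I/O pending on that
 * path, retrigger path verification, or schedule the subchannel for
 * evaluation if no usable path is left.
 */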
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	int j;
	int mask;
	struct chp_id *chpid = data;
	struct schib schib;

	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!css_sch_is_valid(&schib))
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

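/*
 * Return the path mask bit of the path through which the resource became
 * accessible again, or 0 if the subchannel's ssd info does not match the
 * reported chpid / link address.
 */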
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data = data;

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);
out:
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}

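/*
 * Extract the chpid from a link incident record (LIR). Returns the chpid
 * as a non-negative value, or -EINVAL for a NULL LIR, an invalid
 * incident-node descriptor, or a device-type node (not handled here).
 */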
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

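/*
 * Dispatch one store-event-information response: note a possible event
 * overflow (scheduling a full re-evaluation in that case), then handle
 * the event according to its content code.
 */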
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

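/*
 * Called from machine check handling to retrieve pending channel
 * subsystem events. Repeatedly issues the store-event-information chsc
 * (command code 0x000e) on the preallocated sei_page and processes each
 * returned event until the response no longer flags further pending
 * events.
 */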
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __chp_add(struct subchannel *sch, void *data)
{
	int i, mask;
	struct chp_id *chpid = data;

	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id))
			break;
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	if (stsch(sch->schid, &sch->schib)) {
		spin_unlock_irq(sch->lock);
		css_schedule_eval(sch->schid);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);

	spin_unlock_irq(sch->lock);

	return 0;
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0)
		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
					   &chpid);
}

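/*
 * Apply a vary on/off of the given channel path to a single subchannel:
 * adjust opm/lpm for the affected path and trigger reprobing, path
 * verification, killing of outstanding I/O or subchannel evaluation as
 * appropriate.
 */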
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(sch);
			}
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

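/*
 * Build and issue the secm chsc (command code 0x0016) that switches
 * channel measurement on or off, passing the addresses of the two
 * measurement data areas (cub_addr1/cub_addr2). The caller provides the
 * page used for the request block.
 */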
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

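/*
 * For channel measurement groups that carry per-path characteristics
 * (cmg 2 and 3), allocate a cmg_chars buffer and copy those measurement
 * characteristics that the cmcv mask marks as valid; the rest are zeroed.
 */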
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

int __init chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

void __init chsc_free_sei_area(void)
{
	/* sei_page comes from get_zeroed_page(), so release it via free_page(). */
	free_page((unsigned long)sei_page);
}

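/*
 * Enable an optional channel subsystem facility, selected by
 * @operation_code, by issuing chsc command 0x0031 at initialization time
 * (used, e.g., to enable multiple subchannel sets where available).
 */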
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

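/*
 * Issue the store-channel-subsystem-characteristics chsc (command code
 * 0x0010) and cache the general and chsc characteristic bits in
 * css_general_characteristics and css_chsc_characteristics for later
 * feature tests.
 */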
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		CIO_MSG_EVENT(0, "Was not able to determine available "
			      "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
			      "cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		CIO_MSG_EVENT(0, "Was not able to determine "
			      "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);