/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

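/* Page used by chsc_process_crw() to hold store-event-information data. */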
static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

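/*
 * Clear, in the subchannel's mask of usable paths (opm), every channel
 * path whose chpid is not logically online; bit 0x80 >> chp corresponds
 * to pmcw.chpid[chp].
 */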
void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8  unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

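	/*
	 * Build the request block for store-subchannel-description data
	 * (ssd), command code 0x0004, covering just this subchannel.
	 */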
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_valid, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_valid, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * A new subchannel type may have been defined in the
		 * time since this code was written; since we don't know
		 * which fields have meaning and what to do with them,
		 * we just jump out.
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j]   = ssd_area->fla[j];
		}
	}
	return 0;
}

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "css_get_ssd_info:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

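/*
 * bus_for_each_dev() callback: if @dev's subchannel uses the channel path
 * being removed, terminate or re-verify its I/O; if no usable path is
 * left, schedule the subchannel for de-registration via the slow path.
 */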
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock_irq(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock_irq(&sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}

static inline void
s390_set_chpid_offline(__u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}

struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and possibly check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}

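/*
 * Extract the channel-path id from a link-incident record (LIR).
 * Returns the chpid, or -EINVAL if the record does not identify one.
 */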
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir *)data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

int
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * Build the chsc request block for store event information
	 * and do the call.
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		struct device *dev;
		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
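		/* Store-event-information (sei) request, command code 0x000e. */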
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
					"successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				       "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __func__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	for (i = 0; i < 8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				return -ENXIO;
			}
			break;
		}
	if (i == 8) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return 0;
}

static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in s390_process_res_acc. */
	return chp_add(chpid);
}

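/*
 * If I/O is still active on the path with the given index, mark the
 * device as waiting so the I/O can be killed after the grace period
 * (see caller); returns 1 in that case, 0 if nothing had to be done.
 */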
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline.
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo path verification on the devices the chpid connects to.
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t
chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
			   size_t count)
{
	struct channel_path *chp;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	if (!chp->cmg_chars)
		return 0;

	size = sizeof(struct cmg_chars);

	if (off > size)
		return 0;
	if (off + count > size)
		count = size - off;
	memcpy(buf, chp->cmg_chars + off, count);
	return count;
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void
chp_measurement_copy_block(struct cmg_entry *buf,
			   struct channel_subsystem *css, int chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid < 128) {
		area = css->cub_addr1;
		idx = chpid;
	} else {
		area = css->cub_addr2;
		idx = chpid - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
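	/*
	 * The machine may update the measurement block concurrently;
	 * copy it twice and retry until both copies agree on the first
	 * word, so we never hand out a torn entry.
	 */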
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t
chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	unsigned int size;

	chp = to_channelpath(container_of(kobj, struct device, kobj));
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
		.owner = THIS_MODULE,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

static void
chsc_remove_chp_cmg_attr(struct channel_path *chp)
{
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
}

static int
chsc_add_chp_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = sysfs_create_bin_file(&chp->dev.kobj,
				    &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
	if (ret)
		sysfs_remove_bin_file(&chp->dev.kobj,
				      &chp_measurement_chars_attr);
	return ret;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chsc_add_chp_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chsc_remove_chp_cmg_attr(css->chps[i]);
	}
	return ret;
}

static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} *secm_area;
	int ret, ccode;

	secm_area = page;
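	/*
	 * Set-channel-monitor (secm) request, command code 0x0016:
	 * operation_code 0 enables, 1 disables channel measurement.
	 */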
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
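		/*
		 * Allocate the two measurement areas handed to the machine:
		 * cub_addr1 holds the measurement blocks for chpids 0-127,
		 * cub_addr2 those for chpids 128-255
		 * (cf. chp_measurement_copy_block()).
		 */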
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t
chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t
chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

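	/*
	 * Store-channel-path-description (scpd) request, command code
	 * 0x0002, for a single chpid.
	 */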
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

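/*
 * Copy the measurement characteristics for cmg 2 and 3 channel paths;
 * only values whose bit is set in cmcv are copied, the rest are zeroed.
 */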
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

static int
chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

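	/*
	 * Store-channel-measurement-characteristics (scmc) request,
	 * command code 0x0022, for a single chpid.
	 */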
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->id;
	scmc_area->last_chpid = chp->id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css[0]->device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Get channel-measurement characteristics. */
	if (css_characteristics_avail && css_chsc_characteristics.scmc
	    && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		static int msg_done;

		if (!msg_done) {
			printk(KERN_WARNING "cio: Channel measurements not "
			       "available, continuing.\n");
			msg_done = 1;
		}
		chp->cmg = -1;
	}

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	mutex_lock(&css[0]->mutex);
	if (css[0]->cm_enabled) {
		ret = chsc_add_chp_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&css[0]->mutex);
			goto out_free;
		}
	}
	css[0]->chps[chpid] = chp;
	mutex_unlock(&css[0]->mutex);
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
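	/*
	 * 'sda' request, command code 0x0031; operation_code selects the
	 * facility to enable.
	 */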
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

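	/*
	 * Store-channel-subsystem-characteristics (scsc) request, command
	 * code 0x0010; the response carries the general and CHSC
	 * characteristic bits copied out below.
	 */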
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);