/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *   $Revision: 1.120 $
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cohuck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static struct channel_path *chps[NR_CHPIDS];

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
	chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (chps[chp] ? chps[chp]->state : -ENODEV);
}

void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}
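
/*
 * Note on the path masks used throughout this file: the eight channel
 * paths of a subchannel are numbered 0-7, and the path masks
 * (pim/pam/pom/lpm/opm) keep path 0 in the most significant bit.
 * Path position j therefore maps to the mask bit (0x80 >> j), e.g.
 * path 2 corresponds to mask 0x20, as in the loop above.
 */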

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8  unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0004,
	};

	ssd_area->f_sch = sch->irq;
	ssd_area->l_sch = sch->irq;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch %04x\n", ssd_area->st, sch->irq);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
			      sch->irq, type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j]   = ssd_area->fla[j];
		}
	}
	return 0;
}
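
/*
 * All CHSC commands in this file share the request-block convention
 * used above: a zeroed 4K page starting with a chsc_header that gives
 * the length and command code of the request part, followed by the
 * command-specific operands, and a second chsc_header introducing the
 * response part. As a sketch (command code and operands vary per
 * command):
 *
 *	area->request = (struct chsc_header) {
 *		.length = 0x0010,	// size of the request part
 *		.code   = 0x0004,	// here: store subchannel description
 *	};
 *	ccode = chsc(area);		// cc > 0: busy or not operational
 *	if (!ccode)
 *		// inspect area->response.code (0x0001 means success)
 */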

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

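/*
 * Callback for bus_for_each_dev() in s390_set_chpid_offline(): take
 * the given chpid away from the subchannel, terminating or
 * re-verifying pending I/O as needed. Subchannels that lose their
 * device or their last path go on the slow path for de-registration.
 */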
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	__u8 *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == *chpid)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock(&sch->lock);

	stsch(sch->irq, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;

		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock(&sch->lock);
	return 0;
out_unreg:
	spin_unlock(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->irq)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}

static inline void
s390_set_chpid_offline( __u8 chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;

	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
}

static int
s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
			 struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == chpid &&
		    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and then check for logically offline
	 * chpids.
	 */
	ccode = stsch(sch->irq, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static int
s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
{
	struct subchannel *sch;
	int irq, rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (fla != 0) {
		sprintf(dbf_txt, "fla%x", fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int chp_mask, old_lpm;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;
			int ret;
			/*
			 * We don't know the device yet, but since a path
			 * may be available now to the device we'll have
			 * to do recognition again.
			 * Since we don't have any idea about which chpid
			 * that beast may be on we'll have to do a stsch
			 * on all devices, grr...
			 */
			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock_irq(&sch->lock);

		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);

		if (chp_mask == 0) {
			spin_unlock_irq(&sch->lock);
			continue;
		}
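		/*
		 * Rebuild the logically available path mask from the
		 * installed, available and operational masks
		 * (pim & pam & pom), extend it by the newly accessible
		 * path and restrict it to the paths we may use (opm).
		 */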
		old_lpm = sch->lpm;
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | chp_mask) & sch->opm;
		if (!old_lpm && sch->lpm)
			device_trigger_reprobe(sch);
		else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		if (fla_mask == 0xffff)
			break;
	}
	return rc;
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir*) data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

int
chsc_process_crw(void)
{
	int chpid, ret;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
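	/*
	 * Bit 0x80 in the response flags indicates that further event
	 * information is pending; we therefore keep re-issuing the
	 * store-event-information command until the bit is no longer
	 * set (see the loop condition below).
	 */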
	do {
		int ccode, status;
		memset(sei_area, 0, sizeof(*sei_area));

		sei_area->request = (struct chsc_header) {
			.length = 0x0010,
			.code   = 0x000e,
		};

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
					"successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				       "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				return 0;
			if ((sei_area->vf & 0x80) == 0) {
				pr_debug("chpid: %x\n", sei_area->rsid);
				ret = s390_process_res_acc(sei_area->rsid,
							   0, 0);
			} else if ((sei_area->vf & 0xc0) == 0x80) {
				pr_debug("chpid: %x link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xff00);
			} else if ((sei_area->vf & 0xc0) == 0xc0) {
				pr_debug("chpid: %x full link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xffff);
			}
			pr_debug("\n");

			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static int
chp_add(int chpid)
{
	struct subchannel *sch;
	int irq, ret, rc;
	char dbf_txt[15];

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int i;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;

			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock(&sch->lock);
		for (i=0; i<8; i++)
			if (sch->schib.pmcw.chpid[i] == chpid) {
				if (stsch(sch->irq, &sch->schib) != 0) {
					/* Endgame. */
					spin_unlock(&sch->lock);
					return rc;
				}
				break;
			}
		if (i==8) {
			spin_unlock(&sch->lock);
			return rc;
		}
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | 0x80 >> i) & sch->opm;

		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock(&sch->lock);
		put_device(&sch->dev);
	}
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in s390_process_res_acc. */
	return chp_add(chpid);
}

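/*
 * Check whether I/O is still pending on the path being varied off;
 * if so, flag the device as waiting so that running I/O gets a grace
 * period before it is killed. Returns 1 if I/O was pending on that
 * path, 0 otherwise.
 */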
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->irq, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->irq)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
	char dbf_text[15];
	int status, irq, ret;
	struct subchannel *sch;

	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (!on)
		goto out;
	/* Scan for new devices on varied on path. */
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct schib schib;

		if (need_rescan)
			break;
		sch = get_subchannel_by_schid(irq);
		if (sch) {
			put_device(&sch->dev);
			continue;
		}
		if (stsch(irq, &schib))
			/* We're through */
			break;
		/* Put it on the slow path. */
		ret = css_enqueue_subchannel_slow(irq);
		if (ret) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
out:
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
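
/*
 * The status attribute lets user space vary a channel path: writing
 * "on" or "off" ends up in s390_vary_chpid() above. With the bus_id
 * format set in new_channel_path() below, chpid 0x4a could e.g. be
 * varied off via (sysfs path shown for illustration only):
 *
 *	echo off > /sys/devices/css0/chp0.4a/status
 */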

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;
	memset(chp, 0, sizeof(struct channel_path));

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css_bus_device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	} else
		chps[chpid] = chp;
	return ret;
out_free:
	kfree(chp);
	return ret;
}

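/*
 * Return a freshly allocated copy of the descriptor of the given
 * channel path of the subchannel, or NULL if the path is unknown or
 * no memory is available. The caller must kfree() the result.
 */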
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}


static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);