/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *   $Revision: 1.119 $
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cohuck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static struct channel_path *chps[NR_CHPIDS];

static void *sei_page;

static int new_channel_path(int chpid);

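/*
 * Channel path state bookkeeping, as used throughout this file:
 * chps[chpid] == NULL means the chpid is not (yet) known, in which
 * case get_chp_status() returns -ENODEV; otherwise state == 0 means
 * logically offline and state == 1 means logically online.
 */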
static inline void
set_chp_logically_online(int chp, int onoff)
{
	chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
	return (chps[chp] ? chps[chp]->state : -ENODEV);
}

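/*
 * Note on the path masks used below: path bits are numbered msb
 * first, so chp == 0 yields mask 0x80. This matches the ordering of
 * the eight chpid slots in the pmcw and of pim/pam/pom/opm/lpm.
 */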
void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}

void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	struct {
		struct chsc_header request;
		u16 reserved1;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8  unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0004,
	};

	ssd_area->f_sch = sch->irq;
	ssd_area->l_sch = sch->irq;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch %04x\n", ssd_area->st, sch->irq);
		/*
		 * A new subchannel type may have been defined since this
		 * code was written; since we don't know which fields have
		 * meaning or what to do with them, we just bail out.
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
			      sch->irq, type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j]   = ssd_area->fla[j];
		}
	}
	return 0;
}

int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}

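/*
 * Called via bus_for_each_dev() with a pointer to the chpid that went
 * away as the data argument; always returns 0 so the bus iteration
 * visits every subchannel.
 */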
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	__u8 *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == *chpid)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock(&sch->lock);

	stsch(sch->irq, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;

		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock(&sch->lock);
	return 0;
out_unreg:
	spin_unlock(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->irq)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}

static inline void
s390_set_chpid_offline(__u8 chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;

	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
}

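/*
 * fla_mask determines how much of the link address stored by ssd has
 * to match fla: 0xffff for a full link address, 0xff00 for a link
 * address (high byte only), and 0 when no link address was reported
 * (see the callers in chsc_process_crw()).
 */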
static int
s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
			 struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == chpid &&
		    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and possibly check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->irq, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}

static int
s390_process_res_acc(u8 chpid, __u16 fla, u32 fla_mask)
{
	struct subchannel *sch;
	int irq, rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (fla != 0) {
		sprintf(dbf_txt, "fla%x", fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int chp_mask, old_lpm;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;
			int ret;
			/*
			 * We don't know the device yet, but since a path
			 * may be available now to the device we'll have
			 * to do recognition again.
			 * Since we don't have any idea about which chpid
			 * that beast may be on we'll have to do a stsch
			 * on all devices, grr...
			 */
			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock_irq(&sch->lock);

		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);

		if (chp_mask == 0) {

			spin_unlock_irq(&sch->lock);

			if (fla_mask != 0)
				break;
			else
				continue;
		}
		old_lpm = sch->lpm;
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | chp_mask) & sch->opm;
		if (!old_lpm && sch->lpm)
			device_trigger_reprobe(sch);
		else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		if (fla_mask != 0)
			break;
	}
	return rc;
}

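/*
 * Layout of the first incident-node descriptor word (indesc[0],
 * msb first), as inferred from the checks below:
 *   bits 0-1 (0xc0000000): node-descriptor validity
 *   bit  3   (0x10000000): clear for device-type nodes, which we
 *			    don't handle
 *   byte 3   (0x000000ff): the chpid
 */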
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir *) data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

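/*
 * For resource-accessibility events (content code 2), the validity
 * flags in sei_area->vf determine how much of sei_area->fla is
 * usable: vf & 0x80 set means a link address is present, and
 * (vf & 0xc0) == 0xc0 means it is a full link address. This is how
 * the event handling below interprets them.
 */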
int
chsc_process_crw(void)
{
	int chpid, ret;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * Build the chsc request block for store event information
	 * and do the call.
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		memset(sei_area, 0, sizeof(*sei_area));

		sei_area->request = (struct chsc_header) {
			.length = 0x0010,
			.code   = 0x000e,
		};

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
					"successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				       "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				return 0;
			if ((sei_area->vf & 0x80) == 0) {
				pr_debug("chpid: %x\n", sei_area->rsid);
				ret = s390_process_res_acc(sei_area->rsid,
							   0, 0);
			} else if ((sei_area->vf & 0xc0) == 0x80) {
				pr_debug("chpid: %x link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xff00);
			} else if ((sei_area->vf & 0xc0) == 0xc0) {
				pr_debug("chpid: %x full link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xffff);
			}
			pr_debug("\n");

			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}

static int
chp_add(int chpid)
{
	struct subchannel *sch;
	int irq, ret, rc;
	char dbf_txt[15];

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int i;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;

			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock(&sch->lock);
		for (i = 0; i < 8; i++)
			if (sch->schib.pmcw.chpid[i] == chpid) {
				if (stsch(sch->irq, &sch->schib) != 0) {
					/* Endgame. */
					spin_unlock(&sch->lock);
					return rc;
				}
				break;
			}
		if (i == 8) {
			spin_unlock(&sch->lock);
			return rc;
		}
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | 0x80 >> i) & sch->opm;

		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock(&sch->lock);
		put_device(&sch->dev);
	}
	return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_res_acc. */
	return chp_add(chpid);
}

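/*
 * Returns 1 if the subchannel still shows activity on the path being
 * varied off (lpum matches the path mask) and has been put into the
 * waiting state; returns 0 otherwise.
 */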
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->irq, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->irq)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
	char dbf_text[15];
	int status, irq, ret;
	struct subchannel *sch;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (!on)
		goto out;
	/* Scan for new devices on varied on path. */
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct schib schib;

		if (need_rescan)
			break;
		sch = get_subchannel_by_schid(irq);
		if (sch) {
			put_device(&sch->dev);
			continue;
		}
		if (stsch(irq, &schib))
			/* We're through */
			break;
		/* Put it on the slow path. */
		ret = css_enqueue_subchannel_slow(irq);
		if (ret) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
out:
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
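
/*
 * Writing "on" or "off" to the status attribute varies the path from
 * user space. Assuming the css bus device shows up as css0 in sysfs,
 * the path for, e.g., chpid 0x4a would be:
 *   echo on > /sys/devices/css0/chp0.4a/status
 */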

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}

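/*
 * Issue the store-channel-path-description chsc (command code 0x0002)
 * for a single chpid and copy the result into *desc. Used by
 * new_channel_path() below, which in turn feeds the sysfs type
 * attribute.
 */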
static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;
	memset(chp, 0, sizeof(struct channel_path));

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css_bus_device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	} else
		chps[chpid] = chp;
	return ret;
out_free:
	kfree(chp);
	return ret;
}

void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}

static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

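/*
 * Issue the store-channel-subsystem-characteristics chsc (command
 * code 0x0010, see the request block below) and cache the results in
 * the two exported structures above. Called once at initialization
 * (the function is __init).
 */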
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
1115