xref: /linux/drivers/s390/cio/device_fsm.c (revision 2624f124b3b5d550ab2fbef7ee3bc0e1fed09722)
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *			 IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14 
15 #include <asm/ccwdev.h>
16 #include <asm/cio.h>
17 
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 
25 int
26 device_is_online(struct subchannel *sch)
27 {
28 	struct ccw_device *cdev;
29 
30 	if (!sch->dev.driver_data)
31 		return 0;
32 	cdev = sch->dev.driver_data;
33 	return (cdev->private->state == DEV_STATE_ONLINE);
34 }
35 
36 int
37 device_is_disconnected(struct subchannel *sch)
38 {
39 	struct ccw_device *cdev;
40 
41 	if (!sch->dev.driver_data)
42 		return 0;
43 	cdev = sch->dev.driver_data;
44 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
45 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
46 }
47 
48 void
49 device_set_disconnected(struct subchannel *sch)
50 {
51 	struct ccw_device *cdev;
52 
53 	if (!sch->dev.driver_data)
54 		return;
55 	cdev = sch->dev.driver_data;
56 	ccw_device_set_timeout(cdev, 0);
57 	cdev->private->flags.fake_irb = 0;
58 	cdev->private->state = DEV_STATE_DISCONNECTED;
59 }
60 
61 void
62 device_set_waiting(struct subchannel *sch)
63 {
64 	struct ccw_device *cdev;
65 
66 	if (!sch->dev.driver_data)
67 		return;
68 	cdev = sch->dev.driver_data;
69 	ccw_device_set_timeout(cdev, 10*HZ);
70 	cdev->private->state = DEV_STATE_WAIT4IO;
71 }
72 
73 /*
74  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
75  */
76 static void
77 ccw_device_timeout(unsigned long data)
78 {
79 	struct ccw_device *cdev;
80 
81 	cdev = (struct ccw_device *) data;
82 	spin_lock_irq(cdev->ccwlock);
83 	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
84 	spin_unlock_irq(cdev->ccwlock);
85 }
86 
/*
 * Set timeout
 *
 * Arm the per-device timer to fire ccw_device_timeout() after @expires
 * jiffies; @expires == 0 cancels any pending timer instead.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns non-zero if it re-armed an active
		 * timer; if the timer expired between the pending check
		 * and here, fall through and add it afresh. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
106 
107 /* Kill any pending timers after machine check. */
108 void
109 device_kill_pending_timer(struct subchannel *sch)
110 {
111 	struct ccw_device *cdev;
112 
113 	if (!sch->dev.driver_data)
114 		return;
115 	cdev = sch->dev.driver_data;
116 	ccw_device_set_timeout(cdev, 0);
117 }
118 
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the schib; device may have vanished. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	/* iretry counts down across repeated invocations of this function. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 means halt started: caller must wait (-EBUSY). */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted; the subchannel cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
167 
/*
 * A previously disconnected device became operational again. Decide
 * whether it is still the same device; if not, schedule an unregister/
 * re-register cycle. Returns 1 if the same device came back (and marks
 * it for driver notification), 0 otherwise.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}
193 
194 /*
195  * The machine won't give us any notification by machine check if a chpid has
196  * been varied online on the SE so we have to find out by magic (i. e. driving
197  * the channel subsystem to device selection and updating our path masks).
198  */
199 static inline void
200 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
201 {
202 	int mask, i;
203 
204 	for (i = 0; i<8; i++) {
205 		mask = 0x80 >> i;
206 		if (!(sch->lpm & mask))
207 			continue;
208 		if (old_lpm & mask)
209 			continue;
210 		chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
211 	}
212 }
213 
/*
 * Stop device recognition.
 *
 * Finishes a sense-id cycle: disables the subchannel, refreshes the
 * path mask, recovers chpids that came back, and moves the device to
 * @state (possibly downgraded to NOT_OPER if the device vanished).
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Reappeared after disconnect: check identity. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
300 
301 /*
302  * Function called from device_id.c after sense id has completed.
303  */
304 void
305 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
306 {
307 	switch (err) {
308 	case 0:
309 		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
310 		break;
311 	case -ETIME:		/* Sense id stopped by timeout. */
312 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
313 		break;
314 	default:
315 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
316 		break;
317 	}
318 }
319 
/*
 * Work function: tell the subchannel driver that a device became
 * operational again. If the driver rejects it (or has no notify
 * callback), unregister and re-register the device.
 */
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)cdev);
	else
		wake_up(&cdev->private->wait_q);
}
337 
/*
 * Finished with online/offline processing.
 *
 * Moves the device to @state, resets the accumulated irb, schedules
 * driver notification if requested and drops the device reference
 * taken for the online transition (unless going online).
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		/* Clear flag before queueing so the work runs only once. */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Balance the get_device() done in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
373 
/*
 * Function called from device_pgid.c after sense path ground has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		/* Anything else: device is not usable. */
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
403 
/*
 * Start device recognition.
 *
 * Enables the subchannel and kicks off sense id with a 60 second
 * timeout. Only valid from the NOT_OPER or BOXED states; returns
 * -EINVAL otherwise, or the cio_enable_subchannel() error.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
438 
439 /*
440  * Handle timeout in device recognition.
441  */
442 static void
443 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
444 {
445 	int ret;
446 
447 	ret = ccw_device_cancel_halt_clear(cdev);
448 	switch (ret) {
449 	case 0:
450 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
451 		break;
452 	case -ENODEV:
453 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
454 		break;
455 	default:
456 		ccw_device_set_timeout(cdev, 3*HZ);
457 	}
458 }
459 
/*
 * Work function: all paths to the device are gone. Ask the subchannel
 * driver whether it wants to keep the device; if not, unregister the
 * subchannel, otherwise park the device in the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		/* Take a subchannel ref for the unregister work. */
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* No cdev ref: drop the sch ref again. */
				put_device(&sch->dev);
		}
	} else {
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
496 
/*
 * Called after path verification has completed. On success the device
 * goes online and a deferred "fake" start-pending irb is delivered to
 * the driver if one was promised while verification was in progress.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fall through - treat as successful verification. */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw = (struct scsw) {
				.cc = 1,
				.fctl = SCSW_FCTL_START_FUNC,
				.actl = SCSW_ACTL_START_PEND,
				.stctl = SCSW_STCTL_STATUS_PEND,
			};
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path left; notify and give up the device. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
533 
/*
 * Get device online.
 *
 * Valid from OFFLINE or BOXED. Takes a device reference (released by
 * ccw_device_done() on failure paths), enables the subchannel and
 * either goes online directly or starts sense pgid / path grouping.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
567 
568 void
569 ccw_device_disband_done(struct ccw_device *cdev, int err)
570 {
571 	switch (err) {
572 	case 0:
573 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
574 		break;
575 	case -ETIME:
576 		ccw_device_done(cdev, DEV_STATE_BOXED);
577 		break;
578 	default:
579 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
580 		break;
581 	}
582 }
583 
/*
 * Shutdown device.
 *
 * Takes the device offline: refuses if i/o is still in flight
 * (-EBUSY) or the device is not online (-EINVAL/-ENODEV); otherwise
 * goes offline directly or disbands the path group first.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
613 
614 /*
615  * Handle timeout in device online/offline process.
616  */
617 static void
618 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
619 {
620 	int ret;
621 
622 	ret = ccw_device_cancel_halt_clear(cdev);
623 	switch (ret) {
624 	case 0:
625 		ccw_device_done(cdev, DEV_STATE_BOXED);
626 		break;
627 	case -ENODEV:
628 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
629 		break;
630 	default:
631 		ccw_device_set_timeout(cdev, 3*HZ);
632 	}
633 }
634 
/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Abort recognition; the device disappeared underneath us. */
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
643 
/*
 * Handle not operational event while offline.
 *
 * Schedules subchannel unregistration; the get_device() reference is
 * released by the unregister work function.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
661 
662 /*
663  * Handle not operational event while online.
664  */
665 static void
666 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
667 {
668 	struct subchannel *sch;
669 
670 	sch = to_subchannel(cdev->dev.parent);
671 	if (sch->driver->notify &&
672 	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
673 			ccw_device_set_timeout(cdev, 0);
674 			cdev->private->flags.fake_irb = 0;
675 			cdev->private->state = DEV_STATE_DISCONNECTED;
676 			wake_up(&cdev->private->wait_q);
677 			return;
678 	}
679 	cdev->private->state = DEV_STATE_NOT_OPER;
680 	cio_disable_subchannel(sch);
681 	if (sch->schib.scsw.actl != 0) {
682 		// FIXME: not-oper indication to device driver ?
683 		ccw_device_call_handler(cdev);
684 	}
685 	if (get_device(&cdev->dev)) {
686 		PREPARE_WORK(&cdev->private->kick_work,
687 			     ccw_device_call_sch_unregister, (void *)cdev);
688 		queue_work(ccw_device_work, &cdev->private->kick_work);
689 	}
690 	wake_up(&cdev->private->wait_q);
691 }
692 
/*
 * Handle path verification event.
 *
 * Starts path verification if the device is idle; otherwise records
 * that verification must be retried once the current i/o has been
 * delivered to the driver.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Without path grouping there is nothing to verify. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->irq, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
728 
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is delivered in the lowcore by the machine. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Preserve the unit-check irb while sensing. */
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Unsolicited interrupts are reported with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
770 
/*
 * Got an timeout in online state.
 *
 * Tries to stop the running i/o; either waits for the asynchronous
 * halt/clear to finish (TIMEOUT_KILL), escalates a vanished device,
 * or reports -ETIMEDOUT to the driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear in flight; re-poll from the killing state. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
801 
/*
 * Got an interrupt for a basic sense.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	/* Sense complete; back to online before delivering status. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
838 
/*
 * Interrupt from a cio-initiated clear during path verification:
 * record the status and retry the deferred verification.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
851 
/*
 * Interrupt while we were killing a timed-out i/o: the halt/clear has
 * terminated it. Report -ETIMEDOUT to the driver, then either notify
 * about no-path or resume delayed path verification.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
872 
/*
 * Timeout while killing a timed-out i/o: keep retrying the
 * cancel/halt/clear sequence until it completes or the device is gone.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear still in flight; poll again in 3s. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
902 
/*
 * Interrupt while waiting for a previously started i/o to terminate
 * (state WAIT4IO, entered via device_set_waiting()).
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No path left: let the subchannel driver decide. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		ccw_device_online_verify(cdev, 0);
}
936 
/*
 * Timeout while waiting for a previously started i/o to terminate:
 * try to kill the i/o, escalating like the online timeout handler.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear in flight; re-poll from the killing state. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/O stopped synchronously; report the timeout to the driver. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
972 
/*
 * Path verification requested while waiting for i/o: just remember it;
 * verification is started when the i/o terminates.
 */
static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* When the I/O has terminated, we have to start verification. */
	if (cdev->private->options.pgroup)
		cdev->private->flags.doverify = 1;
}
980 
/*
 * Completion handler for the steal-lock channel program: collect the
 * status (or give up on timeout) and wake the waiter.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1006 
/*
 * Re-start sense id for a disconnected device that may have come back
 * (entered from device_trigger_reprobe()).
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
1023 
/*
 * Re-probe a disconnected device: refresh the schib, rebuild the path
 * mask and pmcw settings lost over the machine check, then restart
 * sense id. No-op unless the device is in the disconnected state.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->irq, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* Enable multipath mode if more than one path is available. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1057 
/*
 * Got an interrupt although the device is offline.
 */
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}
1070 
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Retry the pending schib modification, then go back online and
	 * re-deliver the event that arrived while changing cmf state. */
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1078 
1079 
1080 static void
1081 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1082 {
1083 	ccw_device_set_timeout(cdev, 0);
1084 	if (dev_event == DEV_EVENT_NOTOPER)
1085 		cdev->private->state = DEV_STATE_NOT_OPER;
1086 	else
1087 		cdev->private->state = DEV_STATE_OFFLINE;
1088 	wake_up(&cdev->private->wait_q);
1089 }
1090 
1091 static void
1092 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1093 {
1094 	int ret;
1095 
1096 	ret = ccw_device_cancel_halt_clear(cdev);
1097 	switch (ret) {
1098 	case 0:
1099 		cdev->private->state = DEV_STATE_OFFLINE;
1100 		wake_up(&cdev->private->wait_q);
1101 		break;
1102 	case -ENODEV:
1103 		cdev->private->state = DEV_STATE_NOT_OPER;
1104 		wake_up(&cdev->private->wait_q);
1105 		break;
1106 	default:
1107 		ccw_device_set_timeout(cdev, HZ/10);
1108 	}
1109 }
1110 
1111 /*
1112  * No operation action. This is used e.g. to ignore a timeout event in
1113  * state offline.
1114  */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Intentionally empty: this event is ignored in the current state. */
}
1119 
1120 /*
1121  * Bug operation action.
1122  */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Reaching this handler means a missing entry in the state/event
	 * jump table - treat it as a kernel bug. */
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
	BUG();
}
1130 
1131 /*
1132  * device statemachine
1133  */
/*
 * Jump table of the device state machine, indexed first by device state
 * and then by event. Every (state, event) pair maps to a handler;
 * ccw_device_nop ignores the event, ccw_device_bug flags combinations
 * that should never occur.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
};
1234 
1235 /*
1236  * io_subchannel_irq is called for "real" interrupts or for status
1237  * pending conditions on msch.
1238  */
1239 void
1240 io_subchannel_irq (struct device *pdev)
1241 {
1242 	struct ccw_device *cdev;
1243 
1244 	cdev = to_subchannel(pdev)->dev.driver_data;
1245 
1246 	CIO_TRACE_EVENT (3, "IRQ");
1247 	CIO_TRACE_EVENT (3, pdev->bus_id);
1248 	if (cdev)
1249 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1250 }
1251 
1252 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1253