xref: /linux/drivers/s390/cio/device_fsm.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *			 IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14 
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17 
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25 
26 int
27 device_is_online(struct subchannel *sch)
28 {
29 	struct ccw_device *cdev;
30 
31 	if (!sch->dev.driver_data)
32 		return 0;
33 	cdev = sch->dev.driver_data;
34 	return (cdev->private->state == DEV_STATE_ONLINE);
35 }
36 
37 int
38 device_is_disconnected(struct subchannel *sch)
39 {
40 	struct ccw_device *cdev;
41 
42 	if (!sch->dev.driver_data)
43 		return 0;
44 	cdev = sch->dev.driver_data;
45 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47 }
48 
49 void
50 device_set_disconnected(struct subchannel *sch)
51 {
52 	struct ccw_device *cdev;
53 
54 	if (!sch->dev.driver_data)
55 		return;
56 	cdev = sch->dev.driver_data;
57 	ccw_device_set_timeout(cdev, 0);
58 	cdev->private->flags.fake_irb = 0;
59 	cdev->private->state = DEV_STATE_DISCONNECTED;
60 }
61 
62 void
63 device_set_waiting(struct subchannel *sch)
64 {
65 	struct ccw_device *cdev;
66 
67 	if (!sch->dev.driver_data)
68 		return;
69 	cdev = sch->dev.driver_data;
70 	ccw_device_set_timeout(cdev, 10*HZ);
71 	cdev->private->state = DEV_STATE_WAIT4IO;
72 }
73 
74 /*
75  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
76  */
77 static void
78 ccw_device_timeout(unsigned long data)
79 {
80 	struct ccw_device *cdev;
81 
82 	cdev = (struct ccw_device *) data;
83 	spin_lock_irq(cdev->ccwlock);
84 	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
85 	spin_unlock_irq(cdev->ccwlock);
86 }
87 
/*
 * Set or cancel the fsm timeout for @cdev. @expires is a delta in
 * jiffies; expires == 0 cancels a pending timeout.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		/* Cancel any pending timeout. */
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns nonzero if the timer was active. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	/* Timer was not pending (or just expired): (re)arm it fully. */
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
107 
108 /* Kill any pending timers after machine check. */
109 void
110 device_kill_pending_timer(struct subchannel *sch)
111 {
112 	struct ccw_device *cdev;
113 
114 	if (!sch->dev.driver_data)
115 		return;
116 	cdev = sch->dev.driver_data;
117 	ccw_device_set_timeout(cdev, 0);
118 }
119 
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel status block. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		/* stsch failed or device number invalid -> device gone. */
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 from cio_halt means an interrupt is pending. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All clear retries exhausted - the subchannel is stuck. */
	panic("Can't stop i/o on subchannel.\n");
}
168 
/*
 * A formerly disconnected device has become operational again.
 * Returns 1 if it is still the same device, 0 if identity or devno
 * changed and an unregister/re-register has been scheduled.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		/* Different device: defer the re-registration to the
		 * ccw work queue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	/* Same device: remember to notify the subchannel driver later. */
	cdev->private->flags.donotify = 1;
	return 1;
}
194 
195 /*
196  * The machine won't give us any notification by machine check if a chpid has
197  * been varied online on the SE so we have to find out by magic (i. e. driving
198  * the channel subsystem to device selection and updating our path masks).
199  */
200 static inline void
201 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
202 {
203 	int mask, i;
204 
205 	for (i = 0; i<8; i++) {
206 		mask = 0x80 >> i;
207 		if (!(sch->lpm & mask))
208 			continue;
209 		if (old_lpm & mask)
210 			continue;
211 		chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
212 	}
213 }
214 
/*
 * Stop device recognition: disable the subchannel, refresh the path
 * masks and enter the final state. @state may be downgraded to
 * DEV_STATE_NOT_OPER if the device vanished in the meantime.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Device stays disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared after a disconnect; check
			 * whether it is still the same device. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
301 
302 /*
303  * Function called from device_id.c after sense id has completed.
304  */
305 void
306 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
307 {
308 	switch (err) {
309 	case 0:
310 		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
311 		break;
312 	case -ETIME:		/* Sense id stopped by timeout. */
313 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
314 		break;
315 	default:
316 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
317 		break;
318 	}
319 }
320 
/*
 * Work function: a formerly disconnected device became operational
 * again. Ask the subchannel driver whether it wants the device back;
 * if not, unregister and re-register it as a new device.
 */
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* No driver or no notify hook counts as "don't want it back". */
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)cdev);
	else
		wake_up(&cdev->private->wait_q);
}
338 
/*
 * Finished with online/offline processing: disable the subchannel
 * (unless going online), enter @state and wake up any waiters.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		/* Deferred notification that the device became
		 * operational again (see ccw_device_handle_oper). */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Balance the get_device() taken in ccw_device_online()
	 * (css_init_done case). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
374 
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. Maps the result code to the next fsm step.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
404 
/*
 * Start device recognition. Returns 0 on success, -EINVAL if the
 * device is in the wrong state, or the cio_enable_subchannel error.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Recognition only makes sense for unknown or boxed devices. */
	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
439 
440 /*
441  * Handle timeout in device recognition.
442  */
443 static void
444 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
445 {
446 	int ret;
447 
448 	ret = ccw_device_cancel_halt_clear(cdev);
449 	switch (ret) {
450 	case 0:
451 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
452 		break;
453 	case -ENODEV:
454 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
455 		break;
456 	default:
457 		ccw_device_set_timeout(cdev, 3*HZ);
458 	}
459 }
460 
461 
/*
 * Work function: all paths to the device are gone. Ask the subchannel
 * driver whether it wants to keep the device anyway; if not, schedule
 * unregistration, otherwise park the device in disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		/* Take references for the deferred unregistration. */
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* cdev is going away; drop the sch ref. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device; wait disconnected until a
		 * path comes back. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
497 
/*
 * Function called from device_pgid.c after path verification has
 * completed. Maps the result code to the final device state.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			/* Simulate a start function with status pending. */
			cdev->private->irb.scsw = (struct scsw) {
				.cc = 1,
				.fctl = SCSW_FCTL_START_FUNC,
				.actl = SCSW_ACTL_START_PEND,
				.stctl = SCSW_STCTL_STATUS_PEND,
			};
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Verification failed hard; notify and mark not oper. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
534 
/*
 * Get device online. Returns 0 when the online/sense-pgid sequence
 * was started, -EINVAL for a wrong state, -ENODEV if the device
 * reference could not be obtained.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Only offline or boxed devices may be set online. */
	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference while going online; released in
	 * ccw_device_done() if the online state is not reached. */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
568 
569 void
570 ccw_device_disband_done(struct ccw_device *cdev, int err)
571 {
572 	switch (err) {
573 	case 0:
574 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
575 		break;
576 	case -ETIME:
577 		ccw_device_done(cdev, DEV_STATE_BOXED);
578 		break;
579 	default:
580 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
581 		break;
582 	}
583 }
584 
/*
 * Shutdown device. Returns 0 when offline processing was started,
 * -ENODEV if the device vanished, -EBUSY while i/o is active and
 * -EINVAL for a wrong state.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh schib; refuse if subchannel gone or devno invalid. */
	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* Activity takes precedence over the wrong-state error. */
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	/* Don't start offline processing while i/o is in flight. */
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
614 
615 /*
616  * Handle timeout in device online/offline process.
617  */
618 static void
619 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
620 {
621 	int ret;
622 
623 	ret = ccw_device_cancel_halt_clear(cdev);
624 	switch (ret) {
625 	case 0:
626 		ccw_device_done(cdev, DEV_STATE_BOXED);
627 		break;
628 	case -ENODEV:
629 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
630 		break;
631 	default:
632 		ccw_device_set_timeout(cdev, 3*HZ);
633 	}
634 }
635 
/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Finish recognition in the not-operational state. */
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
644 
/*
 * Handle not operational event while offline: mark the device not
 * operational and schedule its unregistration.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	/* Only schedule unregistration if we can still obtain a
	 * reference to the device. */
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
662 
663 /*
664  * Handle not operational event while online.
665  */
666 static void
667 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
668 {
669 	struct subchannel *sch;
670 
671 	sch = to_subchannel(cdev->dev.parent);
672 	if (sch->driver->notify &&
673 	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
674 			ccw_device_set_timeout(cdev, 0);
675 			cdev->private->flags.fake_irb = 0;
676 			cdev->private->state = DEV_STATE_DISCONNECTED;
677 			wake_up(&cdev->private->wait_q);
678 			return;
679 	}
680 	cdev->private->state = DEV_STATE_NOT_OPER;
681 	cio_disable_subchannel(sch);
682 	if (sch->schib.scsw.actl != 0) {
683 		// FIXME: not-oper indication to device driver ?
684 		ccw_device_call_handler(cdev);
685 	}
686 	if (get_device(&cdev->dev)) {
687 		PREPARE_WORK(&cdev->private->kick_work,
688 			     ccw_device_call_sch_unregister, (void *)cdev);
689 		queue_work(ccw_device_work, &cdev->private->kick_work);
690 	}
691 	wake_up(&cdev->private->wait_q);
692 }
693 
/*
 * Handle path verification event: start verification immediately if
 * the device is idle, otherwise record it via the doverify flag.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Verification is only relevant when path grouping is used. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress; verify after it finished. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->irq, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
729 
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* __LC_IRB is where the machine stored the irb for us. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				/* Sense could not be started; report the
				 * unsolicited status as-is. */
				goto call_handler_unsol;
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
771 
/*
 * Got an timeout in online state: try to stop the i/o and report
 * -ETIMEDOUT to the device driver once it is dead.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still pending; poll again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No paths left - defer notify to work queue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/O is stopped; report the timeout to the driver. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
802 
/*
 * Got an interrupt for a basic sense.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			/* NOTE(review): printk lacks a KERN_ level prefix. */
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	/* Sense finished; back to online. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
839 
840 static void
841 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
842 {
843 	struct irb *irb;
844 
845 	irb = (struct irb *) __LC_IRB;
846 	/* Accumulate status. We don't do basic sense. */
847 	ccw_device_accumulate_irb(cdev, irb);
848 	/* Try to start delayed device verification. */
849 	ccw_device_online_verify(cdev, 0);
850 	/* Note: Don't call handler for cio initiated clear! */
851 }
852 
/*
 * Interrupt in state DEV_STATE_TIMEOUT_KILL: the halt/clear has
 * terminated the i/o. Report -ETIMEDOUT to the driver and handle
 * path loss / delayed verification.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* All paths gone - defer notification to work queue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
873 
/*
 * Timeout in state DEV_STATE_TIMEOUT_KILL: retry the halt/clear
 * sequence until the i/o is terminated or the device is gone.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy; try again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No paths left - defer notify to work queue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
903 
/*
 * Interrupt in state DEV_STATE_WAIT4IO: status for the still-running
 * i/o arrived. Accumulate it and deliver it to the device driver.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* All paths gone - defer notification to work queue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
937 
/*
 * Timeout in state DEV_STATE_WAIT4IO: try to terminate the running
 * i/o and report -ETIMEDOUT to the device driver.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still outstanding; continue in kill state. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			/* No paths left - defer notify to work queue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/O is stopped; report the timeout to the driver. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
973 
974 static void
975 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
976 {
977 	/* When the I/O has terminated, we have to start verification. */
978 	if (cdev->private->options.pgroup)
979 		cdev->private->flags.doverify = 1;
980 }
981 
/*
 * Interrupt or timeout while waiting for the "stlck" (presumably
 * steal lock - TODO confirm) i/o: record status and wake up the
 * waiting process. No retry logic here.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1007 
/*
 * Start sense id for a disconnected device (used by
 * device_trigger_reprobe below).
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
1024 
/*
 * Re-probe a disconnected device behind @sch: refresh the schib,
 * rebuild the path mask and pmcw settings and restart sense id.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only disconnected devices are reprobed. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->irq, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* Enable multipath mode if more than one path is available. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1058 
1059 static void
1060 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1061 {
1062 	struct subchannel *sch;
1063 
1064 	sch = to_subchannel(cdev->dev.parent);
1065 	/*
1066 	 * An interrupt in state offline means a previous disable was not
1067 	 * successful. Try again.
1068 	 */
1069 	cio_disable_subchannel(sch);
1070 }
1071 
/*
 * Retry the pending channel-measurement schib update, then return to the
 * online state and re-deliver the event that interrupted the update.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	/* Switch state back *before* re-dispatching so the event is handled
	 * by the DEV_STATE_ONLINE row of the jump table. */
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1079 
1080 
1081 static void
1082 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1083 {
1084 	ccw_device_set_timeout(cdev, 0);
1085 	if (dev_event == DEV_EVENT_NOTOPER)
1086 		cdev->private->state = DEV_STATE_NOT_OPER;
1087 	else
1088 		cdev->private->state = DEV_STATE_OFFLINE;
1089 	wake_up(&cdev->private->wait_q);
1090 }
1091 
1092 static void
1093 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1094 {
1095 	int ret;
1096 
1097 	ret = ccw_device_cancel_halt_clear(cdev);
1098 	switch (ret) {
1099 	case 0:
1100 		cdev->private->state = DEV_STATE_OFFLINE;
1101 		wake_up(&cdev->private->wait_q);
1102 		break;
1103 	case -ENODEV:
1104 		cdev->private->state = DEV_STATE_NOT_OPER;
1105 		wake_up(&cdev->private->wait_q);
1106 		break;
1107 	default:
1108 		ccw_device_set_timeout(cdev, HZ/10);
1109 	}
1110 }
1111 
1112 /*
1113  * No operation action. This is used e.g. to ignore a timeout event in
1114  * state offline.
1115  */
1116 static void
1117 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1118 {
1119 }
1120 
1121 /*
1122  * Bug operation action.
1123  */
1124 static void
1125 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1126 {
1127 	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1128 	       cdev->private->state, dev_event);
1129 	BUG();
1130 }
1131 
1132 /*
1133  * device statemachine
1134  */
1135 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1136 	[DEV_STATE_NOT_OPER] = {
1137 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1138 		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
1139 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1140 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1141 	},
1142 	[DEV_STATE_SENSE_PGID] = {
1143 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1144 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
1145 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1146 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1147 	},
1148 	[DEV_STATE_SENSE_ID] = {
1149 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1150 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1151 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1152 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1153 	},
1154 	[DEV_STATE_OFFLINE] = {
1155 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1156 		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
1157 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1158 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1159 	},
1160 	[DEV_STATE_VERIFY] = {
1161 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1162 		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
1163 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1164 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1165 	},
1166 	[DEV_STATE_ONLINE] = {
1167 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1168 		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
1169 		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
1170 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1171 	},
1172 	[DEV_STATE_W4SENSE] = {
1173 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1174 		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
1175 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1176 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1177 	},
1178 	[DEV_STATE_DISBAND_PGID] = {
1179 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1180 		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
1181 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1182 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1183 	},
1184 	[DEV_STATE_BOXED] = {
1185 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1186 		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
1187 		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
1188 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1189 	},
1190 	/* states to wait for i/o completion before doing something */
1191 	[DEV_STATE_CLEAR_VERIFY] = {
1192 		[DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1193 		[DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1194 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1195 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1196 	},
1197 	[DEV_STATE_TIMEOUT_KILL] = {
1198 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1199 		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
1200 		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
1201 		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
1202 	},
1203 	[DEV_STATE_WAIT4IO] = {
1204 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1205 		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
1206 		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
1207 		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
1208 	},
1209 	[DEV_STATE_QUIESCE] = {
1210 		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
1211 		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
1212 		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
1213 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1214 	},
1215 	/* special states for devices gone not operational */
1216 	[DEV_STATE_DISCONNECTED] = {
1217 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1218 		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
1219 		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
1220 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1221 	},
1222 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
1223 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1224 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1225 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1226 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1227 	},
1228 	[DEV_STATE_CMFCHANGE] = {
1229 		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
1230 		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
1231 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
1232 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
1233 	},
1234 };
1235 
1236 /*
1237  * io_subchannel_irq is called for "real" interrupts or for status
1238  * pending conditions on msch.
1239  */
1240 void
1241 io_subchannel_irq (struct device *pdev)
1242 {
1243 	struct ccw_device *cdev;
1244 
1245 	cdev = to_subchannel(pdev)->dev.driver_data;
1246 
1247 	CIO_TRACE_EVENT (3, "IRQ");
1248 	CIO_TRACE_EVENT (3, pdev->bus_id);
1249 	if (cdev)
1250 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1251 }
1252 
1253 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1254