xref: /linux/drivers/s390/cio/device_fsm.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *			 IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14 #include <linux/jiffies.h>
15 #include <linux/string.h>
16 
17 #include <asm/ccwdev.h>
18 #include <asm/cio.h>
19 
20 #include "cio.h"
21 #include "cio_debug.h"
22 #include "css.h"
23 #include "device.h"
24 #include "chsc.h"
25 #include "ioasm.h"
26 
27 int
28 device_is_online(struct subchannel *sch)
29 {
30 	struct ccw_device *cdev;
31 
32 	if (!sch->dev.driver_data)
33 		return 0;
34 	cdev = sch->dev.driver_data;
35 	return (cdev->private->state == DEV_STATE_ONLINE);
36 }
37 
38 int
39 device_is_disconnected(struct subchannel *sch)
40 {
41 	struct ccw_device *cdev;
42 
43 	if (!sch->dev.driver_data)
44 		return 0;
45 	cdev = sch->dev.driver_data;
46 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
47 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
48 }
49 
50 void
51 device_set_disconnected(struct subchannel *sch)
52 {
53 	struct ccw_device *cdev;
54 
55 	if (!sch->dev.driver_data)
56 		return;
57 	cdev = sch->dev.driver_data;
58 	ccw_device_set_timeout(cdev, 0);
59 	cdev->private->flags.fake_irb = 0;
60 	cdev->private->state = DEV_STATE_DISCONNECTED;
61 }
62 
63 void
64 device_set_waiting(struct subchannel *sch)
65 {
66 	struct ccw_device *cdev;
67 
68 	if (!sch->dev.driver_data)
69 		return;
70 	cdev = sch->dev.driver_data;
71 	ccw_device_set_timeout(cdev, 10*HZ);
72 	cdev->private->state = DEV_STATE_WAIT4IO;
73 }
74 
75 /*
76  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
77  */
78 static void
79 ccw_device_timeout(unsigned long data)
80 {
81 	struct ccw_device *cdev;
82 
83 	cdev = (struct ccw_device *) data;
84 	spin_lock_irq(cdev->ccwlock);
85 	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
86 	spin_unlock_irq(cdev->ccwlock);
87 }
88 
89 /*
90  * Set timeout
91  */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	/* expires == 0 means: cancel any pending timeout. */
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	/* Timer already armed? Just push its expiry out. */
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	/*
	 * NOTE(review): if the timer fires between the timer_pending()
	 * check and mod_timer() (mod_timer then returns 0), the timer has
	 * already been re-armed by mod_timer and the add_timer() below
	 * would act on a pending timer -- looks racy; confirm against the
	 * timer API semantics.
	 */
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
108 
109 /* Kill any pending timers after machine check. */
110 void
111 device_kill_pending_timer(struct subchannel *sch)
112 {
113 	struct ccw_device *cdev;
114 
115 	if (!sch->dev.driver_data)
116 		return;
117 	cdev = sch->dev.driver_data;
118 	ccw_device_set_timeout(cdev, 0);
119 }
120 
121 /*
122  * Cancel running i/o. This is called repeatedly since halt/clear are
123  * asynchronous operations. We do one try with cio_cancel, two tries
124  * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
125  * Returns 0 if device now idle, -ENODEV for device not operational and
126  * -EBUSY if an interrupt is expected (either from halt/clear or from a
127  * status pending).
128  */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the schib to see the current subchannel status. */
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 from cio_halt means: interrupt pending. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All clear retries exhausted -- this should never happen. */
	panic("Can't stop i/o on subchannel.\n");
}
169 
/*
 * A device became operational again after recognition. Decide whether
 * it is still the same device (returns 1, driver notification will be
 * deferred via the donotify flag) or a different one that has to be
 * de-/re-registered (returns 0).
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		/* Different device: unregister/re-register via workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	/* Same device: notification is issued later (ccw_device_done). */
	cdev->private->flags.donotify = 1;
	return 1;
}
195 
196 /*
197  * The machine won't give us any notification by machine check if a chpid has
198  * been varied online on the SE so we have to find out by magic (i. e. driving
199  * the channel subsystem to device selection and updating our path masks).
200  */
201 static inline void
202 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
203 {
204 	int mask, i;
205 
206 	for (i = 0; i<8; i++) {
207 		mask = 0x80 >> i;
208 		if (!(sch->lpm & mask))
209 			continue;
210 		if (old_lpm & mask)
211 			continue;
212 		chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
213 	}
214 }
215 
216 /*
217  * Stop device recognition.
218  */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition finished; stop the timeout and quiesce the sch. */
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	/* Recompute the usable path mask from the refreshed pmcw masks. */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Reprobe failed; stay disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared: is it still the same one? */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->ssid, cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
305 
306 /*
307  * Function called from device_id.c after sense id has completed.
308  */
309 void
310 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
311 {
312 	switch (err) {
313 	case 0:
314 		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
315 		break;
316 	case -ETIME:		/* Sense id stopped by timeout. */
317 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
318 		break;
319 	default:
320 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
321 		break;
322 	}
323 }
324 
/*
 * Work function, queued from ccw_device_done() when a previously known
 * device became operational again. Asks the subchannel driver whether
 * it wants the device back.
 */
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)cdev);
	else {
		/* Reenable channel measurements, if needed. */
		cmf_reenable(cdev);
		wake_up(&cdev->private->wait_q);
	}
}
345 
346 /*
347  * Finished with online/offline processing.
348  */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* Anything but online means the subchannel is no longer needed. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->schid.sch_no);

	/* Deferred notification requested by ccw_device_handle_oper(). */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
381 
382 /*
383  * Function called from device_pgid.c after sense path ground has completed.
384  */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		/* Any other error: the device is not operational. */
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
411 
412 /*
413  * Start device recognition.
414  */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Recognition only makes sense for not-oper or boxed devices. */
	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
446 
447 /*
448  * Handle timeout in device recognition.
449  */
450 static void
451 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
452 {
453 	int ret;
454 
455 	ret = ccw_device_cancel_halt_clear(cdev);
456 	switch (ret) {
457 	case 0:
458 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
459 		break;
460 	case -ENODEV:
461 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
462 		break;
463 	default:
464 		ccw_device_set_timeout(cdev, 3*HZ);
465 	}
466 }
467 
468 
/*
 * Work function, queued when a device has lost its last path. Asks the
 * subchannel driver whether it wants to keep the device; if not, the
 * subchannel is scheduled for unregistration, otherwise the device is
 * parked in the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				/* Unregister the subchannel via workqueue. */
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* Device ref not obtainable; drop sch ref. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device: go disconnected and wait. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
504 
/*
 * Called when path verification has finished; err carries the result.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			/* Fake an irb for a still-pending start function. */
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw = (struct scsw) {
				.cc = 1,
				.fctl = SCSW_FCTL_START_FUNC,
				.actl = SCSW_ACTL_START_PEND,
				.stctl = SCSW_STCTL_STATUS_PEND,
			};
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Verification timed out: device is boxed. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path left: notify and go not operational. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
541 
542 /*
543  * Get device online.
544  */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Only offline or boxed devices can be brought online. */
	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference while online; ccw_device_done drops it. */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
575 
576 void
577 ccw_device_disband_done(struct ccw_device *cdev, int err)
578 {
579 	switch (err) {
580 	case 0:
581 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
582 		break;
583 	case -ETIME:
584 		ccw_device_done(cdev, DEV_STATE_BOXED);
585 		break;
586 	default:
587 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
588 		break;
589 	}
590 }
591 
592 /*
593  * Shutdown device.
594  */
595 int
596 ccw_device_offline(struct ccw_device *cdev)
597 {
598 	struct subchannel *sch;
599 
600 	sch = to_subchannel(cdev->dev.parent);
601 	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
602 		return -ENODEV;
603 	if (cdev->private->state != DEV_STATE_ONLINE) {
604 		if (sch->schib.scsw.actl != 0)
605 			return -EBUSY;
606 		return -EINVAL;
607 	}
608 	if (sch->schib.scsw.actl != 0)
609 		return -EBUSY;
610 	/* Are we doing path grouping? */
611 	if (!cdev->private->options.pgroup) {
612 		/* No, set state offline immediately. */
613 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
614 		return 0;
615 	}
616 	/* Start Set Path Group commands. */
617 	cdev->private->state = DEV_STATE_DISBAND_PGID;
618 	ccw_device_disband_start(cdev);
619 	return 0;
620 }
621 
622 /*
623  * Handle timeout in device online/offline process.
624  */
625 static void
626 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
627 {
628 	int ret;
629 
630 	ret = ccw_device_cancel_halt_clear(cdev);
631 	switch (ret) {
632 	case 0:
633 		ccw_device_done(cdev, DEV_STATE_BOXED);
634 		break;
635 	case -ENODEV:
636 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
637 		break;
638 	default:
639 		ccw_device_set_timeout(cdev, 3*HZ);
640 	}
641 }
642 
643 /*
644  * Handle not oper event in device recognition.
645  */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Device vanished during recognition; finish as not operational. */
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
651 
652 /*
653  * Handle not operational event while offline.
654  */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	/* Schedule unregistration of the subchannel via the workqueue. */
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
669 
670 /*
671  * Handle not operational event while online.
672  */
673 static void
674 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
675 {
676 	struct subchannel *sch;
677 
678 	sch = to_subchannel(cdev->dev.parent);
679 	if (sch->driver->notify &&
680 	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
681 			ccw_device_set_timeout(cdev, 0);
682 			cdev->private->flags.fake_irb = 0;
683 			cdev->private->state = DEV_STATE_DISCONNECTED;
684 			wake_up(&cdev->private->wait_q);
685 			return;
686 	}
687 	cdev->private->state = DEV_STATE_NOT_OPER;
688 	cio_disable_subchannel(sch);
689 	if (sch->schib.scsw.actl != 0) {
690 		// FIXME: not-oper indication to device driver ?
691 		ccw_device_call_handler(cdev);
692 	}
693 	if (get_device(&cdev->dev)) {
694 		PREPARE_WORK(&cdev->private->kick_work,
695 			     ccw_device_call_sch_unregister, (void *)cdev);
696 		queue_work(ccw_device_work, &cdev->private->kick_work);
697 	}
698 	wake_up(&cdev->private->wait_q);
699 }
700 
701 /*
702  * Handle path verification event.
703  */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Without path grouping there is nothing to verify. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress; verify once it has finished. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
736 
737 /*
738  * Got an interrupt for a normal io (state online).
739  */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* Fetch the irb from its fixed lowcore location. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Deliver the unsolicited interrupt with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
778 
779 /*
780  * Got an timeout in online state.
781  */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still pending; poll again in three seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left; notify via the workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/O was stopped; tell the driver the request timed out. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
809 
810 /*
811  * Got an interrupt for a basic sense.
812  */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* Fetch the irb from its fixed lowcore location. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	/* Sense processing done; back to online and deliver the status. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
858 
859 static void
860 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
861 {
862 	struct irb *irb;
863 
864 	irb = (struct irb *) __LC_IRB;
865 	/* Accumulate status. We don't do basic sense. */
866 	ccw_device_accumulate_irb(cdev, irb);
867 	/* Remember to clear irb to avoid residuals. */
868 	memset(&cdev->private->irb, 0, sizeof(struct irb));
869 	/* Try to start delayed device verification. */
870 	ccw_device_online_verify(cdev, 0);
871 	/* Note: Don't call handler for cio initiated clear! */
872 }
873 
/*
 * Got an interrupt while in DEV_STATE_TIMEOUT_KILL, i.e. while waiting
 * for the halt/clear started after an i/o timeout to complete.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No path left; notify via the workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
894 
/*
 * Timeout in DEV_STATE_TIMEOUT_KILL: the halt/clear did not complete
 * in time, so drive the cancel/halt/clear sequence another step.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy; check again in three seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left; notify via the workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
924 
/* Interrupt while waiting for pending i/o (DEV_STATE_WAIT4IO). */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->schid, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No path left; notify via the workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
958 
/* Timeout while waiting for pending i/o (DEV_STATE_WAIT4IO). */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still pending; poll again in three seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			/* No path left; notify via the workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/O was stopped; tell the driver the request timed out. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
994 
995 static void
996 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
997 {
998 	/* When the I/O has terminated, we have to start verification. */
999 	if (cdev->private->options.pgroup)
1000 		cdev->private->flags.doverify = 1;
1001 }
1002 
/*
 * Completion (interrupt or timeout) of a stlck operation; just wakes
 * up whoever is waiting on the device's wait queue.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1028 
1029 static void
1030 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1031 {
1032 	struct subchannel *sch;
1033 
1034 	sch = to_subchannel(cdev->dev.parent);
1035 	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1036 		/* Couldn't enable the subchannel for i/o. Sick device. */
1037 		return;
1038 
1039 	/* After 60s the device recognition is considered to have failed. */
1040 	ccw_device_set_timeout(cdev, 60*HZ);
1041 
1042 	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1043 	ccw_device_sense_id_start(cdev);
1044 }
1045 
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only disconnected devices are reprobed. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one path available: set the multipath bit. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1079 
1080 static void
1081 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1082 {
1083 	struct subchannel *sch;
1084 
1085 	sch = to_subchannel(cdev->dev.parent);
1086 	/*
1087 	 * An interrupt in state offline means a previous disable was not
1088 	 * successful. Try again.
1089 	 */
1090 	cio_disable_subchannel(sch);
1091 }
1092 
/*
 * Retry the pending set_schib operation for channel measurement, go
 * back to state ONLINE and re-deliver the triggering event there.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	/* State must be restored before re-dispatching the event. */
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1100 
/*
 * Retry the pending copy of the channel measurement block, go back to
 * state ONLINE and re-deliver the triggering event there.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	/* State must be restored before re-dispatching the event. */
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1108 
1109 static void
1110 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1111 {
1112 	ccw_device_set_timeout(cdev, 0);
1113 	if (dev_event == DEV_EVENT_NOTOPER)
1114 		cdev->private->state = DEV_STATE_NOT_OPER;
1115 	else
1116 		cdev->private->state = DEV_STATE_OFFLINE;
1117 	wake_up(&cdev->private->wait_q);
1118 }
1119 
1120 static void
1121 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1122 {
1123 	int ret;
1124 
1125 	ret = ccw_device_cancel_halt_clear(cdev);
1126 	switch (ret) {
1127 	case 0:
1128 		cdev->private->state = DEV_STATE_OFFLINE;
1129 		wake_up(&cdev->private->wait_q);
1130 		break;
1131 	case -ENODEV:
1132 		cdev->private->state = DEV_STATE_NOT_OPER;
1133 		wake_up(&cdev->private->wait_q);
1134 		break;
1135 	default:
1136 		ccw_device_set_timeout(cdev, HZ/10);
1137 	}
1138 }
1139 
1140 /*
1141  * No operation action. This is used e.g. to ignore a timeout event in
1142  * state offline.
1143  */
1144 static void
1145 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1146 {
1147 }
1148 
1149 /*
1150  * Bug operation action.
1151  */
1152 static void
1153 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1154 {
1155 	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1156 	       cdev->private->state, dev_event);
1157 	BUG();
1158 }
1159 
1160 /*
1161  * device statemachine
1162  */
1163 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1164 	[DEV_STATE_NOT_OPER] = {
1165 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1166 		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
1167 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1168 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1169 	},
1170 	[DEV_STATE_SENSE_PGID] = {
1171 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1172 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
1173 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1174 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1175 	},
1176 	[DEV_STATE_SENSE_ID] = {
1177 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1178 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1179 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1180 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1181 	},
1182 	[DEV_STATE_OFFLINE] = {
1183 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1184 		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
1185 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1186 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1187 	},
1188 	[DEV_STATE_VERIFY] = {
1189 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1190 		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
1191 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1192 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1193 	},
1194 	[DEV_STATE_ONLINE] = {
1195 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1196 		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
1197 		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
1198 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1199 	},
1200 	[DEV_STATE_W4SENSE] = {
1201 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1202 		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
1203 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1204 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1205 	},
1206 	[DEV_STATE_DISBAND_PGID] = {
1207 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1208 		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
1209 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1210 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1211 	},
1212 	[DEV_STATE_BOXED] = {
1213 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1214 		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
1215 		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
1216 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1217 	},
1218 	/* states to wait for i/o completion before doing something */
1219 	[DEV_STATE_CLEAR_VERIFY] = {
1220 		[DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1221 		[DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1222 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1223 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1224 	},
1225 	[DEV_STATE_TIMEOUT_KILL] = {
1226 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1227 		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
1228 		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
1229 		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
1230 	},
1231 	[DEV_STATE_WAIT4IO] = {
1232 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1233 		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
1234 		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
1235 		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
1236 	},
1237 	[DEV_STATE_QUIESCE] = {
1238 		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
1239 		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
1240 		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
1241 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1242 	},
1243 	/* special states for devices gone not operational */
1244 	[DEV_STATE_DISCONNECTED] = {
1245 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1246 		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
1247 		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
1248 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1249 	},
1250 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
1251 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1252 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1253 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1254 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1255 	},
1256 	[DEV_STATE_CMFCHANGE] = {
1257 		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
1258 		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
1259 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
1260 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
1261 	},
1262 	[DEV_STATE_CMFUPDATE] = {
1263 		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
1264 		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
1265 		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
1266 		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
1267 	},
1268 };
1269 
1270 /*
1271  * io_subchannel_irq is called for "real" interrupts or for status
1272  * pending conditions on msch.
1273  */
1274 void
1275 io_subchannel_irq (struct device *pdev)
1276 {
1277 	struct ccw_device *cdev;
1278 
1279 	cdev = to_subchannel(pdev)->dev.driver_data;
1280 
1281 	CIO_TRACE_EVENT (3, "IRQ");
1282 	CIO_TRACE_EVENT (3, pdev->bus_id);
1283 	if (cdev)
1284 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1285 }
1286 
/* Exported so that GPL ccw device drivers can (re-)arm the fsm timer. */
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1288