// SPDX-License-Identifier: GPL-2.0
/*
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

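/*
 * Enable logging of ccw device timeouts. Set by specifying the
 * "ccw_timeout_log" parameter on the kernel command line.
 */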
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

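/*
 * Dump diagnostic information about a timed-out ccw device to the
 * kernel log: the orb, the last channel program (or tcw in transport
 * mode), the schib and the internal device state and flags.
 */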
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       dma32_to_virt(orb->tm.tcw),
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if (dma32_to_virt(orb->cmd.cpa) ==
		    &private->dma_area->sense_ccw ||
		    dma32_to_virt(orb->cmd.cpa) ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       dma32_to_virt(orb->cmd.cpa),
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = timer_container_of(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set (or, with expires == 0, cancel) the device timeout. The expires
 * value is a delta in jiffies, e.g. ccw_device_set_timeout(cdev, 3 * HZ).
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0)
		timer_delete(&cdev->private->timer);
	else
		mod_timer(&cdev->private->timer, jiffies + expires);
}

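/*
 * Try to stop pending I/O on the device's subchannel.
 * cio_cancel_halt_clear() escalates from cancel to halt to clear,
 * using cdev->private->iretry as its retry counter.
 */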
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);

	if (ret == -EIO)
		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);

	return ret;
}

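/* Copy the most recently sensed device identification into cdev->id. */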
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}

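/* Return non-zero if the sensed identification still matches cdev->id. */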
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type ==
		cdev->private->dma_area->senseid.cu_type &&
		cdev->id.cu_model ==
		cdev->private->dma_area->senseid.cu_model &&
		cdev->id.dev_type ==
		cdev->private->dma_area->senseid.dev_type &&
		cdev->id.dev_model ==
		cdev->private->dma_area->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE, so we have to find out by magic (i.e. by
 * driving the channel subsystem to device selection and updating our path
 * masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}

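/*
 * Tell the driver that the device became operational again. If the
 * driver wants the device back, re-enable channel measurement and note
 * the newly available paths; otherwise mark the device not operational
 * and schedule a rebind.
 */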
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Re-enable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects that the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}

/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
	switch (e) {
	case DEV_EVENT_NOTOPER:
		ccw_request_notoper(cdev);
		break;
	case DEV_EVENT_INTERRUPT:
		ccw_request_handler(cdev);
		break;
	case DEV_EVENT_TIMEOUT:
		ccw_request_timeout(cdev);
		break;
	default:
		break;
	}
}

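/*
 * Report path events to the driver: for each of the eight channel
 * paths, build a PE_* mask describing paths that went away, became
 * newly available, or had their path group re-established.
 */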
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}

static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}

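/*
 * Build an irb that looks like a start function was accepted with
 * condition code 1 and is now status pending, in command or transport
 * mode. Used by ccw_device_verify_done() to complete a request towards
 * the driver that was deferred during path verification.
 */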
static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (type == FAKE_CMD_IRB) {
		struct cmd_scsw *scsw = &irb->scsw.cmd;

		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	} else if (type == FAKE_TM_IRB) {
		struct tm_scsw *scsw = &irb->scsw.tm;

		scsw->x = 1;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	}
}

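/*
 * A path is considered broken if it is operational (set in pam and opm)
 * but could not be verified (missing from vpm). Schedule device
 * recovery when new broken paths appear.
 */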
static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;

	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
		ccw_device_schedule_recovery();

	cdev->private->path_broken_mask = broken_paths;
}

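/*
 * Path verification has finished: update lpm from the verified path
 * mask, deliver a deferred (fake) irb if a start request came in during
 * verification, report path events and move the device to its final
 * state based on err.
 */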
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno,
				      cdev->private->intparm,
				      cdev->private->flags.fake_irb);
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel, we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
	     SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until the final status has been delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cdev->online) {
		if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)))
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		else
			ccw_device_online_verify(cdev, dev_event);
	} else
		css_schedule_eval(sch->schid);
}

/*
 * Pass interrupt to device driver.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * We call the device action handler if:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	if (ending_status)
		ccw_device_set_timeout(cdev, 0);

	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->dma_area->irb);

	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
	return 1;
}

/*
 * Got an interrupt for a normal I/O (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0)
			cdev->private->state = DEV_STATE_W4SENSE;
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

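/*
 * Kill the current I/O: try to cancel/halt/clear it and, once it is
 * dead, complete it towards the driver with ERR_PTR(-EIO). If the
 * subchannel is still busy, retry from the DEV_STATE_TIMEOUT_KILL
 * state.
 */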
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

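/*
 * Restart device recognition for a disconnected device, e.g. after it
 * became accessible again. If a different device meanwhile appeared on
 * the subchannel, hand the subchannel over to css evaluation instead.
 */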
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}

static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

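/*
 * Retry the pending channel measurement configuration change, then
 * return to the online state and re-deliver the event that interrupted
 * the change.
 */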
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

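/*
 * Retry copying the channel measurement block, then return to the
 * online state and re-deliver the event that interrupted the copy.
 */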
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
	} else {
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * device state machine
 */
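/*
 * dev_fsm_event() (see device.h) dispatches by indexing this table with
 * the device's current state and the incoming event, roughly:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */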
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);