xref: /linux/drivers/s390/cio/device.c (revision 9208c05f9fdfd927ea160b97dfef3c379049fff2)
// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static const struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel subsystem per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int ccw_bus_match(struct device *dev, const struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	const struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
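
/*
 * Illustrative sketch (not part of this file): a driver advertises the
 * devices it handles through an ids table like the one below, which
 * ccw_bus_match() feeds into ccw_device_id_match(). The table name is
 * hypothetical; a model of 0 acts as a wildcard.
 */
static struct ccw_device_id example_match_ids[] = {
	/* any 3390 device behind any model of 3990 control unit */
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
	{ /* end of list */ },
};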

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 const struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
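
/*
 * Worked example (illustrative): cu_type 0x3990 / cu_model 0xe9 with
 * dev_type 0x3390 / dev_model 0x0a yields "ccw:t3990mE9dt3390dm0A";
 * the same control unit with no device type yields "ccw:t3990mE9dtdm".
 */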

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct ccw_device *cdev = to_ccwdev(dev);
	const struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

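/*
 * The settle callback is invoked through the channel subsystem (e.g.
 * when user space pokes /proc/cio_settle) to wait until device
 * recognition has finished and queued cio work has drained, so that
 * the set of registered ccw devices is stable.
 */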
static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model);
	else
		return sysfs_emit(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sysfs_emit(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	mutex_unlock(&cdev->reg_mutex);

	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	if (ret) {
		spin_unlock_irq(cdev->ccwlock);
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	/* Wait until a final state is reached */
	while (!dev_fsm_final_state(cdev)) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
		spin_lock_irq(cdev->ccwlock);
	}
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
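
/*
 * Illustrative sketch (hypothetical name, not part of this file): a
 * driver or bus glue typically pairs the two helpers above, e.g. to
 * cycle a device for maintenance. Both must be called without the ccw
 * device lock held.
 */
static int example_cycle_device(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_set_offline(cdev);	/* drv->set_offline(), then disable */
	if (rc)
		return rc;
	return ccw_device_set_online(cdev);	/* enable, then drv->set_online() */
}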

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
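
/*
 * Values accepted by the "online" attribute (parsed above): "0" takes
 * the device offline, "1" brings it online, and "force" additionally
 * breaks the reserve of a boxed device via ccw_device_stlck() before
 * retrying. Anything else is rejected with -EINVAL.
 */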

static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sysfs_emit(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sysfs_emit(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sysfs_emit(buf, "no path\n");
		else
			return sysfs_emit(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sysfs_emit(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device
 *  is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
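
/*
 * Illustrative sketch (hypothetical values): looking up device 0.0.1234.
 * The caller owns the returned reference and must drop it again.
 */
static void example_lookup_and_release(void)
{
	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_dev_id(&dev_id);
	if (cdev) {
		/* ... use cdev ... */
		put_device(&cdev->dev);	/* drop reference from the lookup */
	}
}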

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
	mutex_unlock(&cdev->reg_mutex);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;
	int ret;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto err_cdev;
	}
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		ret = -ENOMEM;
		goto err_priv;
	}

	cdev->dev.dma_mask = sch->dev.dma_mask;
	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
	if (ret)
		goto err_coherent_mask;

	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool) {
		ret = -ENOMEM;
		goto err_dma_pool;
	}
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area) {
		ret = -ENOMEM;
		goto err_dma_area;
	}
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(ret);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);
	mutex_init(&cdev->reg_mutex);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = &sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.bus = &ccw_bus_type;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(&sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(&sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/* make it known to the system */
	ret = device_add(&cdev->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(&sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(&sch->lock, flags);
		mutex_unlock(&cdev->reg_mutex);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	mutex_unlock(&cdev->reg_mutex);
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(&sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(&sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(&old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(&old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to re-enable the old subchannel. */
			spin_lock_irq(&old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
			spin_unlock_irq(&old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(&old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(&old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(&sch->lock);
	cdev->ccwlock = &sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(&sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

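/*
 * The "orphanage" is the channel subsystem's pseudo subchannel. Moving
 * a ccw device there keeps the struct ccw_device (and its driver
 * binding) alive while the I/O subchannel it used to sit on is gone or
 * now serves a different device.
 */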
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

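/*
 * Path-mask bookkeeping below: pim/pam/pom are the PMCW's path
 * installed/available/operational masks; opm is the mask of paths we
 * may operate, and lpm (the logical path mask actually used for I/O)
 * is its intersection with pam. E.g. pam 0xc0 and opm 0x80 give
 * lpm 0x80, i.e. only the first channel path is used.
 */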
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Register it and exit.
		 */
		cdev = sch_get_cdev(sch);
		rc = device_add(&cdev->dev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(&sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(&sch->lock);
	return 0;
}

static void io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(&sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(&sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	else
		css_schedule_eval(sch->schid);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask, chpid, valid_bit;
	int path_event[8];

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_FCES_EVENT:
		/* Forward Endpoint Security event */
		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
				valid_bit >>= 1) {
			if (mask & valid_bit)
				path_event[chpid] = PE_PATH_FCES_EVENT;
			else
				path_event[chpid] = PE_NONE;
		}
		if (cdev && cdev->drv && cdev->drv->path_event)
			cdev->drv->path_event(cdev, path_event);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(&sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(&sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(&sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(&sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
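
/*
 * Net effect of the recovery machinery above: once scheduled, devices
 * needing recovery are re-verified after 3 seconds, then after 30
 * seconds, and from then on every 300 seconds (recovery_delay[]) until
 * no device needs recovery anymore.
 */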

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_UNREG_CDEV,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
	IO_SCH_ORPH_CDEV,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	cdev = sch_get_cdev(sch);
	rc = cio_update_schib(sch);

	if (rc == -ENODEV) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}

	/* Avoid unregistering subchannels without working device. */
	if (rc == -EACCES) {
		if (!cdev)
			return IO_SCH_NOP;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_CDEV;
		return IO_SCH_ORPH_CDEV;
	}

	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG_CDEV;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(&sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_CDEV:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_CDEV:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(&sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(&sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(&sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(&sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay(100);
	}
}
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device
 *  is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static void ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static const struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
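
/*
 * Illustrative sketch of a minimal ccw driver built on the two wrappers
 * above. All names are hypothetical; the 0x3088/0x60 control-unit
 * type/model pair is used merely as an example id. A real module would
 * also declare MODULE_DEVICE_TABLE(ccw, ...) so udev can autoload it
 * from the MODALIAS uevent emitted by ccw_uevent().
 */
static struct ccw_device_id example_ids[] = {
	{ CCW_DEVICE(0x3088, 0x60) },
	{ /* end of list */ },
};

static int example_probe(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "example driver bound\n");
	return 0;
}

static void example_remove(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "example driver unbound\n");
}

static struct ccw_driver example_driver = {
	.driver = {
		.name	= "example_ccw",
		.owner	= THIS_MODULE,
	},
	.ids	= example_ids,
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return ccw_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	ccw_driver_unregister(&example_driver);
}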

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		spin_lock_irq(&sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(&sch->lock);
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
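
/*
 * Note: enum cdev_todo is ordered by priority; the "todo >= todo" check
 * above means a queued CDEV_TODO_UNREG, for instance, is never replaced
 * by a lower-priority request such as CDEV_TODO_REBIND.
 */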

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);