// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static const struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, const struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	const struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 const struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
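
/*
 * Illustrative sketch (device values are made up): for a device with
 * control unit type/model 3990/E9 and device type/model 3390/0C,
 * snprint_alias() above produces the modalias string
 *
 *	ccw:t3990mE9dt3390dm0C
 *
 * and, if no device type was sensed (dev_type == 0),
 *
 *	ccw:t3990mE9dtdm
 */
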
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct ccw_device *cdev = to_ccwdev(dev);
	const struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
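
/*
 * Sketch of the resulting uevent environment for the same made-up device
 * as above (assuming all add_uevent_var() calls succeed):
 *
 *	CU_TYPE=3990
 *	CU_MODEL=E9
 *	DEV_TYPE=3390
 *	DEV_MODEL=0C
 *	MODALIAS=ccw:t3990mE9dt3390dm0C
 */
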
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model);
	else
		return sysfs_emit(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sysfs_emit(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	mutex_unlock(&cdev->reg_mutex);

	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	if (ret) {
		spin_unlock_irq(cdev->ccwlock);
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	/* Wait until a final state is reached */
	while (!dev_fsm_final_state(cdev)) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
		spin_lock_irq(cdev->ccwlock);
	}
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
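
/*
 * Usage sketch (hypothetical caller, not taken from an in-tree driver):
 * a ccw driver that wants to activate a device from its own code path
 * would do something like
 *
 *	if (ccw_device_set_online(cdev))
 *		dev_warn(&cdev->dev, "could not set device online\n");
 *
 * and later undo it with
 *
 *	if (ccw_device_set_offline(cdev))
 *		dev_warn(&cdev->dev, "could not set device offline\n");
 *
 * Per the Context notes above, both calls must be made without holding
 * the ccw device lock.
 */
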
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing.*/
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
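
/*
 * The online attribute is the user-space entry point for the functions
 * above. Sketch of typical usage from a shell (device ID made up):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online      # set online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online      # set offline
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online  # steal the lock
 *							   # on a boxed device
 */
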
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sysfs_emit(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sysfs_emit(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sysfs_emit(buf, "no path\n");
		else
			return sysfs_emit(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sysfs_emit(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device is
 *  returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
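
/*
 * Sketch of how a caller would use get_ccwdev_by_dev_id(); the returned
 * reference must be dropped with put_device() (dev_id values made up):
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */
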
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
	mutex_unlock(&cdev->reg_mutex);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;
	int ret;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto err_cdev;
	}
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		ret = -ENOMEM;
		goto err_priv;
	}

	cdev->dev.dma_mask = sch->dev.dma_mask;
	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
	if (ret)
		goto err_coherent_mask;

	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool) {
		ret = -ENOMEM;
		goto err_dma_pool;
	}
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area) {
		ret = -ENOMEM;
		goto err_dma_area;
	}
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(ret);
}
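
/*
 * Note the error path above: the labels unwind strictly in reverse order of
 * allocation (dma_area, then dma_pool, then cdev->private, then cdev), so
 * each failure point only releases what was already set up.
 */
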
static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);
	mutex_init(&cdev->reg_mutex);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = &sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.bus = &ccw_bus_type;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(&sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(&sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/* make it known to the system */
	ret = device_add(&cdev->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(&sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(&sch->lock, flags);
		mutex_unlock(&cdev->reg_mutex);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	mutex_unlock(&cdev->reg_mutex);
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(&sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(&sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(&old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(&old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to re-enable the old subchannel. */
			spin_lock_irq(&old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
			spin_unlock_irq(&old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(&old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(&old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(&sch->lock);
	cdev->ccwlock = &sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(&sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
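
/*
 * The "orphanage" used above is the channel subsystem's pseudo subchannel:
 * a ccw device whose subchannel went away, but which should be kept around
 * (see sch_get_action() below), is re-parented to it until a matching
 * subchannel appears again.
 */
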
static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Register it and exit.
		 */
		cdev = sch_get_cdev(sch);
		rc = device_add(&cdev->dev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(&sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(&sch->lock);
	return 0;
}

static void io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(&sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(&sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	else
		css_schedule_eval(sch->schid);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask, chpid, valid_bit;
	int path_event[8];

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_FCES_EVENT:
		/* Forward Endpoint Security event */
		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
				valid_bit >>= 1) {
			if (mask & valid_bit)
				path_event[chpid] = PE_PATH_FCES_EVENT;
			else
				path_event[chpid] = PE_NONE;
		}
		if (cdev && cdev->drv && cdev->drv->path_event)
			cdev->drv->path_event(cdev, path_event);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(&sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(&sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(&sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(&sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
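
/*
 * Taken together, the pieces above implement a simple backoff: a scheduled
 * recovery run starts at phase 0 and, as long as recovery_check() reports
 * more work via *redo, advances through recovery_delay[], i.e. it retries
 * after 3, then 30, then repeatedly 300 seconds.
 */
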
static int purge_fn(struct subchannel *sch, void *data)
{
	struct ccw_device *cdev;

	spin_lock_irq(&sch->lock);
	if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
		goto unlock;

	if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
		goto unlock;

	cdev = sch_get_cdev(sch);
	if (cdev) {
		if (cdev->private->state != DEV_STATE_OFFLINE)
			goto unlock;

		if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
			goto unlock;
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}

	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
		      sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");

unlock:
	spin_unlock_irq(&sch->lock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	for_each_subchannel_staged(purge_fn, NULL, NULL);
	return 0;
}
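
/*
 * This is triggered from the blacklist code (see blacklist.c), presumably
 * by writing "purge" to /proc/cio_ignore; devices that are offline and on
 * the blacklist then disappear from the system.
 */
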
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_UNREG_CDEV,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
	IO_SCH_ORPH_CDEV,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	cdev = sch_get_cdev(sch);
	rc = cio_update_schib(sch);

	if (rc == -ENODEV) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}

	/* Avoid unregistering subchannels without working device. */
	if (rc == -EACCES) {
		if (!cdev)
			return IO_SCH_NOP;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_CDEV;
		return IO_SCH_ORPH_CDEV;
	}

	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG_CDEV;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(&sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_CDEV:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_CDEV:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_CDEV:
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(&sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(&sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(&sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(&sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay(100);
	}
}
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device is
 *  returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
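
/*
 * Minimal sketch of a ccw driver using this interface (all names and IDs
 * below are hypothetical, error handling omitted):
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.set_online = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 * registered from the module init function with
 *
 *	ret = ccw_driver_register(&foo_driver);
 */
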
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static void ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static const struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		spin_lock_irq(&sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(&sch->lock);
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
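
/*
 * Callers hold the ccwdev lock while scheduling, as e.g. in
 * online_store_handle_offline() above:
 *
 *	spin_lock_irq(cdev->ccwlock);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 *	spin_unlock_irq(cdev->ccwlock);
 *
 * Since the enum cdev_todo values are ordered by priority, the
 * "cdev->private->todo >= todo" check above ensures that a pending
 * higher-priority operation is never downgraded by a later request.
 */
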
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);