/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		/* Also free the lock allocated during validation, as the
		 * cio_modify() error path below does; otherwise it leaks. */
		kfree(sch->lock);
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch->lock);
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}
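/*
 * Note on intparm handling: the intparm stored in the pmcw is the
 * kernel address of the subchannel itself, set in
 * css_alloc_subchannel() above. It is reset to zero whenever the
 * subchannel is torn down (here and in css_evaluate_known_subchannel()
 * below) so a stale pointer is never handed to the interrupt handler.
 */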
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition has been successful.
	 */
	if (!cio_is_console(sch->schid))
		/* Console is special, no need to suppress. */
		sch->dev.uevent_suppress = 1;
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
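/*
 * A successful get_subchannel_by_schid() returns the subchannel with
 * its device reference held (taken by bus_find_device()); callers must
 * drop it with put_device(&sch->dev) when done, as
 * css_evaluate_subchannel() and reprobe_subchannel() below do.
 */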
static int css_get_subchannel_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only. */
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects while the lock is
		 * temporarily released below. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			spin_unlock_irqrestore(sch->lock, flags);
			ret = sch->driver->notify(&sch->dev, event);
			spin_lock_irqsave(sch->lock, flags);
			if (ret)
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}
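/*
 * Evaluate a subchannel: known subchannels are checked against their
 * last recorded state, unknown ones are probed. A return value of
 * -EAGAIN from either helper means the subchannel could not be handled
 * on this path and is queued for the slow path instead.
 */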
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static void css_slow_path_func(struct work_struct *unused)
{
	struct subchannel_id schid;

	CIO_TRACE_EVENT(4, "slowpath");
	spin_lock_irq(&slow_subchannel_lock);
	init_subchannel_id(&schid);
	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
		idset_sch_del(slow_subchannel_set, schid);
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		/* Already known. */
		put_device(&sch->dev);
		return 0;
	}

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(2, "reprobe start\n");

	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel(reprobe_subchannel, NULL);

	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);
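/*
 * need_reprobe acts as a restart flag: css_schedule_reprobe() sets it
 * before queueing css_reprobe_work, reprobe_all() clears it when the
 * scan starts, and reprobe_subchannel() aborts with -EAGAIN if it has
 * been set again in the meantime, so a freshly scheduled scan starts
 * over instead of completing a stale one.
 */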
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(ccw_device_work, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
void css_process_crw(int rsid1, int rsid2)
{
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css)
		return 0;
	return sprintf(buf, "%x\n", css->cm_enabled);
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	switch (buf[0]) {
	case '0':
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case '1':
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	return ret < 0 ? ret : count;
}
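/*
 * The attribute declared below appears as a cm_enable file in the css
 * device's sysfs directory (presumably /sys/devices/css0/cm_enable
 * with the bus_ids set up in setup_css() below); writing '1' enables
 * and '0' disables channel measurement via chsc_secm().
 */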
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	sprintf(css->device.bus_id, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */
	if (ret == 0)
		css_characteristics_avail = 1;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
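	/*
	 * With MSS enabled, for_each_subchannel() walks subchannel sets
	 * 0 through __MAX_SSID; without it, only set 0 is scanned.
	 */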
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret)
			goto out_free;
		ret = device_register(&css->device);
		if (ret)
			goto out_free_all;
		if (css_characteristics_avail &&
		    css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_pseudo;
	css_init_done = 1;

	ctl_set_bit(6, 28);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_pseudo:
	device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
out_file:
	device_remove_file(&channel_subsystems[i]->device,
			   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_free_all:
	kfree(channel_subsystems[i]->pseudo_subchannel->lock);
	kfree(channel_subsystems[i]->pseudo_subchannel);
out_free:
	kfree(channel_subsystems[i]);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		if (css_characteristics_avail && css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	chsc_free_sei_area();
	/* Use idset_free() to match the allocator behind idset_sch_new(). */
	idset_free(slow_subchannel_set);
	printk(KERN_WARNING "cio: failed to initialize css driver (%d)!\n",
	       ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

/*
 * Find a driver for a subchannel. Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its
 * own subchannel type although the device is an I/O subchannel.
 */
static int
css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = container_of(dev, struct subchannel, dev);
	struct css_driver *driver = container_of(drv, struct css_driver, drv);

	if (sch->st == driver->subchannel_type)
		return 1;

	return 0;
}

static int
css_probe(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	sch->driver = container_of(dev->driver, struct css_driver, drv);
	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);