/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *		      IBM Corporation
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *css[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

/*
 * Call fn for every subchannel id on every subchannel set.  A non-zero
 * return value from fn terminates the walk and is passed back to the
 * caller.
 */
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
		/* A non-zero return code also aborts across subchannel sets. */
	} while (!ret && schid.ssid++ < max_ssid);
	return ret;
}
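/*
 * Example (illustrative only, not used by the driver): counting all
 * provided subchannels with for_each_subchannel().  The stsch_err()
 * check mirrors the one in css_evaluate_new_subchannel() below;
 * "count_sch" and "count" are hypothetical names.
 *
 *	static int count_sch(struct subchannel_id schid, void *data)
 *	{
 *		struct schib schib;
 *		int *count = data;
 *
 *		if (stsch_err(schid, &schib) || !schib.pmcw.dnv)
 *			return 0;
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	for_each_subchannel(count_sch, &count);
 */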
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch->lock);
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = subch_attr_groups;
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret)
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
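/*
 * Example (illustrative only): looking up a subchannel by id.  On
 * success, bus_find_device() has taken a reference on the embedded
 * struct device, so the caller must drop it with put_device() -
 * exactly as css_evaluate_subchannel() below does:
 *
 *	struct subchannel *sch = get_subchannel_by_schid(schid);
 *
 *	if (sch) {
 *		...
 *		put_device(&sch->dev);
 *	}
 */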
static int css_get_subchannel_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are only evaluated directly,
		 * never on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			spin_unlock_irqrestore(sch->lock, flags);
			ret = sch->driver->notify(&sch->dev, event);
			spin_lock_irqsave(sch->lock, flags);
			if (ret)
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}
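/*
 * Illustrative sketch of the notify contract used above: for CIO_GONE
 * and CIO_NO_PATH events, a subchannel driver can veto the UNREGISTER
 * action by returning non-zero from its notify callback, in which case
 * the device is kept in the disconnected state.  "my_notify" is a
 * hypothetical name:
 *
 *	static int my_notify(struct device *dev, int event)
 *	{
 *		return event == CIO_GONE;
 *	}
 */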
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static void css_slow_path_func(struct work_struct *unused)
{
	struct subchannel_id schid;

	CIO_TRACE_EVENT(4, "slowpath");
	spin_lock_irq(&slow_subchannel_lock);
	init_subchannel_id(&schid);
	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
		idset_sch_del(slow_subchannel_set, schid);
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
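/*
 * Example (illustrative only): deferring evaluation of one subchannel
 * from a context that must not block.  The id is recorded in
 * slow_subchannel_set and processed later by css_slow_path_func() on
 * slow_path_wq.  The subchannel number 0x0042 is made up:
 *
 *	struct subchannel_id schid;
 *
 *	init_subchannel_id(&schid);
 *	schid.sch_no = 0x0042;
 *	css_schedule_eval(schid);
 */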
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		/* Already known. */
		put_device(&sch->dev);
		return 0;
	}

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
		/* These should abort looping. */
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(2, "reprobe start\n");

	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel(reprobe_subchannel, NULL);

	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(ccw_device_work, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
void css_process_crw(int rsid1, int rsid2)
{
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * was not working yet), so we do it now.  This is true e.g. for
	 * the console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	kfree(css);
}
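/*
 * The cm_enable attribute below toggles the channel-measurement
 * facility via chsc_secm().  From user space (illustrative; the path
 * follows from the "css%x" bus_id assigned in setup_css()):
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 *	echo 0 > /sys/devices/css0/cm_enable
 */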
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css)
		return 0;
	return sprintf(buf, "%x\n", css->cm_enabled);
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	switch (buf[0]) {
	case '0':
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case '1':
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;

	memset(css[nr], 0, sizeof(struct channel_subsystem));
	css[nr]->pseudo_subchannel =
		kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
	if (!css[nr]->pseudo_subchannel)
		return -ENOMEM;
	css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
	css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
	sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
	ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
	if (ret) {
		kfree(css[nr]->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css[nr]->mutex);
	css[nr]->valid = 1;
	css[nr]->cssid = nr;
	sprintf(css[nr]->device.bus_id, "css%x", nr);
	css[nr]->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css[nr], tod_high);
	return 0;
}
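/*
 * Resulting sysfs layout (sketch; the subchannel bus_id 0.0.0042 is
 * hypothetical): each channel subsystem becomes a root device, with
 * the "defunct" pseudo subchannel and the real subchannels registered
 * below it:
 *
 *	/sys/devices/css0/
 *	/sys/devices/css0/defunct/
 *	/sys/devices/css0/0.0.0042/
 */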
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */
	if (ret == 0)
		css_characteristics_avail = 1;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css[i]) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		ret = setup_css(i);
		if (ret)
			goto out_free;
		ret = device_register(&css[i]->device);
		if (ret)
			goto out_free_all;
		if (css_characteristics_avail &&
		    css_chsc_characteristics.secm) {
			ret = device_create_file(&css[i]->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css[i]->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	css_init_done = 1;

	ctl_set_bit(6, 28);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	device_remove_file(&css[i]->device, &dev_attr_cm_enable);
out_device:
	device_unregister(&css[i]->device);
out_free_all:
	kfree(css[i]->pseudo_subchannel->lock);
	kfree(css[i]->pseudo_subchannel);
out_free:
	kfree(css[i]);
out_unregister:
	while (i > 0) {
		i--;
		device_unregister(&css[i]->pseudo_subchannel->dev);
		if (css_characteristics_avail && css_chsc_characteristics.secm)
			device_remove_file(&css[i]->device,
					   &dev_attr_cm_enable);
		device_unregister(&css[i]->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	chsc_free_sei_area();
	/* Free with idset_free() to match the idset_sch_new() allocation. */
	idset_free(slow_subchannel_set);
	printk(KERN_WARNING "cio: failed to initialize css driver (%d)!\n",
	       ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

/*
 * Find a driver for a subchannel.  Drivers are matched by subchannel
 * type; the one exception is the console subchannel driver, which uses
 * its own subchannel type although the device is an I/O subchannel.
 */
static int
css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = container_of(dev, struct subchannel, dev);
	struct css_driver *driver = container_of(drv, struct css_driver, drv);

	if (sch->st == driver->subchannel_type)
		return 1;

	return 0;
}

static int
css_probe(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	sch->driver = container_of(dev->driver, struct css_driver, drv);
	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
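/*
 * Illustrative sketch (hypothetical names; the real in-tree consumer is
 * the ccw device layer in device.c): a subchannel driver attaches to
 * css_bus_type by embedding a struct device_driver in a css_driver and
 * is matched on its subchannel_type by css_bus_match() above:
 *
 *	static struct css_driver my_css_driver = {
 *		.subchannel_type = SUBCHANNEL_TYPE_IO,
 *		.drv = {
 *			.name = "my_sch_driver",
 *			.bus  = &css_bus_type,
 *		},
 *		.probe = my_probe_fn,
 *	};
 *
 *	ret = driver_register(&my_css_driver.drv);
 */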