/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002,2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>

#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.set = idset_sch_new();
	if (!cb.set)
		return -ENOMEM;
	idset_fill(cb.set);
	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;
	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

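/*
 * Free a subchannel that could not be registered (or is being thrown
 * away after a failed registration).  The interruption parameter,
 * which typically holds a pointer into driver-owned memory, is
 * cleared and committed to the subchannel first so that no stale
 * intparm survives the kfree().
 */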
static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

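/*
 * Default sysfs attributes of a subchannel: the subchannel type and a
 * modalias of the form "css:tN".  The modalias matches the MODALIAS
 * variable emitted by css_uevent() below, allowing a suitable driver
 * module to be autoloaded.
 */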
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

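/*
 * Evaluate a subchannel that is not yet registered.  Probing is only
 * done on the slow path (slow == 1); a call with slow == 0 returns
 * -EAGAIN so that the caller reschedules the evaluation via
 * css_schedule_eval().
 */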
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

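/*
 * Slow path machinery: subchannels in need of evaluation are marked
 * in an idset protected by slow_subchannel_lock.  A work item on
 * slow_path_wq drains the set by running the evaluation functions
 * above for each member.
 */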
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	s390_unregister_crw_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	kfree(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

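/*
 * A css driver matches a subchannel solely by subchannel type: the
 * driver's id table is searched for an entry whose type equals the
 * subchannel's st field.
 */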
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);