/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id, void *),
			       void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
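/*
 * Usage sketch (illustration only, not part of this driver): a caller
 * that only cares about already-registered subchannels can pass NULL
 * for the unknown-subchannel callback. The callback name below is a
 * hypothetical example:
 *
 *	static int log_known(struct subchannel *sch, void *data)
 *	{
 *		dev_info(&sch->dev, "registered subchannel\n");
 *		return 0;	(a nonzero return aborts the iteration)
 *	}
 *
 *	for_each_subchannel_staged(log_known, NULL, NULL);
 */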
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
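/*
 * Worked example (for illustration only): with pim = 0xc0 only the two
 * leftmost bits are set (0x80 >> 0 and 0x80 >> 1), so ssd_from_pmcw()
 * fills in chpid[0] and chpid[1] from pmcw->chpid[0] and pmcw->chpid[1]
 * and leaves the remaining array entries zeroed; ssd_register_chpids()
 * then only considers those two channel paths.
 */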
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
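/*
 * Note on reference counting (mirrors the callers in this file):
 * bus_find_device() returns the struct device with an elevated reference
 * count, so every successful lookup must be balanced by the caller:
 *
 *	struct subchannel *sch = get_subchannel_by_schid(schid);
 *	if (sch) {
 *		... use sch ...
 *		put_device(&sch->dev);
 *	}
 */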
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}
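/*
 * Usage sketch (hypothetical caller): to force a re-evaluation of all
 * subchannels and wait until the slow path has processed the request,
 * the two calls above can be combined:
 *
 *	css_schedule_eval_all();
 *	css_wait_for_slow_path();
 */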
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Make sure initial subchannel scan is done. */
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
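/*
 * Worked example (illustration only): a CRW pair with crw0->rsid = 0x0017
 * and crw1->rsid = 0x0102 yields sch_no = 0x0017 and
 * ssid = (0x0102 >> 8) & 3 = 1, so subchannel 0.1.0017 is evaluated.
 */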
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
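/*
 * Usage sketch from userspace (assuming the default channel subsystem
 * device css0 and secm support): channel measurement can be toggled
 * through the attribute defined above:
 *
 *	echo 1 > /sys/devices/css0/cm_enable	(enable measurements)
 *	echo 0 > /sys/devices/css0/cm_enable	(disable measurements)
 */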
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	/* Unregister the CRW_RSC_SCH handler registered above. */
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
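/*
 * Illustrative sketch (hypothetical driver, modeled on the I/O subchannel
 * driver in device.c): a driver announces the subchannel types it handles
 * through a match table that css_bus_match() walks until it hits the
 * sentinel entry with match_flags == 0:
 *
 *	static struct css_device_id io_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	(end-of-list sentinel)
 *	};
 */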
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm       = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
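/*
 * Usage sketch (hypothetical driver; the driver name, callbacks and the
 * io_subchannel_ids table are assumptions for illustration):
 *
 *	static struct css_driver example_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "example",
 *		.subchannel_type = io_subchannel_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */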
1118 */ 1119 int css_driver_register(struct css_driver *cdrv) 1120 { 1121 cdrv->drv.name = cdrv->name; 1122 cdrv->drv.bus = &css_bus_type; 1123 cdrv->drv.owner = cdrv->owner; 1124 return driver_register(&cdrv->drv); 1125 } 1126 EXPORT_SYMBOL_GPL(css_driver_register); 1127 1128 /** 1129 * css_driver_unregister - unregister a css driver 1130 * @cdrv: css driver to unregister 1131 * 1132 * This is a wrapper around driver_unregister. 1133 */ 1134 void css_driver_unregister(struct css_driver *cdrv) 1135 { 1136 driver_unregister(&cdrv->drv); 1137 } 1138 EXPORT_SYMBOL_GPL(css_driver_unregister); 1139 1140 subsys_initcall(init_channel_subsystem); 1141 1142 MODULE_LICENSE("GPL"); 1143 EXPORT_SYMBOL(css_bus_type); 1144