/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

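/*
 * Usage note (illustrative only, not actual kernel code): a hypothetical
 * caller that only needs to visit registered subchannels could use the
 * staged iterator like this:
 *
 *	static int count_sch(struct subchannel *sch, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int num = 0;
 *	for_each_subchannel_staged(count_sch, NULL, &num);
 *
 * A non-zero return value from either callback terminates the iteration
 * and is passed back to the caller.
 */
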
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

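/*
 * Each subchannel is visible in sysfs below the css bus, named after its
 * subchannel id.  The "type" and "modalias" attributes defined below expose
 * the subchannel type, e.g. (device id shown for illustration):
 *
 *	$ cat /sys/bus/css/devices/0.0.0042/modalias
 *	css:t0
 */
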
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

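/*
 * Note that get_subchannel_by_schid() returns with a reference held on the
 * embedded struct device; callers are responsible for dropping it with
 * put_device() once they are done with the subchannel, as e.g.
 * call_fn_all_sch() above and css_evaluate_subchannel() below do.
 */
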
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

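/*
 * Subchannels that need (re-)evaluation are marked in slow_subchannel_set
 * and processed asynchronously by css_slow_path_func() on slow_path_wq.
 * css_eval_scheduled and css_eval_wq allow channel_subsystem_init_sync()
 * below to wait until the set has been drained.
 */
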
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

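/*
 * The global path group ID identifies this Linux instance to the channel
 * subsystem when channel paths are grouped.  It is built from either the
 * extended cssid (if multiple channel subsystems are supported) or the
 * CPU address, plus the CPU id and a high-order TOD clock value to make
 * it unique per instance.
 */
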
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

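/*
 * The cm_enable attribute switches channel-path measurement on or off for
 * a channel subsystem, e.g. (path shown for illustration):
 *
 *	$ echo 1 > /sys/devices/css0/cm_enable
 *
 * Writing 1 enables measurement via chsc_secm(), writing 0 disables it;
 * all other values are rejected with -EINVAL.
 */
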
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

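/*
 * For suspend/hibernation the power notifier above disables channel
 * measurement on every channel subsystem and re-enables it on resume.
 * The SECM request area is allocated as a zeroed GFP_DMA page for each
 * transition (GFP_DMA on s390 keeps the buffer below 2 GB).
 */
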
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

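/*
 * css_bus_cleanup() tears down everything css_bus_init() set up, in
 * reverse order.  It is only called when io_subchannel_init() fails in
 * channel_subsystem_init() below.
 */
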
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;

	ret = io_subchannel_init();
	if (ret)
		css_bus_cleanup();

	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		cssdrv->settle();
	return 0;
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
subsys_initcall_sync(channel_subsystem_init_sync);

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

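/*
 * Note the sch->driver handshake: css_probe() sets it before calling the
 * driver's probe callback and clears it again on failure, while
 * css_remove() clears it after the remove callback, so css_shutdown() and
 * the machine check path can rely on sch->driver whenever it is non-NULL.
 */
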
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
	.uevent = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);