/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;

}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;

}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}


/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.write = cio_settle_write,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);