/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

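/*
 * The "staged" walk below visits all subchannels in two passes: first
 * the subchannels already registered with the driver core (fn_known),
 * then, for any subchannel IDs still left in the idset, the ones the
 * common I/O layer does not know about yet (fn_unknown). If the idset
 * cannot be allocated, for_each_subchannel_staged() falls back to a
 * brute force walk over all possible subchannel IDs.
 */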
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}

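/*
 * Derive the subchannel's channel-path information directly from the
 * PMCW. This is the fallback used when CHSC data is not available,
 * e.g. for the console subchannel which is set up before memory
 * allocation works (see css_update_ssd_info() below).
 */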
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

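/**
 * css_probe_device - register a newly detected subchannel
 * @schid: subchannel id of the subchannel to be registered
 *
 * Allocate a subchannel structure for @schid (or reuse the statically
 * allocated one for the console) and register it with the driver core.
 */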
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

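/*
 * Build the global path group ID used when grouping channel paths:
 * with multiple channel subsystems available, the extended CSSID
 * identifies this image; otherwise the CPU address is used. CPU ID,
 * CPU model and the high word of the TOD clock complete the ID.
 */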
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

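/*
 * Channel measurement is controlled from user space through the
 * cm_enable sysfs attribute defined above (e.g. via
 * "echo 1 > /sys/devices/css0/cm_enable"). On reboot, any enabled
 * measurements are switched off again via chsc_secm().
 */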
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 0))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 1))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

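/*
 * Bring-up order: the css bus and css devices first (css_bus_init),
 * then the common I/O workqueue used by the slow path, then the I/O
 * subchannel type. Each step is rolled back if a later one fails.
 */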
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	}
}

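/*
 * The cio_settle proc file lets user space wait until all outstanding
 * subchannel evaluation and device registration has completed: any
 * write (e.g. "echo 1 > /proc/cio_settle") blocks until pending CRWs
 * are handled and the subchannel drivers have settled.
 */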
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
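/*
 * Usage sketch for subchannel type drivers (cf. io_subchannel_driver
 * in device.c): embed a struct css_driver, fill in the subchannel_type
 * id table and the callbacks needed, and register it during init:
 *
 *	static struct css_driver foo_subchannel_driver = {
 *		.owner		 = THIS_MODULE,
 *		.name		 = "foo_subchannel",
 *		.subchannel_type = foo_subchannel_ids,
 *		.probe		 = foo_sch_probe,
 *		.remove		 = foo_sch_remove,
 *	};
 *
 *	ret = css_driver_register(&foo_subchannel_driver);
 *
 * All "foo" names above are placeholders, not existing code.
 */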