/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
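
/*
 * Usage sketch (illustrative only, not called anywhere in this file):
 * the staged walk visits every registered subchannel via @fn_known and
 * every remaining subchannel id via @fn_unknown, stopping early on a
 * non-zero return value. The callback names below are hypothetical.
 *
 *	static int count_known(struct subchannel *sch, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// 0 means: keep scanning
 *	}
 *
 *	static int note_unknown(struct subchannel_id schid, void *data)
 *	{
 *		CIO_MSG_EVENT(4, "unregistered: 0.%x.%04x\n",
 *			      schid.ssid, schid.sch_no);
 *		return 0;
 *	}
 *
 *	int registered = 0;
 *	int rc = for_each_subchannel_staged(count_known, note_unknown,
 *					    &registered);
 */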

static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
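
/*
 * Typical call site (a sketch, following the locking rule documented
 * above): the subchannel lock must be held while scheduling a todo,
 * e.g. to request deferred unregistration:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irq(sch->lock);
 */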

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
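
/*
 * The attribute defined below is exposed per channel subsystem, e.g. as
 * /sys/devices/css0/cm_enable (assuming the usual sysfs layout for a
 * root device named "css0"). Writing a hexadecimal 1 or 0 switches
 * channel-path measurements on or off via chsc_secm():
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 *	echo 0 > /sys/devices/css0/cm_enable
 */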

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* Search for subchannels that appeared during hibernation. */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	}
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
	.uevent = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
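
/*
 * Registration sketch (illustrative; the "example" driver and its
 * callbacks are hypothetical): a css driver matches subchannels by type
 * through its css_device_id table and hooks in via css_driver_register().
 * Field names follow struct css_driver as used by css_bus_match() and
 * css_probe() above.
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	// end of list: match_flags == 0
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.name = "example",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */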