/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
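/*
 * Illustrative sketch (not part of the original file): a caller-supplied
 * callback for for_each_subchannel() is invoked for every possible
 * subchannel id (all ssids up to max_ssid, all subchannel numbers); a
 * non-zero return value breaks out of the scan for the current ssid and is
 * passed back to the caller.  The names below are hypothetical.
 *
 *	static int count_sch(struct subchannel_id schid, void *data)
 *	{
 *		(*(int *) data)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int count = 0;
 *	for_each_subchannel(count_sch, &count);
 */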
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
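/*
 * Illustrative only (not part of the original file): as the kerneldoc above
 * notes, css_sched_sch_todo() expects the subchannel lock to be held, so a
 * typical caller looks roughly like this (hypothetical context):
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 */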
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
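/*
 * Usage note (illustrative, not from the original source): the cm_enable
 * attribute is created on the css device when the SECM facility is
 * available (see css_bus_init() below), so channel measurement can
 * typically be toggled from user space along the lines of
 *
 *	echo 1 > /sys/devices/css0/cm_enable	# enable measurements
 *	echo 0 > /sys/devices/css0/cm_enable	# disable measurements
 *
 * The exact sysfs path depends on the css id ("css%x").
 */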
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	}
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
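/*
 * Usage note (illustrative, not part of the original file): writing anything
 * to /proc/cio_settle blocks the writer until pending channel report words
 * have been handled and subchannel evaluation has settled, e.g.
 *
 *	echo > /proc/cio_settle
 *
 * which lets tools wait for device detection to quiesce.
 */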
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets the bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
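/*
 * Illustrative sketch (not part of the original file): a subchannel driver
 * registers with this bus via css_driver_register().  The identifier names
 * below are hypothetical, but the fields follow the struct css_driver /
 * struct css_device_id usage visible in css_bus_match() and css_probe()
 * above.
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	// terminator: match_flags == 0 ends the table
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.name  = "example",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = example_ids,
 *		.probe  = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	// in the module init function:
 *	ret = css_driver_register(&example_driver);
 */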