/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
static struct bus_type css_bus_type;

/* Call fn for each subchannel id on every subchannel set, until fn
 * returns a non-zero value. */
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

/* Call fn_known for each registered subchannel and fn_unknown for every
 * remaining subchannel id. */
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0)
		goto err;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch(schid, &schib)) {
		/* Subchannel is not provided. */
		return -ENXIO;
	}
	if (!css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

/* Schedule the subchannel for evaluation by the slow path worker. */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

/* Schedule all subchannels for evaluation by the slow path worker. */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_tod_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (chp)
			chp_update_desc(chp);
	}
	cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);