// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0)
		goto err;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch(schid, &schib)) {
		/* Subchannel is not provided. */
		return -ENXIO;
	}
	if (!css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			(css->cssid < 0) ? 0 : css->cssid;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (css->cssid < 0)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;

	mutex_init(&css->mutex);
	css->cssid = chsc_get_cssid(nr);
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;

}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}


/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (chp)
			chp_update_desc(chp);
	}
	cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);