1 /* 2 * drivers/s390/cio/cio.c 3 * S/390 common I/O routines -- low level i/o calls 4 * 5 * Copyright IBM Corp. 1999,2008 6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com) 9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 10 */ 11 12 #define KMSG_COMPONENT "cio" 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 15 #include <linux/ftrace.h> 16 #include <linux/module.h> 17 #include <linux/init.h> 18 #include <linux/slab.h> 19 #include <linux/device.h> 20 #include <linux/kernel_stat.h> 21 #include <linux/interrupt.h> 22 #include <asm/cio.h> 23 #include <asm/delay.h> 24 #include <asm/irq.h> 25 #include <asm/irq_regs.h> 26 #include <asm/setup.h> 27 #include <asm/reset.h> 28 #include <asm/ipl.h> 29 #include <asm/chpid.h> 30 #include <asm/airq.h> 31 #include <asm/isc.h> 32 #include <asm/cputime.h> 33 #include <asm/fcx.h> 34 #include <asm/nmi.h> 35 #include <asm/crw.h> 36 #include "cio.h" 37 #include "css.h" 38 #include "chsc.h" 39 #include "ioasm.h" 40 #include "io_sch.h" 41 #include "blacklist.h" 42 #include "cio_debug.h" 43 #include "chp.h" 44 45 debug_info_t *cio_debug_msg_id; 46 debug_info_t *cio_debug_trace_id; 47 debug_info_t *cio_debug_crw_id; 48 49 /* 50 * Function: cio_debug_init 51 * Initializes three debug logs for common I/O: 52 * - cio_msg logs generic cio messages 53 * - cio_trace logs the calling of different functions 54 * - cio_crw logs machine check related cio messages 55 */ 56 static int __init cio_debug_init(void) 57 { 58 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); 59 if (!cio_debug_msg_id) 60 goto out_unregister; 61 debug_register_view(cio_debug_msg_id, &debug_sprintf_view); 62 debug_set_level(cio_debug_msg_id, 2); 63 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); 64 if (!cio_debug_trace_id) 65 goto out_unregister; 66 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); 67 debug_set_level(cio_debug_trace_id, 2); 68 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); 69 if (!cio_debug_crw_id) 70 goto out_unregister; 71 debug_register_view(cio_debug_crw_id, &debug_sprintf_view); 72 debug_set_level(cio_debug_crw_id, 4); 73 return 0; 74 75 out_unregister: 76 if (cio_debug_msg_id) 77 debug_unregister(cio_debug_msg_id); 78 if (cio_debug_trace_id) 79 debug_unregister(cio_debug_trace_id); 80 if (cio_debug_crw_id) 81 debug_unregister(cio_debug_crw_id); 82 return -1; 83 } 84 85 arch_initcall (cio_debug_init); 86 87 int 88 cio_set_options (struct subchannel *sch, int flags) 89 { 90 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; 91 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; 92 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; 93 return 0; 94 } 95 96 /* FIXME: who wants to use this? */ 97 int 98 cio_get_options (struct subchannel *sch) 99 { 100 int flags; 101 102 flags = 0; 103 if (sch->options.suspend) 104 flags |= DOIO_ALLOW_SUSPEND; 105 if (sch->options.prefetch) 106 flags |= DOIO_DENY_PREFETCH; 107 if (sch->options.inter) 108 flags |= DOIO_SUPPRESS_INTER; 109 return flags; 110 } 111 112 static int 113 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 114 { 115 char dbf_text[15]; 116 117 if (lpm != 0) 118 sch->lpm &= ~lpm; 119 else 120 sch->lpm = 0; 121 122 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " 123 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 124 sch->schid.sch_no); 125 126 if (cio_update_schib(sch)) 127 return -ENODEV; 128 129 sprintf(dbf_text, "no%s", dev_name(&sch->dev)); 130 CIO_TRACE_EVENT(0, dbf_text); 131 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 132 133 return (sch->lpm ? -EACCES : -ENODEV); 134 } 135 136 int 137 cio_start_key (struct subchannel *sch, /* subchannel structure */ 138 struct ccw1 * cpa, /* logical channel prog addr */ 139 __u8 lpm, /* logical path mask */ 140 __u8 key) /* storage key */ 141 { 142 int ccode; 143 union orb *orb; 144 145 CIO_TRACE_EVENT(5, "stIO"); 146 CIO_TRACE_EVENT(5, dev_name(&sch->dev)); 147 148 orb = &to_io_private(sch)->orb; 149 memset(orb, 0, sizeof(union orb)); 150 /* sch is always under 2G. */ 151 orb->cmd.intparm = (u32)(addr_t)sch; 152 orb->cmd.fmt = 1; 153 154 orb->cmd.pfch = sch->options.prefetch == 0; 155 orb->cmd.spnd = sch->options.suspend; 156 orb->cmd.ssic = sch->options.suspend && sch->options.inter; 157 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; 158 #ifdef CONFIG_64BIT 159 /* 160 * for 64 bit we always support 64 bit IDAWs with 4k page size only 161 */ 162 orb->cmd.c64 = 1; 163 orb->cmd.i2k = 0; 164 #endif 165 orb->cmd.key = key >> 4; 166 /* issue "Start Subchannel" */ 167 orb->cmd.cpa = (__u32) __pa(cpa); 168 ccode = ssch(sch->schid, orb); 169 170 /* process condition code */ 171 CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); 172 173 switch (ccode) { 174 case 0: 175 /* 176 * initialize device status information 177 */ 178 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 179 return 0; 180 case 1: /* status pending */ 181 case 2: /* busy */ 182 return -EBUSY; 183 case 3: /* device/path not operational */ 184 return cio_start_handle_notoper(sch, lpm); 185 default: 186 return ccode; 187 } 188 } 189 190 int 191 cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) 192 { 193 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); 194 } 195 196 /* 197 * resume suspended I/O operation 198 */ 199 int 200 cio_resume (struct subchannel *sch) 201 { 202 int ccode; 203 204 CIO_TRACE_EVENT(4, "resIO"); 205 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 206 207 ccode = rsch (sch->schid); 208 209 CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); 210 211 switch (ccode) { 212 case 0: 213 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; 214 return 0; 215 case 1: 216 return -EBUSY; 217 case 2: 218 return -EINVAL; 219 default: 220 /* 221 * useless to wait for request completion 222 * as device is no longer operational ! 223 */ 224 return -ENODEV; 225 } 226 } 227 228 /* 229 * halt I/O operation 230 */ 231 int 232 cio_halt(struct subchannel *sch) 233 { 234 int ccode; 235 236 if (!sch) 237 return -ENODEV; 238 239 CIO_TRACE_EVENT(2, "haltIO"); 240 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 241 242 /* 243 * Issue "Halt subchannel" and process condition code 244 */ 245 ccode = hsch (sch->schid); 246 247 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 248 249 switch (ccode) { 250 case 0: 251 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; 252 return 0; 253 case 1: /* status pending */ 254 case 2: /* busy */ 255 return -EBUSY; 256 default: /* device not operational */ 257 return -ENODEV; 258 } 259 } 260 261 /* 262 * Clear I/O operation 263 */ 264 int 265 cio_clear(struct subchannel *sch) 266 { 267 int ccode; 268 269 if (!sch) 270 return -ENODEV; 271 272 CIO_TRACE_EVENT(2, "clearIO"); 273 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 274 275 /* 276 * Issue "Clear subchannel" and process condition code 277 */ 278 ccode = csch (sch->schid); 279 280 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 281 282 switch (ccode) { 283 case 0: 284 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; 285 return 0; 286 default: /* device not operational */ 287 return -ENODEV; 288 } 289 } 290 291 /* 292 * Function: cio_cancel 293 * Issues a "Cancel Subchannel" on the specified subchannel 294 * Note: We don't need any fancy intparms and flags here 295 * since xsch is executed synchronously. 296 * Only for common I/O internal use as for now. 297 */ 298 int 299 cio_cancel (struct subchannel *sch) 300 { 301 int ccode; 302 303 if (!sch) 304 return -ENODEV; 305 306 CIO_TRACE_EVENT(2, "cancelIO"); 307 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 308 309 ccode = xsch (sch->schid); 310 311 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 312 313 switch (ccode) { 314 case 0: /* success */ 315 /* Update information in scsw. */ 316 if (cio_update_schib(sch)) 317 return -ENODEV; 318 return 0; 319 case 1: /* status pending */ 320 return -EBUSY; 321 case 2: /* not applicable */ 322 return -EINVAL; 323 default: /* not oper */ 324 return -ENODEV; 325 } 326 } 327 328 329 static void cio_apply_config(struct subchannel *sch, struct schib *schib) 330 { 331 schib->pmcw.intparm = sch->config.intparm; 332 schib->pmcw.mbi = sch->config.mbi; 333 schib->pmcw.isc = sch->config.isc; 334 schib->pmcw.ena = sch->config.ena; 335 schib->pmcw.mme = sch->config.mme; 336 schib->pmcw.mp = sch->config.mp; 337 schib->pmcw.csense = sch->config.csense; 338 schib->pmcw.mbfc = sch->config.mbfc; 339 if (sch->config.mbfc) 340 schib->mba = sch->config.mba; 341 } 342 343 static int cio_check_config(struct subchannel *sch, struct schib *schib) 344 { 345 return (schib->pmcw.intparm == sch->config.intparm) && 346 (schib->pmcw.mbi == sch->config.mbi) && 347 (schib->pmcw.isc == sch->config.isc) && 348 (schib->pmcw.ena == sch->config.ena) && 349 (schib->pmcw.mme == sch->config.mme) && 350 (schib->pmcw.mp == sch->config.mp) && 351 (schib->pmcw.csense == sch->config.csense) && 352 (schib->pmcw.mbfc == sch->config.mbfc) && 353 (!sch->config.mbfc || (schib->mba == sch->config.mba)); 354 } 355 356 /* 357 * cio_commit_config - apply configuration to the subchannel 358 */ 359 int cio_commit_config(struct subchannel *sch) 360 { 361 struct schib schib; 362 int ccode, retry, ret = 0; 363 364 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 365 return -ENODEV; 366 367 for (retry = 0; retry < 5; retry++) { 368 /* copy desired changes to local schib */ 369 cio_apply_config(sch, &schib); 370 ccode = msch_err(sch->schid, &schib); 371 if (ccode < 0) /* -EIO if msch gets a program check. */ 372 return ccode; 373 switch (ccode) { 374 case 0: /* successful */ 375 if (stsch_err(sch->schid, &schib) || 376 !css_sch_is_valid(&schib)) 377 return -ENODEV; 378 if (cio_check_config(sch, &schib)) { 379 /* commit changes from local schib */ 380 memcpy(&sch->schib, &schib, sizeof(schib)); 381 return 0; 382 } 383 ret = -EAGAIN; 384 break; 385 case 1: /* status pending */ 386 return -EBUSY; 387 case 2: /* busy */ 388 udelay(100); /* allow for recovery */ 389 ret = -EBUSY; 390 break; 391 case 3: /* not operational */ 392 return -ENODEV; 393 } 394 } 395 return ret; 396 } 397 398 /** 399 * cio_update_schib - Perform stsch and update schib if subchannel is valid. 400 * @sch: subchannel on which to perform stsch 401 * Return zero on success, -ENODEV otherwise. 402 */ 403 int cio_update_schib(struct subchannel *sch) 404 { 405 struct schib schib; 406 407 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 408 return -ENODEV; 409 410 memcpy(&sch->schib, &schib, sizeof(schib)); 411 return 0; 412 } 413 EXPORT_SYMBOL_GPL(cio_update_schib); 414 415 /** 416 * cio_enable_subchannel - enable a subchannel. 417 * @sch: subchannel to be enabled 418 * @intparm: interruption parameter to set 419 */ 420 int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 421 { 422 int retry; 423 int ret; 424 425 CIO_TRACE_EVENT(2, "ensch"); 426 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 427 428 if (sch_is_pseudo_sch(sch)) 429 return -EINVAL; 430 if (cio_update_schib(sch)) 431 return -ENODEV; 432 433 sch->config.ena = 1; 434 sch->config.isc = sch->isc; 435 sch->config.intparm = intparm; 436 437 for (retry = 0; retry < 3; retry++) { 438 ret = cio_commit_config(sch); 439 if (ret == -EIO) { 440 /* 441 * Got a program check in msch. Try without 442 * the concurrent sense bit the next time. 443 */ 444 sch->config.csense = 0; 445 } else if (ret == -EBUSY) { 446 struct irb irb; 447 if (tsch(sch->schid, &irb) != 0) 448 break; 449 } else 450 break; 451 } 452 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 453 return ret; 454 } 455 EXPORT_SYMBOL_GPL(cio_enable_subchannel); 456 457 /** 458 * cio_disable_subchannel - disable a subchannel. 459 * @sch: subchannel to disable 460 */ 461 int cio_disable_subchannel(struct subchannel *sch) 462 { 463 int retry; 464 int ret; 465 466 CIO_TRACE_EVENT(2, "dissch"); 467 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 468 469 if (sch_is_pseudo_sch(sch)) 470 return 0; 471 if (cio_update_schib(sch)) 472 return -ENODEV; 473 474 sch->config.ena = 0; 475 476 for (retry = 0; retry < 3; retry++) { 477 ret = cio_commit_config(sch); 478 if (ret == -EBUSY) { 479 struct irb irb; 480 if (tsch(sch->schid, &irb) != 0) 481 break; 482 } else 483 break; 484 } 485 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 486 return ret; 487 } 488 EXPORT_SYMBOL_GPL(cio_disable_subchannel); 489 490 int cio_create_sch_lock(struct subchannel *sch) 491 { 492 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 493 if (!sch->lock) 494 return -ENOMEM; 495 spin_lock_init(sch->lock); 496 return 0; 497 } 498 499 static int cio_check_devno_blacklisted(struct subchannel *sch) 500 { 501 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { 502 /* 503 * This device must not be known to Linux. So we simply 504 * say that there is no device and return ENODEV. 505 */ 506 CIO_MSG_EVENT(6, "Blacklisted device detected " 507 "at devno %04X, subchannel set %x\n", 508 sch->schib.pmcw.dev, sch->schid.ssid); 509 return -ENODEV; 510 } 511 return 0; 512 } 513 514 static int cio_validate_io_subchannel(struct subchannel *sch) 515 { 516 /* Initialization for io subchannels. */ 517 if (!css_sch_is_valid(&sch->schib)) 518 return -ENODEV; 519 520 /* Devno is valid. */ 521 return cio_check_devno_blacklisted(sch); 522 } 523 524 static int cio_validate_msg_subchannel(struct subchannel *sch) 525 { 526 /* Initialization for message subchannels. */ 527 if (!css_sch_is_valid(&sch->schib)) 528 return -ENODEV; 529 530 /* Devno is valid. */ 531 return cio_check_devno_blacklisted(sch); 532 } 533 534 /** 535 * cio_validate_subchannel - basic validation of subchannel 536 * @sch: subchannel structure to be filled out 537 * @schid: subchannel id 538 * 539 * Find out subchannel type and initialize struct subchannel. 540 * Return codes: 541 * 0 on success 542 * -ENXIO for non-defined subchannels 543 * -ENODEV for invalid subchannels or blacklisted devices 544 * -EIO for subchannels in an invalid subchannel set 545 */ 546 int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) 547 { 548 char dbf_txt[15]; 549 int ccode; 550 int err; 551 552 sprintf(dbf_txt, "valsch%x", schid.sch_no); 553 CIO_TRACE_EVENT(4, dbf_txt); 554 555 /* Nuke all fields. */ 556 memset(sch, 0, sizeof(struct subchannel)); 557 558 sch->schid = schid; 559 if (cio_is_console(schid)) { 560 sch->lock = cio_get_console_lock(); 561 } else { 562 err = cio_create_sch_lock(sch); 563 if (err) 564 goto out; 565 } 566 mutex_init(&sch->reg_mutex); 567 568 /* 569 * The first subchannel that is not-operational (ccode==3) 570 * indicates that there aren't any more devices available. 571 * If stsch gets an exception, it means the current subchannel set 572 * is not valid. 573 */ 574 ccode = stsch_err (schid, &sch->schib); 575 if (ccode) { 576 err = (ccode == 3) ? -ENXIO : ccode; 577 goto out; 578 } 579 /* Copy subchannel type from path management control word. */ 580 sch->st = sch->schib.pmcw.st; 581 582 switch (sch->st) { 583 case SUBCHANNEL_TYPE_IO: 584 err = cio_validate_io_subchannel(sch); 585 break; 586 case SUBCHANNEL_TYPE_MSG: 587 err = cio_validate_msg_subchannel(sch); 588 break; 589 default: 590 err = 0; 591 } 592 if (err) 593 goto out; 594 595 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", 596 sch->schid.ssid, sch->schid.sch_no, sch->st); 597 return 0; 598 out: 599 if (!cio_is_console(schid)) 600 kfree(sch->lock); 601 sch->lock = NULL; 602 return err; 603 } 604 605 /* 606 * do_IRQ() handles all normal I/O device IRQ's (the special 607 * SMP cross-CPU interrupts have their own specific 608 * handlers). 609 * 610 */ 611 void __irq_entry do_IRQ(struct pt_regs *regs) 612 { 613 struct tpi_info *tpi_info; 614 struct subchannel *sch; 615 struct irb *irb; 616 struct pt_regs *old_regs; 617 618 old_regs = set_irq_regs(regs); 619 s390_idle_check(regs, S390_lowcore.int_clock, 620 S390_lowcore.async_enter_timer); 621 irq_enter(); 622 __this_cpu_write(s390_idle.nohz_delay, 1); 623 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 624 /* Serve timer interrupts first. */ 625 clock_comparator_work(); 626 /* 627 * Get interrupt information from lowcore 628 */ 629 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 630 irb = (struct irb *)&S390_lowcore.irb; 631 do { 632 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 633 /* 634 * Non I/O-subchannel thin interrupts are processed differently 635 */ 636 if (tpi_info->adapter_IO == 1 && 637 tpi_info->int_type == IO_INTERRUPT_TYPE) { 638 do_adapter_IO(tpi_info->isc); 639 continue; 640 } 641 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 642 if (!sch) { 643 /* Clear pending interrupt condition. */ 644 tsch(tpi_info->schid, irb); 645 continue; 646 } 647 spin_lock(sch->lock); 648 /* Store interrupt response block to lowcore. */ 649 if (tsch(tpi_info->schid, irb) == 0) { 650 /* Keep subchannel information word up to date. */ 651 memcpy (&sch->schib.scsw, &irb->scsw, 652 sizeof (irb->scsw)); 653 /* Call interrupt handler if there is one. */ 654 if (sch->driver && sch->driver->irq) 655 sch->driver->irq(sch); 656 } 657 spin_unlock(sch->lock); 658 /* 659 * Are more interrupts pending? 660 * If so, the tpi instruction will update the lowcore 661 * to hold the info for the next interrupt. 662 * We don't do this for VM because a tpi drops the cpu 663 * out of the sie which costs more cycles than it saves. 664 */ 665 } while (MACHINE_IS_LPAR && tpi(NULL) != 0); 666 irq_exit(); 667 set_irq_regs(old_regs); 668 } 669 670 #ifdef CONFIG_CCW_CONSOLE 671 static struct subchannel console_subchannel; 672 static struct io_subchannel_private console_priv; 673 static int console_subchannel_in_use; 674 675 /* 676 * Use tpi to get a pending interrupt, call the interrupt handler and 677 * return a pointer to the subchannel structure. 678 */ 679 static int cio_tpi(void) 680 { 681 struct tpi_info *tpi_info; 682 struct subchannel *sch; 683 struct irb *irb; 684 int irq_context; 685 686 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 687 if (tpi(NULL) != 1) 688 return 0; 689 irb = (struct irb *)&S390_lowcore.irb; 690 /* Store interrupt response block to lowcore. */ 691 if (tsch(tpi_info->schid, irb) != 0) 692 /* Not status pending or not operational. */ 693 return 1; 694 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 695 if (!sch) 696 return 1; 697 irq_context = in_interrupt(); 698 if (!irq_context) 699 local_bh_disable(); 700 irq_enter(); 701 spin_lock(sch->lock); 702 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 703 if (sch->driver && sch->driver->irq) 704 sch->driver->irq(sch); 705 spin_unlock(sch->lock); 706 irq_exit(); 707 if (!irq_context) 708 _local_bh_enable(); 709 return 1; 710 } 711 712 void *cio_get_console_priv(void) 713 { 714 return &console_priv; 715 } 716 717 /* 718 * busy wait for the next interrupt on the console 719 */ 720 void wait_cons_dev(void) 721 __releases(console_subchannel.lock) 722 __acquires(console_subchannel.lock) 723 { 724 unsigned long cr6 __attribute__ ((aligned (8))); 725 unsigned long save_cr6 __attribute__ ((aligned (8))); 726 727 /* 728 * before entering the spinlock we may already have 729 * processed the interrupt on a different CPU... 730 */ 731 if (!console_subchannel_in_use) 732 return; 733 734 /* disable all but the console isc */ 735 __ctl_store (save_cr6, 6, 6); 736 cr6 = 1UL << (31 - CONSOLE_ISC); 737 __ctl_load (cr6, 6, 6); 738 739 do { 740 spin_unlock(console_subchannel.lock); 741 if (!cio_tpi()) 742 cpu_relax(); 743 spin_lock(console_subchannel.lock); 744 } while (console_subchannel.schib.scsw.cmd.actl != 0); 745 /* 746 * restore previous isc value 747 */ 748 __ctl_load (save_cr6, 6, 6); 749 } 750 751 static int 752 cio_test_for_console(struct subchannel_id schid, void *data) 753 { 754 if (stsch_err(schid, &console_subchannel.schib) != 0) 755 return -ENXIO; 756 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && 757 console_subchannel.schib.pmcw.dnv && 758 (console_subchannel.schib.pmcw.dev == console_devno)) { 759 console_irq = schid.sch_no; 760 return 1; /* found */ 761 } 762 return 0; 763 } 764 765 766 static int 767 cio_get_console_sch_no(void) 768 { 769 struct subchannel_id schid; 770 771 init_subchannel_id(&schid); 772 if (console_irq != -1) { 773 /* VM provided us with the irq number of the console. */ 774 schid.sch_no = console_irq; 775 if (stsch_err(schid, &console_subchannel.schib) != 0 || 776 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || 777 !console_subchannel.schib.pmcw.dnv) 778 return -1; 779 console_devno = console_subchannel.schib.pmcw.dev; 780 } else if (console_devno != -1) { 781 /* At least the console device number is known. */ 782 for_each_subchannel(cio_test_for_console, NULL); 783 if (console_irq == -1) 784 return -1; 785 } else { 786 /* unlike in 2.4, we cannot autoprobe here, since 787 * the channel subsystem is not fully initialized. 788 * With some luck, the HWC console can take over */ 789 return -1; 790 } 791 return console_irq; 792 } 793 794 struct subchannel * 795 cio_probe_console(void) 796 { 797 int sch_no, ret; 798 struct subchannel_id schid; 799 800 if (xchg(&console_subchannel_in_use, 1) != 0) 801 return ERR_PTR(-EBUSY); 802 sch_no = cio_get_console_sch_no(); 803 if (sch_no == -1) { 804 console_subchannel_in_use = 0; 805 pr_warning("No CCW console was found\n"); 806 return ERR_PTR(-ENODEV); 807 } 808 memset(&console_subchannel, 0, sizeof(struct subchannel)); 809 init_subchannel_id(&schid); 810 schid.sch_no = sch_no; 811 ret = cio_validate_subchannel(&console_subchannel, schid); 812 if (ret) { 813 console_subchannel_in_use = 0; 814 return ERR_PTR(-ENODEV); 815 } 816 817 /* 818 * enable console I/O-interrupt subclass 819 */ 820 isc_register(CONSOLE_ISC); 821 console_subchannel.config.isc = CONSOLE_ISC; 822 console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; 823 ret = cio_commit_config(&console_subchannel); 824 if (ret) { 825 isc_unregister(CONSOLE_ISC); 826 console_subchannel_in_use = 0; 827 return ERR_PTR(ret); 828 } 829 return &console_subchannel; 830 } 831 832 void 833 cio_release_console(void) 834 { 835 console_subchannel.config.intparm = 0; 836 cio_commit_config(&console_subchannel); 837 isc_unregister(CONSOLE_ISC); 838 console_subchannel_in_use = 0; 839 } 840 841 /* Bah... hack to catch console special sausages. */ 842 int 843 cio_is_console(struct subchannel_id schid) 844 { 845 if (!console_subchannel_in_use) 846 return 0; 847 return schid_equal(&schid, &console_subchannel.schid); 848 } 849 850 struct subchannel * 851 cio_get_console_subchannel(void) 852 { 853 if (!console_subchannel_in_use) 854 return NULL; 855 return &console_subchannel; 856 } 857 858 #endif 859 static int 860 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 861 { 862 int retry, cc; 863 864 cc = 0; 865 for (retry=0;retry<3;retry++) { 866 schib->pmcw.ena = 0; 867 cc = msch_err(schid, schib); 868 if (cc) 869 return (cc==3?-ENODEV:-EBUSY); 870 if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) 871 return -ENODEV; 872 if (!schib->pmcw.ena) 873 return 0; 874 } 875 return -EBUSY; /* uhm... */ 876 } 877 878 static int 879 __clear_io_subchannel_easy(struct subchannel_id schid) 880 { 881 int retry; 882 883 if (csch(schid)) 884 return -ENODEV; 885 for (retry=0;retry<20;retry++) { 886 struct tpi_info ti; 887 888 if (tpi(&ti)) { 889 tsch(ti.schid, (struct irb *)&S390_lowcore.irb); 890 if (schid_equal(&ti.schid, &schid)) 891 return 0; 892 } 893 udelay_simple(100); 894 } 895 return -EBUSY; 896 } 897 898 static void __clear_chsc_subchannel_easy(void) 899 { 900 /* It seems we can only wait for a bit here :/ */ 901 udelay_simple(100); 902 } 903 904 static int pgm_check_occured; 905 906 static void cio_reset_pgm_check_handler(void) 907 { 908 pgm_check_occured = 1; 909 } 910 911 static int stsch_reset(struct subchannel_id schid, struct schib *addr) 912 { 913 int rc; 914 915 pgm_check_occured = 0; 916 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 917 rc = stsch_err(schid, addr); 918 s390_base_pgm_handler_fn = NULL; 919 920 /* The program check handler could have changed pgm_check_occured. */ 921 barrier(); 922 923 if (pgm_check_occured) 924 return -EIO; 925 else 926 return rc; 927 } 928 929 static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) 930 { 931 struct schib schib; 932 933 if (stsch_reset(schid, &schib)) 934 return -ENXIO; 935 if (!schib.pmcw.ena) 936 return 0; 937 switch(__disable_subchannel_easy(schid, &schib)) { 938 case 0: 939 case -ENODEV: 940 break; 941 default: /* -EBUSY */ 942 switch (schib.pmcw.st) { 943 case SUBCHANNEL_TYPE_IO: 944 if (__clear_io_subchannel_easy(schid)) 945 goto out; /* give up... */ 946 break; 947 case SUBCHANNEL_TYPE_CHSC: 948 __clear_chsc_subchannel_easy(); 949 break; 950 default: 951 /* No default clear strategy */ 952 break; 953 } 954 stsch_err(schid, &schib); 955 __disable_subchannel_easy(schid, &schib); 956 } 957 out: 958 return 0; 959 } 960 961 static atomic_t chpid_reset_count; 962 963 static void s390_reset_chpids_mcck_handler(void) 964 { 965 struct crw crw; 966 struct mci *mci; 967 968 /* Check for pending channel report word. */ 969 mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 970 if (!mci->cp) 971 return; 972 /* Process channel report words. */ 973 while (stcrw(&crw) == 0) { 974 /* Check for responses to RCHP. */ 975 if (crw.slct && crw.rsc == CRW_RSC_CPATH) 976 atomic_dec(&chpid_reset_count); 977 } 978 } 979 980 #define RCHP_TIMEOUT (30 * USEC_PER_SEC) 981 static void css_reset(void) 982 { 983 int i, ret; 984 unsigned long long timeout; 985 struct chp_id chpid; 986 987 /* Reset subchannels. */ 988 for_each_subchannel(__shutdown_subchannel_easy, NULL); 989 /* Reset channel paths. */ 990 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; 991 /* Enable channel report machine checks. */ 992 __ctl_set_bit(14, 28); 993 /* Temporarily reenable machine checks. */ 994 local_mcck_enable(); 995 chp_id_init(&chpid); 996 for (i = 0; i <= __MAX_CHPID; i++) { 997 chpid.id = i; 998 ret = rchp(chpid); 999 if ((ret == 0) || (ret == 2)) 1000 /* 1001 * rchp either succeeded, or another rchp is already 1002 * in progress. In either case, we'll get a crw. 1003 */ 1004 atomic_inc(&chpid_reset_count); 1005 } 1006 /* Wait for machine check for all channel paths. */ 1007 timeout = get_clock() + (RCHP_TIMEOUT << 12); 1008 while (atomic_read(&chpid_reset_count) != 0) { 1009 if (get_clock() > timeout) 1010 break; 1011 cpu_relax(); 1012 } 1013 /* Disable machine checks again. */ 1014 local_mcck_disable(); 1015 /* Disable channel report machine checks. */ 1016 __ctl_clear_bit(14, 28); 1017 s390_base_mcck_handler_fn = NULL; 1018 } 1019 1020 static struct reset_call css_reset_call = { 1021 .fn = css_reset, 1022 }; 1023 1024 static int __init init_css_reset_call(void) 1025 { 1026 atomic_set(&chpid_reset_count, 0); 1027 register_reset_call(&css_reset_call); 1028 return 0; 1029 } 1030 1031 arch_initcall(init_css_reset_call); 1032 1033 struct sch_match_id { 1034 struct subchannel_id schid; 1035 struct ccw_dev_id devid; 1036 int rc; 1037 }; 1038 1039 static int __reipl_subchannel_match(struct subchannel_id schid, void *data) 1040 { 1041 struct schib schib; 1042 struct sch_match_id *match_id = data; 1043 1044 if (stsch_reset(schid, &schib)) 1045 return -ENXIO; 1046 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && 1047 (schib.pmcw.dev == match_id->devid.devno) && 1048 (schid.ssid == match_id->devid.ssid)) { 1049 match_id->schid = schid; 1050 match_id->rc = 0; 1051 return 1; 1052 } 1053 return 0; 1054 } 1055 1056 static int reipl_find_schid(struct ccw_dev_id *devid, 1057 struct subchannel_id *schid) 1058 { 1059 struct sch_match_id match_id; 1060 1061 match_id.devid = *devid; 1062 match_id.rc = -ENODEV; 1063 for_each_subchannel(__reipl_subchannel_match, &match_id); 1064 if (match_id.rc == 0) 1065 *schid = match_id.schid; 1066 return match_id.rc; 1067 } 1068 1069 extern void do_reipl_asm(__u32 schid); 1070 1071 /* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1072 void reipl_ccw_dev(struct ccw_dev_id *devid) 1073 { 1074 struct subchannel_id schid; 1075 1076 s390_reset_system(); 1077 if (reipl_find_schid(devid, &schid) != 0) 1078 panic("IPL Device not found\n"); 1079 do_reipl_asm(*((__u32*)&schid)); 1080 } 1081 1082 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) 1083 { 1084 struct subchannel_id schid; 1085 struct schib schib; 1086 1087 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; 1088 if (!schid.one) 1089 return -ENODEV; 1090 if (stsch_err(schid, &schib)) 1091 return -ENODEV; 1092 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 1093 return -ENODEV; 1094 if (!schib.pmcw.dnv) 1095 return -ENODEV; 1096 iplinfo->devno = schib.pmcw.dev; 1097 iplinfo->is_qdio = schib.pmcw.qf; 1098 return 0; 1099 } 1100 1101 /** 1102 * cio_tm_start_key - perform start function 1103 * @sch: subchannel on which to perform the start function 1104 * @tcw: transport-command word to be started 1105 * @lpm: mask of paths to use 1106 * @key: storage key to use for storage access 1107 * 1108 * Start the tcw on the given subchannel. Return zero on success, non-zero 1109 * otherwise. 1110 */ 1111 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) 1112 { 1113 int cc; 1114 union orb *orb = &to_io_private(sch)->orb; 1115 1116 memset(orb, 0, sizeof(union orb)); 1117 orb->tm.intparm = (u32) (addr_t) sch; 1118 orb->tm.key = key >> 4; 1119 orb->tm.b = 1; 1120 orb->tm.lpm = lpm ? lpm : sch->lpm; 1121 orb->tm.tcw = (u32) (addr_t) tcw; 1122 cc = ssch(sch->schid, orb); 1123 switch (cc) { 1124 case 0: 1125 return 0; 1126 case 1: 1127 case 2: 1128 return -EBUSY; 1129 default: 1130 return cio_start_handle_notoper(sch, lpm); 1131 } 1132 } 1133 1134 /** 1135 * cio_tm_intrg - perform interrogate function 1136 * @sch - subchannel on which to perform the interrogate function 1137 * 1138 * If the specified subchannel is running in transport-mode, perform the 1139 * interrogate function. Return zero on success, non-zero otherwie. 1140 */ 1141 int cio_tm_intrg(struct subchannel *sch) 1142 { 1143 int cc; 1144 1145 if (!to_io_private(sch)->orb.tm.b) 1146 return -EINVAL; 1147 cc = xsch(sch->schid); 1148 switch (cc) { 1149 case 0: 1150 case 2: 1151 return 0; 1152 case 1: 1153 return -EBUSY; 1154 default: 1155 return -ENODEV; 1156 } 1157 } 1158