1 /* 2 * drivers/s390/cio/cio.c 3 * S/390 common I/O routines -- low level i/o calls 4 * 5 * Copyright (C) IBM Corp. 1999,2006 6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com) 9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 10 */ 11 12 #include <linux/module.h> 13 #include <linux/init.h> 14 #include <linux/slab.h> 15 #include <linux/device.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/interrupt.h> 18 #include <asm/cio.h> 19 #include <asm/delay.h> 20 #include <asm/irq.h> 21 #include <asm/irq_regs.h> 22 #include <asm/setup.h> 23 #include <asm/reset.h> 24 #include "airq.h" 25 #include "cio.h" 26 #include "css.h" 27 #include "chsc.h" 28 #include "ioasm.h" 29 #include "blacklist.h" 30 #include "cio_debug.h" 31 #include "../s390mach.h" 32 33 debug_info_t *cio_debug_msg_id; 34 debug_info_t *cio_debug_trace_id; 35 debug_info_t *cio_debug_crw_id; 36 37 int cio_show_msg; 38 39 static int __init 40 cio_setup (char *parm) 41 { 42 if (!strcmp (parm, "yes")) 43 cio_show_msg = 1; 44 else if (!strcmp (parm, "no")) 45 cio_show_msg = 0; 46 else 47 printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'", 48 parm); 49 return 1; 50 } 51 52 __setup ("cio_msg=", cio_setup); 53 54 /* 55 * Function: cio_debug_init 56 * Initializes three debug logs (under /proc/s390dbf) for common I/O: 57 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on 58 * - cio_trace logs the calling of different functions 59 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on 60 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW 61 */ 62 static int __init 63 cio_debug_init (void) 64 { 65 cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); 66 if (!cio_debug_msg_id) 67 goto out_unregister; 68 debug_register_view (cio_debug_msg_id, &debug_sprintf_view); 69 debug_set_level (cio_debug_msg_id, 2); 70 cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16); 71 if (!cio_debug_trace_id) 72 goto out_unregister; 73 debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); 74 debug_set_level (cio_debug_trace_id, 2); 75 cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); 76 if (!cio_debug_crw_id) 77 goto out_unregister; 78 debug_register_view (cio_debug_crw_id, &debug_sprintf_view); 79 debug_set_level (cio_debug_crw_id, 2); 80 pr_debug("debugging initialized\n"); 81 return 0; 82 83 out_unregister: 84 if (cio_debug_msg_id) 85 debug_unregister (cio_debug_msg_id); 86 if (cio_debug_trace_id) 87 debug_unregister (cio_debug_trace_id); 88 if (cio_debug_crw_id) 89 debug_unregister (cio_debug_crw_id); 90 pr_debug("could not initialize debugging\n"); 91 return -1; 92 } 93 94 arch_initcall (cio_debug_init); 95 96 int 97 cio_set_options (struct subchannel *sch, int flags) 98 { 99 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; 100 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; 101 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; 102 return 0; 103 } 104 105 /* FIXME: who wants to use this? */ 106 int 107 cio_get_options (struct subchannel *sch) 108 { 109 int flags; 110 111 flags = 0; 112 if (sch->options.suspend) 113 flags |= DOIO_ALLOW_SUSPEND; 114 if (sch->options.prefetch) 115 flags |= DOIO_DENY_PREFETCH; 116 if (sch->options.inter) 117 flags |= DOIO_SUPPRESS_INTER; 118 return flags; 119 } 120 121 /* 122 * Use tpi to get a pending interrupt, call the interrupt handler and 123 * return a pointer to the subchannel structure. 124 */ 125 static int 126 cio_tpi(void) 127 { 128 struct tpi_info *tpi_info; 129 struct subchannel *sch; 130 struct irb *irb; 131 132 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 133 if (tpi (NULL) != 1) 134 return 0; 135 irb = (struct irb *) __LC_IRB; 136 /* Store interrupt response block to lowcore. */ 137 if (tsch (tpi_info->schid, irb) != 0) 138 /* Not status pending or not operational. */ 139 return 1; 140 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 141 if (!sch) 142 return 1; 143 local_bh_disable(); 144 irq_enter (); 145 spin_lock(sch->lock); 146 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 147 if (sch->driver && sch->driver->irq) 148 sch->driver->irq(&sch->dev); 149 spin_unlock(sch->lock); 150 irq_exit (); 151 _local_bh_enable(); 152 return 1; 153 } 154 155 static int 156 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 157 { 158 char dbf_text[15]; 159 160 if (lpm != 0) 161 sch->lpm &= ~lpm; 162 else 163 sch->lpm = 0; 164 165 stsch (sch->schid, &sch->schib); 166 167 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 168 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 169 sch->schid.sch_no); 170 sprintf(dbf_text, "no%s", sch->dev.bus_id); 171 CIO_TRACE_EVENT(0, dbf_text); 172 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 173 174 return (sch->lpm ? -EACCES : -ENODEV); 175 } 176 177 int 178 cio_start_key (struct subchannel *sch, /* subchannel structure */ 179 struct ccw1 * cpa, /* logical channel prog addr */ 180 __u8 lpm, /* logical path mask */ 181 __u8 key) /* storage key */ 182 { 183 char dbf_txt[15]; 184 int ccode; 185 186 CIO_TRACE_EVENT (4, "stIO"); 187 CIO_TRACE_EVENT (4, sch->dev.bus_id); 188 189 /* sch is always under 2G. */ 190 sch->orb.intparm = (__u32)(unsigned long)sch; 191 sch->orb.fmt = 1; 192 193 sch->orb.pfch = sch->options.prefetch == 0; 194 sch->orb.spnd = sch->options.suspend; 195 sch->orb.ssic = sch->options.suspend && sch->options.inter; 196 sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; 197 #ifdef CONFIG_64BIT 198 /* 199 * for 64 bit we always support 64 bit IDAWs with 4k page size only 200 */ 201 sch->orb.c64 = 1; 202 sch->orb.i2k = 0; 203 #endif 204 sch->orb.key = key >> 4; 205 /* issue "Start Subchannel" */ 206 sch->orb.cpa = (__u32) __pa (cpa); 207 ccode = ssch (sch->schid, &sch->orb); 208 209 /* process condition code */ 210 sprintf (dbf_txt, "ccode:%d", ccode); 211 CIO_TRACE_EVENT (4, dbf_txt); 212 213 switch (ccode) { 214 case 0: 215 /* 216 * initialize device status information 217 */ 218 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; 219 return 0; 220 case 1: /* status pending */ 221 case 2: /* busy */ 222 return -EBUSY; 223 default: /* device/path not operational */ 224 return cio_start_handle_notoper(sch, lpm); 225 } 226 } 227 228 int 229 cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) 230 { 231 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); 232 } 233 234 /* 235 * resume suspended I/O operation 236 */ 237 int 238 cio_resume (struct subchannel *sch) 239 { 240 char dbf_txt[15]; 241 int ccode; 242 243 CIO_TRACE_EVENT (4, "resIO"); 244 CIO_TRACE_EVENT (4, sch->dev.bus_id); 245 246 ccode = rsch (sch->schid); 247 248 sprintf (dbf_txt, "ccode:%d", ccode); 249 CIO_TRACE_EVENT (4, dbf_txt); 250 251 switch (ccode) { 252 case 0: 253 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; 254 return 0; 255 case 1: 256 return -EBUSY; 257 case 2: 258 return -EINVAL; 259 default: 260 /* 261 * useless to wait for request completion 262 * as device is no longer operational ! 263 */ 264 return -ENODEV; 265 } 266 } 267 268 /* 269 * halt I/O operation 270 */ 271 int 272 cio_halt(struct subchannel *sch) 273 { 274 char dbf_txt[15]; 275 int ccode; 276 277 if (!sch) 278 return -ENODEV; 279 280 CIO_TRACE_EVENT (2, "haltIO"); 281 CIO_TRACE_EVENT (2, sch->dev.bus_id); 282 283 /* 284 * Issue "Halt subchannel" and process condition code 285 */ 286 ccode = hsch (sch->schid); 287 288 sprintf (dbf_txt, "ccode:%d", ccode); 289 CIO_TRACE_EVENT (2, dbf_txt); 290 291 switch (ccode) { 292 case 0: 293 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; 294 return 0; 295 case 1: /* status pending */ 296 case 2: /* busy */ 297 return -EBUSY; 298 default: /* device not operational */ 299 return -ENODEV; 300 } 301 } 302 303 /* 304 * Clear I/O operation 305 */ 306 int 307 cio_clear(struct subchannel *sch) 308 { 309 char dbf_txt[15]; 310 int ccode; 311 312 if (!sch) 313 return -ENODEV; 314 315 CIO_TRACE_EVENT (2, "clearIO"); 316 CIO_TRACE_EVENT (2, sch->dev.bus_id); 317 318 /* 319 * Issue "Clear subchannel" and process condition code 320 */ 321 ccode = csch (sch->schid); 322 323 sprintf (dbf_txt, "ccode:%d", ccode); 324 CIO_TRACE_EVENT (2, dbf_txt); 325 326 switch (ccode) { 327 case 0: 328 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; 329 return 0; 330 default: /* device not operational */ 331 return -ENODEV; 332 } 333 } 334 335 /* 336 * Function: cio_cancel 337 * Issues a "Cancel Subchannel" on the specified subchannel 338 * Note: We don't need any fancy intparms and flags here 339 * since xsch is executed synchronously. 340 * Only for common I/O internal use as for now. 341 */ 342 int 343 cio_cancel (struct subchannel *sch) 344 { 345 char dbf_txt[15]; 346 int ccode; 347 348 if (!sch) 349 return -ENODEV; 350 351 CIO_TRACE_EVENT (2, "cancelIO"); 352 CIO_TRACE_EVENT (2, sch->dev.bus_id); 353 354 ccode = xsch (sch->schid); 355 356 sprintf (dbf_txt, "ccode:%d", ccode); 357 CIO_TRACE_EVENT (2, dbf_txt); 358 359 switch (ccode) { 360 case 0: /* success */ 361 /* Update information in scsw. */ 362 stsch (sch->schid, &sch->schib); 363 return 0; 364 case 1: /* status pending */ 365 return -EBUSY; 366 case 2: /* not applicable */ 367 return -EINVAL; 368 default: /* not oper */ 369 return -ENODEV; 370 } 371 } 372 373 /* 374 * Function: cio_modify 375 * Issues a "Modify Subchannel" on the specified subchannel 376 */ 377 int 378 cio_modify (struct subchannel *sch) 379 { 380 int ccode, retry, ret; 381 382 ret = 0; 383 for (retry = 0; retry < 5; retry++) { 384 ccode = msch_err (sch->schid, &sch->schib); 385 if (ccode < 0) /* -EIO if msch gets a program check. */ 386 return ccode; 387 switch (ccode) { 388 case 0: /* successfull */ 389 return 0; 390 case 1: /* status pending */ 391 return -EBUSY; 392 case 2: /* busy */ 393 udelay (100); /* allow for recovery */ 394 ret = -EBUSY; 395 break; 396 case 3: /* not operational */ 397 return -ENODEV; 398 } 399 } 400 return ret; 401 } 402 403 /* 404 * Enable subchannel. 405 */ 406 int 407 cio_enable_subchannel (struct subchannel *sch, unsigned int isc) 408 { 409 char dbf_txt[15]; 410 int ccode; 411 int retry; 412 int ret; 413 414 CIO_TRACE_EVENT (2, "ensch"); 415 CIO_TRACE_EVENT (2, sch->dev.bus_id); 416 417 if (sch_is_pseudo_sch(sch)) 418 return -EINVAL; 419 ccode = stsch (sch->schid, &sch->schib); 420 if (ccode) 421 return -ENODEV; 422 423 for (retry = 5, ret = 0; retry > 0; retry--) { 424 sch->schib.pmcw.ena = 1; 425 sch->schib.pmcw.isc = isc; 426 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 427 ret = cio_modify(sch); 428 if (ret == -ENODEV) 429 break; 430 if (ret == -EIO) 431 /* 432 * Got a program check in cio_modify. Try without 433 * the concurrent sense bit the next time. 434 */ 435 sch->schib.pmcw.csense = 0; 436 if (ret == 0) { 437 stsch (sch->schid, &sch->schib); 438 if (sch->schib.pmcw.ena) 439 break; 440 } 441 if (ret == -EBUSY) { 442 struct irb irb; 443 if (tsch(sch->schid, &irb) != 0) 444 break; 445 } 446 } 447 sprintf (dbf_txt, "ret:%d", ret); 448 CIO_TRACE_EVENT (2, dbf_txt); 449 return ret; 450 } 451 452 /* 453 * Disable subchannel. 454 */ 455 int 456 cio_disable_subchannel (struct subchannel *sch) 457 { 458 char dbf_txt[15]; 459 int ccode; 460 int retry; 461 int ret; 462 463 CIO_TRACE_EVENT (2, "dissch"); 464 CIO_TRACE_EVENT (2, sch->dev.bus_id); 465 466 if (sch_is_pseudo_sch(sch)) 467 return 0; 468 ccode = stsch (sch->schid, &sch->schib); 469 if (ccode == 3) /* Not operational. */ 470 return -ENODEV; 471 472 if (sch->schib.scsw.actl != 0) 473 /* 474 * the disable function must not be called while there are 475 * requests pending for completion ! 476 */ 477 return -EBUSY; 478 479 for (retry = 5, ret = 0; retry > 0; retry--) { 480 sch->schib.pmcw.ena = 0; 481 ret = cio_modify(sch); 482 if (ret == -ENODEV) 483 break; 484 if (ret == -EBUSY) 485 /* 486 * The subchannel is busy or status pending. 487 * We'll disable when the next interrupt was delivered 488 * via the state machine. 489 */ 490 break; 491 if (ret == 0) { 492 stsch (sch->schid, &sch->schib); 493 if (!sch->schib.pmcw.ena) 494 break; 495 } 496 } 497 sprintf (dbf_txt, "ret:%d", ret); 498 CIO_TRACE_EVENT (2, dbf_txt); 499 return ret; 500 } 501 502 int cio_create_sch_lock(struct subchannel *sch) 503 { 504 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 505 if (!sch->lock) 506 return -ENOMEM; 507 spin_lock_init(sch->lock); 508 return 0; 509 } 510 511 /* 512 * cio_validate_subchannel() 513 * 514 * Find out subchannel type and initialize struct subchannel. 515 * Return codes: 516 * SUBCHANNEL_TYPE_IO for a normal io subchannel 517 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel 518 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel 519 * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel 520 * -ENXIO for non-defined subchannels 521 * -ENODEV for subchannels with invalid device number or blacklisted devices 522 */ 523 int 524 cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) 525 { 526 char dbf_txt[15]; 527 int ccode; 528 int err; 529 530 sprintf (dbf_txt, "valsch%x", schid.sch_no); 531 CIO_TRACE_EVENT (4, dbf_txt); 532 533 /* Nuke all fields. */ 534 memset(sch, 0, sizeof(struct subchannel)); 535 536 sch->schid = schid; 537 if (cio_is_console(schid)) { 538 sch->lock = cio_get_console_lock(); 539 } else { 540 err = cio_create_sch_lock(sch); 541 if (err) 542 goto out; 543 } 544 mutex_init(&sch->reg_mutex); 545 /* Set a name for the subchannel */ 546 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, 547 schid.sch_no); 548 549 /* 550 * The first subchannel that is not-operational (ccode==3) 551 * indicates that there aren't any more devices available. 552 * If stsch gets an exception, it means the current subchannel set 553 * is not valid. 554 */ 555 ccode = stsch_err (schid, &sch->schib); 556 if (ccode) { 557 err = (ccode == 3) ? -ENXIO : ccode; 558 goto out; 559 } 560 /* Copy subchannel type from path management control word. */ 561 sch->st = sch->schib.pmcw.st; 562 563 /* 564 * ... just being curious we check for non I/O subchannels 565 */ 566 if (sch->st != 0) { 567 CIO_DEBUG(KERN_INFO, 0, 568 "Subchannel 0.%x.%04x reports " 569 "non-I/O subchannel type %04X\n", 570 sch->schid.ssid, sch->schid.sch_no, sch->st); 571 /* We stop here for non-io subchannels. */ 572 err = sch->st; 573 goto out; 574 } 575 576 /* Initialization for io subchannels. */ 577 if (!sch->schib.pmcw.dnv) { 578 /* io subchannel but device number is invalid. */ 579 err = -ENODEV; 580 goto out; 581 } 582 /* Devno is valid. */ 583 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { 584 /* 585 * This device must not be known to Linux. So we simply 586 * say that there is no device and return ENODEV. 587 */ 588 CIO_MSG_EVENT(4, "Blacklisted device detected " 589 "at devno %04X, subchannel set %x\n", 590 sch->schib.pmcw.dev, sch->schid.ssid); 591 err = -ENODEV; 592 goto out; 593 } 594 sch->opm = 0xff; 595 if (!cio_is_console(sch->schid)) 596 chsc_validate_chpids(sch); 597 sch->lpm = sch->schib.pmcw.pam & sch->opm; 598 599 CIO_DEBUG(KERN_INFO, 0, 600 "Detected device %04x on subchannel 0.%x.%04X" 601 " - PIM = %02X, PAM = %02X, POM = %02X\n", 602 sch->schib.pmcw.dev, sch->schid.ssid, 603 sch->schid.sch_no, sch->schib.pmcw.pim, 604 sch->schib.pmcw.pam, sch->schib.pmcw.pom); 605 606 /* 607 * We now have to initially ... 608 * ... set "interruption subclass" 609 * ... enable "concurrent sense" 610 * ... enable "multipath mode" if more than one 611 * CHPID is available. This is done regardless 612 * whether multiple paths are available for us. 613 */ 614 sch->schib.pmcw.isc = 3; /* could be smth. else */ 615 sch->schib.pmcw.csense = 1; /* concurrent sense */ 616 sch->schib.pmcw.ena = 0; 617 if ((sch->lpm & (sch->lpm - 1)) != 0) 618 sch->schib.pmcw.mp = 1; /* multipath mode */ 619 return 0; 620 out: 621 if (!cio_is_console(schid)) 622 kfree(sch->lock); 623 sch->lock = NULL; 624 return err; 625 } 626 627 /* 628 * do_IRQ() handles all normal I/O device IRQ's (the special 629 * SMP cross-CPU interrupts have their own specific 630 * handlers). 631 * 632 */ 633 void 634 do_IRQ (struct pt_regs *regs) 635 { 636 struct tpi_info *tpi_info; 637 struct subchannel *sch; 638 struct irb *irb; 639 struct pt_regs *old_regs; 640 641 old_regs = set_irq_regs(regs); 642 irq_enter(); 643 asm volatile ("mc 0,0"); 644 if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) 645 /** 646 * Make sure that the i/o interrupt did not "overtake" 647 * the last HZ timer interrupt. 648 */ 649 account_ticks(S390_lowcore.int_clock); 650 /* 651 * Get interrupt information from lowcore 652 */ 653 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 654 irb = (struct irb *) __LC_IRB; 655 do { 656 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 657 /* 658 * Non I/O-subchannel thin interrupts are processed differently 659 */ 660 if (tpi_info->adapter_IO == 1 && 661 tpi_info->int_type == IO_INTERRUPT_TYPE) { 662 do_adapter_IO(); 663 continue; 664 } 665 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 666 if (sch) 667 spin_lock(sch->lock); 668 /* Store interrupt response block to lowcore. */ 669 if (tsch (tpi_info->schid, irb) == 0 && sch) { 670 /* Keep subchannel information word up to date. */ 671 memcpy (&sch->schib.scsw, &irb->scsw, 672 sizeof (irb->scsw)); 673 /* Call interrupt handler if there is one. */ 674 if (sch->driver && sch->driver->irq) 675 sch->driver->irq(&sch->dev); 676 } 677 if (sch) 678 spin_unlock(sch->lock); 679 /* 680 * Are more interrupts pending? 681 * If so, the tpi instruction will update the lowcore 682 * to hold the info for the next interrupt. 683 * We don't do this for VM because a tpi drops the cpu 684 * out of the sie which costs more cycles than it saves. 685 */ 686 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 687 irq_exit(); 688 set_irq_regs(old_regs); 689 } 690 691 #ifdef CONFIG_CCW_CONSOLE 692 static struct subchannel console_subchannel; 693 static int console_subchannel_in_use; 694 695 /* 696 * busy wait for the next interrupt on the console 697 */ 698 void 699 wait_cons_dev (void) 700 { 701 unsigned long cr6 __attribute__ ((aligned (8))); 702 unsigned long save_cr6 __attribute__ ((aligned (8))); 703 704 /* 705 * before entering the spinlock we may already have 706 * processed the interrupt on a different CPU... 707 */ 708 if (!console_subchannel_in_use) 709 return; 710 711 /* disable all but isc 7 (console device) */ 712 __ctl_store (save_cr6, 6, 6); 713 cr6 = 0x01000000; 714 __ctl_load (cr6, 6, 6); 715 716 do { 717 spin_unlock(console_subchannel.lock); 718 if (!cio_tpi()) 719 cpu_relax(); 720 spin_lock(console_subchannel.lock); 721 } while (console_subchannel.schib.scsw.actl != 0); 722 /* 723 * restore previous isc value 724 */ 725 __ctl_load (save_cr6, 6, 6); 726 } 727 728 static int 729 cio_test_for_console(struct subchannel_id schid, void *data) 730 { 731 if (stsch_err(schid, &console_subchannel.schib) != 0) 732 return -ENXIO; 733 if (console_subchannel.schib.pmcw.dnv && 734 console_subchannel.schib.pmcw.dev == 735 console_devno) { 736 console_irq = schid.sch_no; 737 return 1; /* found */ 738 } 739 return 0; 740 } 741 742 743 static int 744 cio_get_console_sch_no(void) 745 { 746 struct subchannel_id schid; 747 748 init_subchannel_id(&schid); 749 if (console_irq != -1) { 750 /* VM provided us with the irq number of the console. */ 751 schid.sch_no = console_irq; 752 if (stsch(schid, &console_subchannel.schib) != 0 || 753 !console_subchannel.schib.pmcw.dnv) 754 return -1; 755 console_devno = console_subchannel.schib.pmcw.dev; 756 } else if (console_devno != -1) { 757 /* At least the console device number is known. */ 758 for_each_subchannel(cio_test_for_console, NULL); 759 if (console_irq == -1) 760 return -1; 761 } else { 762 /* unlike in 2.4, we cannot autoprobe here, since 763 * the channel subsystem is not fully initialized. 764 * With some luck, the HWC console can take over */ 765 printk(KERN_WARNING "No ccw console found!\n"); 766 return -1; 767 } 768 return console_irq; 769 } 770 771 struct subchannel * 772 cio_probe_console(void) 773 { 774 int sch_no, ret; 775 struct subchannel_id schid; 776 777 if (xchg(&console_subchannel_in_use, 1) != 0) 778 return ERR_PTR(-EBUSY); 779 sch_no = cio_get_console_sch_no(); 780 if (sch_no == -1) { 781 console_subchannel_in_use = 0; 782 return ERR_PTR(-ENODEV); 783 } 784 memset(&console_subchannel, 0, sizeof(struct subchannel)); 785 init_subchannel_id(&schid); 786 schid.sch_no = sch_no; 787 ret = cio_validate_subchannel(&console_subchannel, schid); 788 if (ret) { 789 console_subchannel_in_use = 0; 790 return ERR_PTR(-ENODEV); 791 } 792 793 /* 794 * enable console I/O-interrupt subclass 7 795 */ 796 ctl_set_bit(6, 24); 797 console_subchannel.schib.pmcw.isc = 7; 798 console_subchannel.schib.pmcw.intparm = 799 (__u32)(unsigned long)&console_subchannel; 800 ret = cio_modify(&console_subchannel); 801 if (ret) { 802 console_subchannel_in_use = 0; 803 return ERR_PTR(ret); 804 } 805 return &console_subchannel; 806 } 807 808 void 809 cio_release_console(void) 810 { 811 console_subchannel.schib.pmcw.intparm = 0; 812 cio_modify(&console_subchannel); 813 ctl_clear_bit(6, 24); 814 console_subchannel_in_use = 0; 815 } 816 817 /* Bah... hack to catch console special sausages. */ 818 int 819 cio_is_console(struct subchannel_id schid) 820 { 821 if (!console_subchannel_in_use) 822 return 0; 823 return schid_equal(&schid, &console_subchannel.schid); 824 } 825 826 struct subchannel * 827 cio_get_console_subchannel(void) 828 { 829 if (!console_subchannel_in_use) 830 return NULL; 831 return &console_subchannel; 832 } 833 834 #endif 835 static int 836 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 837 { 838 int retry, cc; 839 840 cc = 0; 841 for (retry=0;retry<3;retry++) { 842 schib->pmcw.ena = 0; 843 cc = msch(schid, schib); 844 if (cc) 845 return (cc==3?-ENODEV:-EBUSY); 846 stsch(schid, schib); 847 if (!schib->pmcw.ena) 848 return 0; 849 } 850 return -EBUSY; /* uhm... */ 851 } 852 853 /* we can't use the normal udelay here, since it enables external interrupts */ 854 855 static void udelay_reset(unsigned long usecs) 856 { 857 uint64_t start_cc, end_cc; 858 859 asm volatile ("STCK %0" : "=m" (start_cc)); 860 do { 861 cpu_relax(); 862 asm volatile ("STCK %0" : "=m" (end_cc)); 863 } while (((end_cc - start_cc)/4096) < usecs); 864 } 865 866 static int 867 __clear_subchannel_easy(struct subchannel_id schid) 868 { 869 int retry; 870 871 if (csch(schid)) 872 return -ENODEV; 873 for (retry=0;retry<20;retry++) { 874 struct tpi_info ti; 875 876 if (tpi(&ti)) { 877 tsch(ti.schid, (struct irb *)__LC_IRB); 878 if (schid_equal(&ti.schid, &schid)) 879 return 0; 880 } 881 udelay_reset(100); 882 } 883 return -EBUSY; 884 } 885 886 static int pgm_check_occured; 887 888 static void cio_reset_pgm_check_handler(void) 889 { 890 pgm_check_occured = 1; 891 } 892 893 static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) 894 { 895 int rc; 896 897 pgm_check_occured = 0; 898 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 899 rc = stsch(schid, addr); 900 s390_base_pgm_handler_fn = NULL; 901 902 /* The program check handler could have changed pgm_check_occured. */ 903 barrier(); 904 905 if (pgm_check_occured) 906 return -EIO; 907 else 908 return rc; 909 } 910 911 static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) 912 { 913 struct schib schib; 914 915 if (stsch_reset(schid, &schib)) 916 return -ENXIO; 917 if (!schib.pmcw.ena) 918 return 0; 919 switch(__disable_subchannel_easy(schid, &schib)) { 920 case 0: 921 case -ENODEV: 922 break; 923 default: /* -EBUSY */ 924 if (__clear_subchannel_easy(schid)) 925 break; /* give up... */ 926 stsch(schid, &schib); 927 __disable_subchannel_easy(schid, &schib); 928 } 929 return 0; 930 } 931 932 static atomic_t chpid_reset_count; 933 934 static void s390_reset_chpids_mcck_handler(void) 935 { 936 struct crw crw; 937 struct mci *mci; 938 939 /* Check for pending channel report word. */ 940 mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 941 if (!mci->cp) 942 return; 943 /* Process channel report words. */ 944 while (stcrw(&crw) == 0) { 945 /* Check for responses to RCHP. */ 946 if (crw.slct && crw.rsc == CRW_RSC_CPATH) 947 atomic_dec(&chpid_reset_count); 948 } 949 } 950 951 #define RCHP_TIMEOUT (30 * USEC_PER_SEC) 952 static void css_reset(void) 953 { 954 int i, ret; 955 unsigned long long timeout; 956 957 /* Reset subchannels. */ 958 for_each_subchannel(__shutdown_subchannel_easy, NULL); 959 /* Reset channel paths. */ 960 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; 961 /* Enable channel report machine checks. */ 962 __ctl_set_bit(14, 28); 963 /* Temporarily reenable machine checks. */ 964 local_mcck_enable(); 965 for (i = 0; i <= __MAX_CHPID; i++) { 966 ret = rchp(i); 967 if ((ret == 0) || (ret == 2)) 968 /* 969 * rchp either succeeded, or another rchp is already 970 * in progress. In either case, we'll get a crw. 971 */ 972 atomic_inc(&chpid_reset_count); 973 } 974 /* Wait for machine check for all channel paths. */ 975 timeout = get_clock() + (RCHP_TIMEOUT << 12); 976 while (atomic_read(&chpid_reset_count) != 0) { 977 if (get_clock() > timeout) 978 break; 979 cpu_relax(); 980 } 981 /* Disable machine checks again. */ 982 local_mcck_disable(); 983 /* Disable channel report machine checks. */ 984 __ctl_clear_bit(14, 28); 985 s390_base_mcck_handler_fn = NULL; 986 } 987 988 static struct reset_call css_reset_call = { 989 .fn = css_reset, 990 }; 991 992 static int __init init_css_reset_call(void) 993 { 994 atomic_set(&chpid_reset_count, 0); 995 register_reset_call(&css_reset_call); 996 return 0; 997 } 998 999 arch_initcall(init_css_reset_call); 1000 1001 struct sch_match_id { 1002 struct subchannel_id schid; 1003 struct ccw_dev_id devid; 1004 int rc; 1005 }; 1006 1007 static int __reipl_subchannel_match(struct subchannel_id schid, void *data) 1008 { 1009 struct schib schib; 1010 struct sch_match_id *match_id = data; 1011 1012 if (stsch_reset(schid, &schib)) 1013 return -ENXIO; 1014 if (schib.pmcw.dnv && 1015 (schib.pmcw.dev == match_id->devid.devno) && 1016 (schid.ssid == match_id->devid.ssid)) { 1017 match_id->schid = schid; 1018 match_id->rc = 0; 1019 return 1; 1020 } 1021 return 0; 1022 } 1023 1024 static int reipl_find_schid(struct ccw_dev_id *devid, 1025 struct subchannel_id *schid) 1026 { 1027 struct sch_match_id match_id; 1028 1029 match_id.devid = *devid; 1030 match_id.rc = -ENODEV; 1031 for_each_subchannel(__reipl_subchannel_match, &match_id); 1032 if (match_id.rc == 0) 1033 *schid = match_id.schid; 1034 return match_id.rc; 1035 } 1036 1037 extern void do_reipl_asm(__u32 schid); 1038 1039 /* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1040 void reipl_ccw_dev(struct ccw_dev_id *devid) 1041 { 1042 struct subchannel_id schid; 1043 1044 s390_reset_system(); 1045 if (reipl_find_schid(devid, &schid) != 0) 1046 panic("IPL Device not found\n"); 1047 do_reipl_asm(*((__u32*)&schid)); 1048 } 1049 1050 extern struct schib ipl_schib; 1051 1052 /* 1053 * ipl_save_parameters gets called very early. It is not allowed to access 1054 * anything in the bss section at all. The bss section is not cleared yet, 1055 * but may contain some ipl parameters written by the firmware. 1056 * These parameters (if present) are copied to 0x2000. 1057 * To avoid corruption of the ipl parameters, all variables used by this 1058 * function must reside on the stack or in the data section. 1059 */ 1060 void ipl_save_parameters(void) 1061 { 1062 struct subchannel_id schid; 1063 unsigned int *ipl_ptr; 1064 void *src, *dst; 1065 1066 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1067 if (!schid.one) 1068 return; 1069 if (stsch(schid, &ipl_schib)) 1070 return; 1071 if (!ipl_schib.pmcw.dnv) 1072 return; 1073 ipl_devno = ipl_schib.pmcw.dev; 1074 ipl_flags |= IPL_DEVNO_VALID; 1075 if (!ipl_schib.pmcw.qf) 1076 return; 1077 ipl_flags |= IPL_PARMBLOCK_VALID; 1078 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; 1079 src = (void *)(unsigned long)*ipl_ptr; 1080 dst = (void *)IPL_PARMBLOCK_ORIGIN; 1081 memmove(dst, src, PAGE_SIZE); 1082 *ipl_ptr = IPL_PARMBLOCK_ORIGIN; 1083 } 1084