// SPDX-License-Identifier: GPL-2.0
/*
 * finite state machine for device handling
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       phys_to_virt(orb->tm.tcw),
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       phys_to_virt(orb->cmd.cpa),
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
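
/*
 * Note: the dump above is only produced when the (valueless)
 * "ccw_timeout_log" token is present on the kernel command line;
 * the __setup() hook above flips timeout_log_enabled when the token
 * is seen, and ccw_device_timeout() below checks it.
 */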

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0)
		del_timer(&cdev->private->timer);
	else
		mod_timer(&cdev->private->timer, jiffies + expires);
}

int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);

	if (ret == -EIO)
		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);

	return ret;
}

void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}

int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type ==
		cdev->private->dma_area->senseid.cu_type &&
		cdev->id.cu_model ==
		cdev->private->dma_area->senseid.cu_model &&
		cdev->id.dev_type ==
		cdev->private->dma_area->senseid.dev_type &&
		cdev->id.dev_model ==
		cdev->private->dma_area->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE, so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}
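
/*
 * Reminder on the path masks (lpm/opm/vpm/pam) used throughout this
 * file: each is an 8-bit mask whose most significant bit corresponds to
 * CHPID slot 0 of the subchannel, hence the "0x80 >> i" walk above.
 * For example, a mask of 0xa0 means paths 0 and 2 are set.
 */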

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}

static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Reenable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
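
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * a driver's ->notify callback returns nonzero to keep the device, as
 * ccw_device_notify() above shows.  A minimal sketch might look like:
 *
 *	static int mydrv_notify(struct ccw_device *cdev, int event)
 *	{
 *		// Keep the device on path events and reconnects,
 *		// give it up only when it is gone for good.
 *		return event != CIO_GONE;
 *	}
 */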
326 */ 327 static void 328 ccw_device_done(struct ccw_device *cdev, int state) 329 { 330 struct subchannel *sch; 331 332 sch = to_subchannel(cdev->dev.parent); 333 334 ccw_device_set_timeout(cdev, 0); 335 336 if (state != DEV_STATE_ONLINE) 337 cio_disable_subchannel(sch); 338 339 /* Reset device status. */ 340 memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 341 342 cdev->private->state = state; 343 344 switch (state) { 345 case DEV_STATE_BOXED: 346 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 347 cdev->private->dev_id.devno, sch->schid.sch_no); 348 if (cdev->online && 349 ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) 350 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 351 cdev->private->flags.donotify = 0; 352 break; 353 case DEV_STATE_NOT_OPER: 354 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 355 cdev->private->dev_id.devno, sch->schid.sch_no); 356 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) 357 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 358 else 359 ccw_device_set_disconnected(cdev); 360 cdev->private->flags.donotify = 0; 361 break; 362 case DEV_STATE_DISCONNECTED: 363 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 364 "%04x\n", cdev->private->dev_id.devno, 365 sch->schid.sch_no); 366 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) { 367 cdev->private->state = DEV_STATE_NOT_OPER; 368 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 369 } else 370 ccw_device_set_disconnected(cdev); 371 cdev->private->flags.donotify = 0; 372 break; 373 default: 374 break; 375 } 376 377 if (cdev->private->flags.donotify) { 378 cdev->private->flags.donotify = 0; 379 ccw_device_oper_notify(cdev); 380 } 381 wake_up(&cdev->private->wait_q); 382 } 383 384 /* 385 * Start device recognition. 386 */ 387 void ccw_device_recognition(struct ccw_device *cdev) 388 { 389 struct subchannel *sch = to_subchannel(cdev->dev.parent); 390 391 /* 392 * We used to start here with a sense pgid to find out whether a device 393 * is locked by someone else. Unfortunately, the sense pgid command 394 * code has other meanings on devices predating the path grouping 395 * algorithm, so we start with sense id and box the device after an 396 * timeout (or if sense pgid during path verification detects the device 397 * is locked, as may happen on newer devices). 398 */ 399 cdev->private->flags.recog_done = 0; 400 cdev->private->state = DEV_STATE_SENSE_ID; 401 if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) { 402 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 403 return; 404 } 405 ccw_device_sense_id_start(cdev); 406 } 407 408 /* 409 * Handle events for states that use the ccw request infrastructure. 
410 */ 411 static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e) 412 { 413 switch (e) { 414 case DEV_EVENT_NOTOPER: 415 ccw_request_notoper(cdev); 416 break; 417 case DEV_EVENT_INTERRUPT: 418 ccw_request_handler(cdev); 419 break; 420 case DEV_EVENT_TIMEOUT: 421 ccw_request_timeout(cdev); 422 break; 423 default: 424 break; 425 } 426 } 427 428 static void ccw_device_report_path_events(struct ccw_device *cdev) 429 { 430 struct subchannel *sch = to_subchannel(cdev->dev.parent); 431 int path_event[8]; 432 int chp, mask; 433 434 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) { 435 path_event[chp] = PE_NONE; 436 if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) 437 path_event[chp] |= PE_PATH_GONE; 438 if (mask & cdev->private->path_new_mask & sch->vpm) 439 path_event[chp] |= PE_PATH_AVAILABLE; 440 if (mask & cdev->private->pgid_reset_mask & sch->vpm) 441 path_event[chp] |= PE_PATHGROUP_ESTABLISHED; 442 } 443 if (cdev->online && cdev->drv->path_event) 444 cdev->drv->path_event(cdev, path_event); 445 } 446 447 static void ccw_device_reset_path_events(struct ccw_device *cdev) 448 { 449 cdev->private->path_gone_mask = 0; 450 cdev->private->path_new_mask = 0; 451 cdev->private->pgid_reset_mask = 0; 452 } 453 454 static void create_fake_irb(struct irb *irb, int type) 455 { 456 memset(irb, 0, sizeof(*irb)); 457 if (type == FAKE_CMD_IRB) { 458 struct cmd_scsw *scsw = &irb->scsw.cmd; 459 scsw->cc = 1; 460 scsw->fctl = SCSW_FCTL_START_FUNC; 461 scsw->actl = SCSW_ACTL_START_PEND; 462 scsw->stctl = SCSW_STCTL_STATUS_PEND; 463 } else if (type == FAKE_TM_IRB) { 464 struct tm_scsw *scsw = &irb->scsw.tm; 465 scsw->x = 1; 466 scsw->cc = 1; 467 scsw->fctl = SCSW_FCTL_START_FUNC; 468 scsw->actl = SCSW_ACTL_START_PEND; 469 scsw->stctl = SCSW_STCTL_STATUS_PEND; 470 } 471 } 472 473 static void ccw_device_handle_broken_paths(struct ccw_device *cdev) 474 { 475 struct subchannel *sch = to_subchannel(cdev->dev.parent); 476 u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; 477 478 if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) 479 ccw_device_schedule_recovery(); 480 481 cdev->private->path_broken_mask = broken_paths; 482 } 483 484 void ccw_device_verify_done(struct ccw_device *cdev, int err) 485 { 486 struct subchannel *sch; 487 488 sch = to_subchannel(cdev->dev.parent); 489 /* Update schib - pom may have changed. */ 490 if (cio_update_schib(sch)) { 491 err = -ENODEV; 492 goto callback; 493 } 494 /* Update lpm with verified path mask. */ 495 sch->lpm = sch->vpm; 496 /* Repeat path verification? */ 497 if (cdev->private->flags.doverify) { 498 ccw_device_verify_start(cdev); 499 return; 500 } 501 callback: 502 switch (err) { 503 case 0: 504 ccw_device_done(cdev, DEV_STATE_ONLINE); 505 /* Deliver fake irb to device driver, if needed. */ 506 if (cdev->private->flags.fake_irb) { 507 create_fake_irb(&cdev->private->dma_area->irb, 508 cdev->private->flags.fake_irb); 509 cdev->private->flags.fake_irb = 0; 510 if (cdev->handler) 511 cdev->handler(cdev, cdev->private->intparm, 512 &cdev->private->dma_area->irb); 513 memset(&cdev->private->dma_area->irb, 0, 514 sizeof(struct irb)); 515 } 516 ccw_device_report_path_events(cdev); 517 ccw_device_handle_broken_paths(cdev); 518 break; 519 case -ETIME: 520 case -EUSERS: 521 /* Reset oper notify indication after verify error. */ 522 cdev->private->flags.donotify = 0; 523 ccw_device_done(cdev, DEV_STATE_BOXED); 524 break; 525 case -EACCES: 526 /* Reset oper notify indication after verify error. 

void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}
649 */ 650 static void 651 ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) 652 { 653 struct subchannel *sch; 654 655 if (cdev->private->state == DEV_STATE_W4SENSE) { 656 cdev->private->flags.doverify = 1; 657 return; 658 } 659 sch = to_subchannel(cdev->dev.parent); 660 /* 661 * Since we might not just be coming from an interrupt from the 662 * subchannel we have to update the schib. 663 */ 664 if (cio_update_schib(sch)) { 665 ccw_device_verify_done(cdev, -ENODEV); 666 return; 667 } 668 669 if (scsw_actl(&sch->schib.scsw) != 0 || 670 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || 671 (scsw_stctl(&cdev->private->dma_area->irb.scsw) & 672 SCSW_STCTL_STATUS_PEND)) { 673 /* 674 * No final status yet or final status not yet delivered 675 * to the device driver. Can't do path verification now, 676 * delay until final status was delivered. 677 */ 678 cdev->private->flags.doverify = 1; 679 return; 680 } 681 /* Device is idle, we can do the path verification. */ 682 cdev->private->state = DEV_STATE_VERIFY; 683 ccw_device_verify_start(cdev); 684 } 685 686 /* 687 * Handle path verification event in boxed state. 688 */ 689 static void ccw_device_boxed_verify(struct ccw_device *cdev, 690 enum dev_event dev_event) 691 { 692 struct subchannel *sch = to_subchannel(cdev->dev.parent); 693 694 if (cdev->online) { 695 if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) 696 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 697 else 698 ccw_device_online_verify(cdev, dev_event); 699 } else 700 css_schedule_eval(sch->schid); 701 } 702 703 /* 704 * Pass interrupt to device driver. 705 */ 706 static int ccw_device_call_handler(struct ccw_device *cdev) 707 { 708 unsigned int stctl; 709 int ending_status; 710 711 /* 712 * we allow for the device action handler if . 713 * - we received ending status 714 * - the action handler requested to see all interrupts 715 * - we received an intermediate status 716 * - fast notification was requested (primary status) 717 * - unsolicited interrupts 718 */ 719 stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw); 720 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 721 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 722 (stctl == SCSW_STCTL_STATUS_PEND); 723 if (!ending_status && 724 !cdev->private->options.repall && 725 !(stctl & SCSW_STCTL_INTER_STATUS) && 726 !(cdev->private->options.fast && 727 (stctl & SCSW_STCTL_PRIM_STATUS))) 728 return 0; 729 730 if (ending_status) 731 ccw_device_set_timeout(cdev, 0); 732 733 if (cdev->handler) 734 cdev->handler(cdev, cdev->private->intparm, 735 &cdev->private->dma_area->irb); 736 737 memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb)); 738 return 1; 739 } 740 741 /* 742 * Got an interrupt for a normal io (state online). 743 */ 744 static void 745 ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 746 { 747 struct irb *irb; 748 int is_cmd; 749 750 irb = this_cpu_ptr(&cio_irb); 751 is_cmd = !scsw_is_tm(&irb->scsw); 752 /* Check for unsolicited interrupt. */ 753 if (!scsw_is_solicited(&irb->scsw)) { 754 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && 755 !irb->esw.esw0.erw.cons) { 756 /* Unit check but no sense data. Need basic sense. 

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0)
			cdev->private->state = DEV_STATE_W4SENSE;
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
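
/*
 * Note on the 3*HZ rearm above and in the kill/killing paths below:
 * ccw_device_cancel_halt_clear() escalates (roughly) through cancel,
 * halt and clear, with iretry acting as the retry budget, and returns
 * -EBUSY while a function is still pending.  The FSM then waits in
 * DEV_STATE_TIMEOUT_KILL for either the interrupt or the next timeout.
 */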

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
974 */ 975 cio_disable_subchannel(sch); 976 } 977 978 static void 979 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event) 980 { 981 retry_set_schib(cdev); 982 cdev->private->state = DEV_STATE_ONLINE; 983 dev_fsm_event(cdev, dev_event); 984 } 985 986 static void ccw_device_update_cmfblock(struct ccw_device *cdev, 987 enum dev_event dev_event) 988 { 989 cmf_retry_copy_block(cdev); 990 cdev->private->state = DEV_STATE_ONLINE; 991 dev_fsm_event(cdev, dev_event); 992 } 993 994 static void 995 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 996 { 997 ccw_device_set_timeout(cdev, 0); 998 cdev->private->state = DEV_STATE_NOT_OPER; 999 wake_up(&cdev->private->wait_q); 1000 } 1001 1002 static void 1003 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event) 1004 { 1005 int ret; 1006 1007 ret = ccw_device_cancel_halt_clear(cdev); 1008 if (ret == -EBUSY) { 1009 ccw_device_set_timeout(cdev, HZ/10); 1010 } else { 1011 cdev->private->state = DEV_STATE_NOT_OPER; 1012 wake_up(&cdev->private->wait_q); 1013 } 1014 } 1015 1016 /* 1017 * No operation action. This is used e.g. to ignore a timeout event in 1018 * state offline. 1019 */ 1020 static void 1021 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) 1022 { 1023 } 1024 1025 /* 1026 * device statemachine 1027 */ 1028 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 1029 [DEV_STATE_NOT_OPER] = { 1030 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1031 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, 1032 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1033 [DEV_EVENT_VERIFY] = ccw_device_nop, 1034 }, 1035 [DEV_STATE_SENSE_ID] = { 1036 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1037 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1038 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1039 [DEV_EVENT_VERIFY] = ccw_device_nop, 1040 }, 1041 [DEV_STATE_OFFLINE] = { 1042 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1043 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, 1044 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1045 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 1046 }, 1047 [DEV_STATE_VERIFY] = { 1048 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1049 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1050 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1051 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 1052 }, 1053 [DEV_STATE_ONLINE] = { 1054 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1055 [DEV_EVENT_INTERRUPT] = ccw_device_irq, 1056 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout, 1057 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1058 }, 1059 [DEV_STATE_W4SENSE] = { 1060 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1061 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense, 1062 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1063 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1064 }, 1065 [DEV_STATE_DISBAND_PGID] = { 1066 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1067 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1068 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1069 [DEV_EVENT_VERIFY] = ccw_device_nop, 1070 }, 1071 [DEV_STATE_BOXED] = { 1072 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1073 [DEV_EVENT_INTERRUPT] = ccw_device_nop, 1074 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1075 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify, 1076 }, 1077 /* states to wait for i/o completion before doing something */ 1078 [DEV_STATE_TIMEOUT_KILL] = { 1079 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1080 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1081 

/*
 * device state machine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER] = ccw_device_nop,
		[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
		[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_nop,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER] = ccw_device_nop,
		[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);