1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * finite state machine for device handling 4 * 5 * Copyright IBM Corp. 2002, 2008 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 */ 9 10 #include <linux/module.h> 11 #include <linux/init.h> 12 #include <linux/jiffies.h> 13 #include <linux/string.h> 14 15 #include <asm/ccwdev.h> 16 #include <asm/cio.h> 17 #include <asm/chpid.h> 18 19 #include "cio.h" 20 #include "cio_debug.h" 21 #include "css.h" 22 #include "device.h" 23 #include "chsc.h" 24 #include "ioasm.h" 25 #include "chp.h" 26 27 static int timeout_log_enabled; 28 29 static int __init ccw_timeout_log_setup(char *unused) 30 { 31 timeout_log_enabled = 1; 32 return 1; 33 } 34 35 __setup("ccw_timeout_log", ccw_timeout_log_setup); 36 37 static void ccw_timeout_log(struct ccw_device *cdev) 38 { 39 struct schib schib; 40 struct subchannel *sch; 41 struct io_subchannel_private *private; 42 union orb *orb; 43 int cc; 44 45 sch = to_subchannel(cdev->dev.parent); 46 private = to_io_private(sch); 47 orb = &private->orb; 48 cc = stsch(sch->schid, &schib); 49 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 51 "device information:\n", get_tod_clock()); 52 printk(KERN_WARNING "cio: orb:\n"); 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 54 orb, sizeof(*orb), 0); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", 56 dev_name(&cdev->dev)); 57 printk(KERN_WARNING "cio: subchannel bus id: %s\n", 58 dev_name(&sch->dev)); 59 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 60 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 61 62 if (orb->tm.b) { 63 printk(KERN_WARNING "cio: orb indicates transport mode\n"); 64 printk(KERN_WARNING "cio: last tcw:\n"); 65 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 66 (void *)(addr_t)orb->tm.tcw, 67 sizeof(struct tcw), 0); 68 } else { 69 printk(KERN_WARNING "cio: orb indicates command mode\n"); 70 if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || 71 (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) 72 printk(KERN_WARNING "cio: last channel program " 73 "(intern):\n"); 74 else 75 printk(KERN_WARNING "cio: last channel program:\n"); 76 77 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 78 (void *)(addr_t)orb->cmd.cpa, 79 sizeof(struct ccw1), 0); 80 } 81 printk(KERN_WARNING "cio: ccw device state: %d\n", 82 cdev->private->state); 83 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); 84 printk(KERN_WARNING "cio: schib:\n"); 85 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 86 &schib, sizeof(schib), 0); 87 printk(KERN_WARNING "cio: ccw device flags:\n"); 88 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 89 &cdev->private->flags, sizeof(cdev->private->flags), 0); 90 } 91 92 /* 93 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 94 */ 95 void 96 ccw_device_timeout(struct timer_list *t) 97 { 98 struct ccw_device_private *priv = from_timer(priv, t, timer); 99 struct ccw_device *cdev = priv->cdev; 100 101 spin_lock_irq(cdev->ccwlock); 102 if (timeout_log_enabled) 103 ccw_timeout_log(cdev); 104 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); 105 spin_unlock_irq(cdev->ccwlock); 106 } 107 108 /* 109 * Set timeout 110 */ 111 void 112 ccw_device_set_timeout(struct ccw_device *cdev, int expires) 113 { 114 if (expires == 0) { 115 del_timer(&cdev->private->timer); 116 return; 117 } 118 if (timer_pending(&cdev->private->timer)) { 119 if (mod_timer(&cdev->private->timer, jiffies + expires)) 120 return; 121 } 122 cdev->private->timer.expires = jiffies + expires; 123 add_timer(&cdev->private->timer); 124 } 125 126 int 127 ccw_device_cancel_halt_clear(struct ccw_device *cdev) 128 { 129 struct subchannel *sch; 130 int ret; 131 132 sch = to_subchannel(cdev->dev.parent); 133 ret = cio_cancel_halt_clear(sch, &cdev->private->iretry); 134 135 if (ret == -EIO) 136 CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", 137 cdev->private->dev_id.ssid, 138 cdev->private->dev_id.devno); 139 140 return ret; 141 } 142 143 void ccw_device_update_sense_data(struct ccw_device *cdev) 144 { 145 memset(&cdev->id, 0, sizeof(cdev->id)); 146 cdev->id.cu_type = cdev->private->senseid.cu_type; 147 cdev->id.cu_model = cdev->private->senseid.cu_model; 148 cdev->id.dev_type = cdev->private->senseid.dev_type; 149 cdev->id.dev_model = cdev->private->senseid.dev_model; 150 } 151 152 int ccw_device_test_sense_data(struct ccw_device *cdev) 153 { 154 return cdev->id.cu_type == cdev->private->senseid.cu_type && 155 cdev->id.cu_model == cdev->private->senseid.cu_model && 156 cdev->id.dev_type == cdev->private->senseid.dev_type && 157 cdev->id.dev_model == cdev->private->senseid.dev_model; 158 } 159 160 /* 161 * The machine won't give us any notification by machine check if a chpid has 162 * been varied online on the SE so we have to find out by magic (i. e. driving 163 * the channel subsystem to device selection and updating our path masks). 164 */ 165 static void 166 __recover_lost_chpids(struct subchannel *sch, int old_lpm) 167 { 168 int mask, i; 169 struct chp_id chpid; 170 171 chp_id_init(&chpid); 172 for (i = 0; i<8; i++) { 173 mask = 0x80 >> i; 174 if (!(sch->lpm & mask)) 175 continue; 176 if (old_lpm & mask) 177 continue; 178 chpid.id = sch->schib.pmcw.chpid[i]; 179 if (!chp_is_registered(chpid)) 180 css_schedule_eval_all(); 181 } 182 } 183 184 /* 185 * Stop device recognition. 186 */ 187 static void 188 ccw_device_recog_done(struct ccw_device *cdev, int state) 189 { 190 struct subchannel *sch; 191 int old_lpm; 192 193 sch = to_subchannel(cdev->dev.parent); 194 195 if (cio_disable_subchannel(sch)) 196 state = DEV_STATE_NOT_OPER; 197 /* 198 * Now that we tried recognition, we have performed device selection 199 * through ssch() and the path information is up to date. 200 */ 201 old_lpm = sch->lpm; 202 203 /* Check since device may again have become not operational. */ 204 if (cio_update_schib(sch)) 205 state = DEV_STATE_NOT_OPER; 206 else 207 sch->lpm = sch->schib.pmcw.pam & sch->opm; 208 209 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) 210 /* Force reprobe on all chpids. */ 211 old_lpm = 0; 212 if (sch->lpm != old_lpm) 213 __recover_lost_chpids(sch, old_lpm); 214 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID && 215 (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) { 216 cdev->private->flags.recog_done = 1; 217 cdev->private->state = DEV_STATE_DISCONNECTED; 218 wake_up(&cdev->private->wait_q); 219 return; 220 } 221 if (cdev->private->flags.resuming) { 222 cdev->private->state = state; 223 cdev->private->flags.recog_done = 1; 224 wake_up(&cdev->private->wait_q); 225 return; 226 } 227 switch (state) { 228 case DEV_STATE_NOT_OPER: 229 break; 230 case DEV_STATE_OFFLINE: 231 if (!cdev->online) { 232 ccw_device_update_sense_data(cdev); 233 break; 234 } 235 cdev->private->state = DEV_STATE_OFFLINE; 236 cdev->private->flags.recog_done = 1; 237 if (ccw_device_test_sense_data(cdev)) { 238 cdev->private->flags.donotify = 1; 239 ccw_device_online(cdev); 240 wake_up(&cdev->private->wait_q); 241 } else { 242 ccw_device_update_sense_data(cdev); 243 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); 244 } 245 return; 246 case DEV_STATE_BOXED: 247 if (cdev->id.cu_type != 0) { /* device was recognized before */ 248 cdev->private->flags.recog_done = 1; 249 cdev->private->state = DEV_STATE_BOXED; 250 wake_up(&cdev->private->wait_q); 251 return; 252 } 253 break; 254 } 255 cdev->private->state = state; 256 io_subchannel_recog_done(cdev); 257 wake_up(&cdev->private->wait_q); 258 } 259 260 /* 261 * Function called from device_id.c after sense id has completed. 262 */ 263 void 264 ccw_device_sense_id_done(struct ccw_device *cdev, int err) 265 { 266 switch (err) { 267 case 0: 268 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE); 269 break; 270 case -ETIME: /* Sense id stopped by timeout. */ 271 ccw_device_recog_done(cdev, DEV_STATE_BOXED); 272 break; 273 default: 274 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 275 break; 276 } 277 } 278 279 /** 280 * ccw_device_notify() - inform the device's driver about an event 281 * @cdev: device for which an event occurred 282 * @event: event that occurred 283 * 284 * Returns: 285 * -%EINVAL if the device is offline or has no driver. 286 * -%EOPNOTSUPP if the device's driver has no notifier registered. 287 * %NOTIFY_OK if the driver wants to keep the device. 288 * %NOTIFY_BAD if the driver doesn't want to keep the device. 289 */ 290 int ccw_device_notify(struct ccw_device *cdev, int event) 291 { 292 int ret = -EINVAL; 293 294 if (!cdev->drv) 295 goto out; 296 if (!cdev->online) 297 goto out; 298 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", 299 cdev->private->dev_id.ssid, cdev->private->dev_id.devno, 300 event); 301 if (!cdev->drv->notify) { 302 ret = -EOPNOTSUPP; 303 goto out; 304 } 305 if (cdev->drv->notify(cdev, event)) 306 ret = NOTIFY_OK; 307 else 308 ret = NOTIFY_BAD; 309 out: 310 return ret; 311 } 312 313 static void ccw_device_oper_notify(struct ccw_device *cdev) 314 { 315 struct subchannel *sch = to_subchannel(cdev->dev.parent); 316 317 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { 318 /* Reenable channel measurements, if needed. */ 319 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 320 /* Save indication for new paths. */ 321 cdev->private->path_new_mask = sch->vpm; 322 return; 323 } 324 /* Driver doesn't want device back. */ 325 ccw_device_set_notoper(cdev); 326 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); 327 } 328 329 /* 330 * Finished with online/offline processing. 331 */ 332 static void 333 ccw_device_done(struct ccw_device *cdev, int state) 334 { 335 struct subchannel *sch; 336 337 sch = to_subchannel(cdev->dev.parent); 338 339 ccw_device_set_timeout(cdev, 0); 340 341 if (state != DEV_STATE_ONLINE) 342 cio_disable_subchannel(sch); 343 344 /* Reset device status. */ 345 memset(&cdev->private->irb, 0, sizeof(struct irb)); 346 347 cdev->private->state = state; 348 349 switch (state) { 350 case DEV_STATE_BOXED: 351 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 352 cdev->private->dev_id.devno, sch->schid.sch_no); 353 if (cdev->online && 354 ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) 355 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 356 cdev->private->flags.donotify = 0; 357 break; 358 case DEV_STATE_NOT_OPER: 359 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 360 cdev->private->dev_id.devno, sch->schid.sch_no); 361 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) 362 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 363 else 364 ccw_device_set_disconnected(cdev); 365 cdev->private->flags.donotify = 0; 366 break; 367 case DEV_STATE_DISCONNECTED: 368 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 369 "%04x\n", cdev->private->dev_id.devno, 370 sch->schid.sch_no); 371 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) { 372 cdev->private->state = DEV_STATE_NOT_OPER; 373 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 374 } else 375 ccw_device_set_disconnected(cdev); 376 cdev->private->flags.donotify = 0; 377 break; 378 default: 379 break; 380 } 381 382 if (cdev->private->flags.donotify) { 383 cdev->private->flags.donotify = 0; 384 ccw_device_oper_notify(cdev); 385 } 386 wake_up(&cdev->private->wait_q); 387 } 388 389 /* 390 * Start device recognition. 391 */ 392 void ccw_device_recognition(struct ccw_device *cdev) 393 { 394 struct subchannel *sch = to_subchannel(cdev->dev.parent); 395 396 /* 397 * We used to start here with a sense pgid to find out whether a device 398 * is locked by someone else. Unfortunately, the sense pgid command 399 * code has other meanings on devices predating the path grouping 400 * algorithm, so we start with sense id and box the device after an 401 * timeout (or if sense pgid during path verification detects the device 402 * is locked, as may happen on newer devices). 403 */ 404 cdev->private->flags.recog_done = 0; 405 cdev->private->state = DEV_STATE_SENSE_ID; 406 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) { 407 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 408 return; 409 } 410 ccw_device_sense_id_start(cdev); 411 } 412 413 /* 414 * Handle events for states that use the ccw request infrastructure. 415 */ 416 static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e) 417 { 418 switch (e) { 419 case DEV_EVENT_NOTOPER: 420 ccw_request_notoper(cdev); 421 break; 422 case DEV_EVENT_INTERRUPT: 423 ccw_request_handler(cdev); 424 break; 425 case DEV_EVENT_TIMEOUT: 426 ccw_request_timeout(cdev); 427 break; 428 default: 429 break; 430 } 431 } 432 433 static void ccw_device_report_path_events(struct ccw_device *cdev) 434 { 435 struct subchannel *sch = to_subchannel(cdev->dev.parent); 436 int path_event[8]; 437 int chp, mask; 438 439 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) { 440 path_event[chp] = PE_NONE; 441 if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) 442 path_event[chp] |= PE_PATH_GONE; 443 if (mask & cdev->private->path_new_mask & sch->vpm) 444 path_event[chp] |= PE_PATH_AVAILABLE; 445 if (mask & cdev->private->pgid_reset_mask & sch->vpm) 446 path_event[chp] |= PE_PATHGROUP_ESTABLISHED; 447 } 448 if (cdev->online && cdev->drv->path_event) 449 cdev->drv->path_event(cdev, path_event); 450 } 451 452 static void ccw_device_reset_path_events(struct ccw_device *cdev) 453 { 454 cdev->private->path_gone_mask = 0; 455 cdev->private->path_new_mask = 0; 456 cdev->private->pgid_reset_mask = 0; 457 } 458 459 static void create_fake_irb(struct irb *irb, int type) 460 { 461 memset(irb, 0, sizeof(*irb)); 462 if (type == FAKE_CMD_IRB) { 463 struct cmd_scsw *scsw = &irb->scsw.cmd; 464 scsw->cc = 1; 465 scsw->fctl = SCSW_FCTL_START_FUNC; 466 scsw->actl = SCSW_ACTL_START_PEND; 467 scsw->stctl = SCSW_STCTL_STATUS_PEND; 468 } else if (type == FAKE_TM_IRB) { 469 struct tm_scsw *scsw = &irb->scsw.tm; 470 scsw->x = 1; 471 scsw->cc = 1; 472 scsw->fctl = SCSW_FCTL_START_FUNC; 473 scsw->actl = SCSW_ACTL_START_PEND; 474 scsw->stctl = SCSW_STCTL_STATUS_PEND; 475 } 476 } 477 478 static void ccw_device_handle_broken_paths(struct ccw_device *cdev) 479 { 480 struct subchannel *sch = to_subchannel(cdev->dev.parent); 481 u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; 482 483 if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) 484 ccw_device_schedule_recovery(); 485 486 cdev->private->path_broken_mask = broken_paths; 487 } 488 489 void ccw_device_verify_done(struct ccw_device *cdev, int err) 490 { 491 struct subchannel *sch; 492 493 sch = to_subchannel(cdev->dev.parent); 494 /* Update schib - pom may have changed. */ 495 if (cio_update_schib(sch)) { 496 err = -ENODEV; 497 goto callback; 498 } 499 /* Update lpm with verified path mask. */ 500 sch->lpm = sch->vpm; 501 /* Repeat path verification? */ 502 if (cdev->private->flags.doverify) { 503 ccw_device_verify_start(cdev); 504 return; 505 } 506 callback: 507 switch (err) { 508 case 0: 509 ccw_device_done(cdev, DEV_STATE_ONLINE); 510 /* Deliver fake irb to device driver, if needed. */ 511 if (cdev->private->flags.fake_irb) { 512 create_fake_irb(&cdev->private->irb, 513 cdev->private->flags.fake_irb); 514 cdev->private->flags.fake_irb = 0; 515 if (cdev->handler) 516 cdev->handler(cdev, cdev->private->intparm, 517 &cdev->private->irb); 518 memset(&cdev->private->irb, 0, sizeof(struct irb)); 519 } 520 ccw_device_report_path_events(cdev); 521 ccw_device_handle_broken_paths(cdev); 522 break; 523 case -ETIME: 524 case -EUSERS: 525 /* Reset oper notify indication after verify error. */ 526 cdev->private->flags.donotify = 0; 527 ccw_device_done(cdev, DEV_STATE_BOXED); 528 break; 529 case -EACCES: 530 /* Reset oper notify indication after verify error. */ 531 cdev->private->flags.donotify = 0; 532 ccw_device_done(cdev, DEV_STATE_DISCONNECTED); 533 break; 534 default: 535 /* Reset oper notify indication after verify error. */ 536 cdev->private->flags.donotify = 0; 537 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 538 break; 539 } 540 ccw_device_reset_path_events(cdev); 541 } 542 543 /* 544 * Get device online. 545 */ 546 int 547 ccw_device_online(struct ccw_device *cdev) 548 { 549 struct subchannel *sch; 550 int ret; 551 552 if ((cdev->private->state != DEV_STATE_OFFLINE) && 553 (cdev->private->state != DEV_STATE_BOXED)) 554 return -EINVAL; 555 sch = to_subchannel(cdev->dev.parent); 556 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); 557 if (ret != 0) { 558 /* Couldn't enable the subchannel for i/o. Sick device. */ 559 if (ret == -ENODEV) 560 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 561 return ret; 562 } 563 /* Start initial path verification. */ 564 cdev->private->state = DEV_STATE_VERIFY; 565 ccw_device_verify_start(cdev); 566 return 0; 567 } 568 569 void 570 ccw_device_disband_done(struct ccw_device *cdev, int err) 571 { 572 switch (err) { 573 case 0: 574 ccw_device_done(cdev, DEV_STATE_OFFLINE); 575 break; 576 case -ETIME: 577 ccw_device_done(cdev, DEV_STATE_BOXED); 578 break; 579 default: 580 cdev->private->flags.donotify = 0; 581 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 582 break; 583 } 584 } 585 586 /* 587 * Shutdown device. 588 */ 589 int 590 ccw_device_offline(struct ccw_device *cdev) 591 { 592 struct subchannel *sch; 593 594 /* Allow ccw_device_offline while disconnected. */ 595 if (cdev->private->state == DEV_STATE_DISCONNECTED || 596 cdev->private->state == DEV_STATE_NOT_OPER) { 597 cdev->private->flags.donotify = 0; 598 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 599 return 0; 600 } 601 if (cdev->private->state == DEV_STATE_BOXED) { 602 ccw_device_done(cdev, DEV_STATE_BOXED); 603 return 0; 604 } 605 if (ccw_device_is_orphan(cdev)) { 606 ccw_device_done(cdev, DEV_STATE_OFFLINE); 607 return 0; 608 } 609 sch = to_subchannel(cdev->dev.parent); 610 if (cio_update_schib(sch)) 611 return -ENODEV; 612 if (scsw_actl(&sch->schib.scsw) != 0) 613 return -EBUSY; 614 if (cdev->private->state != DEV_STATE_ONLINE) 615 return -EINVAL; 616 /* Are we doing path grouping? */ 617 if (!cdev->private->flags.pgroup) { 618 /* No, set state offline immediately. */ 619 ccw_device_done(cdev, DEV_STATE_OFFLINE); 620 return 0; 621 } 622 /* Start Set Path Group commands. */ 623 cdev->private->state = DEV_STATE_DISBAND_PGID; 624 ccw_device_disband_start(cdev); 625 return 0; 626 } 627 628 /* 629 * Handle not operational event in non-special state. 630 */ 631 static void ccw_device_generic_notoper(struct ccw_device *cdev, 632 enum dev_event dev_event) 633 { 634 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) 635 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 636 else 637 ccw_device_set_disconnected(cdev); 638 } 639 640 /* 641 * Handle path verification event in offline state. 642 */ 643 static void ccw_device_offline_verify(struct ccw_device *cdev, 644 enum dev_event dev_event) 645 { 646 struct subchannel *sch = to_subchannel(cdev->dev.parent); 647 648 css_schedule_eval(sch->schid); 649 } 650 651 /* 652 * Handle path verification event. 653 */ 654 static void 655 ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) 656 { 657 struct subchannel *sch; 658 659 if (cdev->private->state == DEV_STATE_W4SENSE) { 660 cdev->private->flags.doverify = 1; 661 return; 662 } 663 sch = to_subchannel(cdev->dev.parent); 664 /* 665 * Since we might not just be coming from an interrupt from the 666 * subchannel we have to update the schib. 667 */ 668 if (cio_update_schib(sch)) { 669 ccw_device_verify_done(cdev, -ENODEV); 670 return; 671 } 672 673 if (scsw_actl(&sch->schib.scsw) != 0 || 674 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || 675 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { 676 /* 677 * No final status yet or final status not yet delivered 678 * to the device driver. Can't do path verification now, 679 * delay until final status was delivered. 680 */ 681 cdev->private->flags.doverify = 1; 682 return; 683 } 684 /* Device is idle, we can do the path verification. */ 685 cdev->private->state = DEV_STATE_VERIFY; 686 ccw_device_verify_start(cdev); 687 } 688 689 /* 690 * Handle path verification event in boxed state. 691 */ 692 static void ccw_device_boxed_verify(struct ccw_device *cdev, 693 enum dev_event dev_event) 694 { 695 struct subchannel *sch = to_subchannel(cdev->dev.parent); 696 697 if (cdev->online) { 698 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) 699 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 700 else 701 ccw_device_online_verify(cdev, dev_event); 702 } else 703 css_schedule_eval(sch->schid); 704 } 705 706 /* 707 * Pass interrupt to device driver. 708 */ 709 static int ccw_device_call_handler(struct ccw_device *cdev) 710 { 711 unsigned int stctl; 712 int ending_status; 713 714 /* 715 * we allow for the device action handler if . 716 * - we received ending status 717 * - the action handler requested to see all interrupts 718 * - we received an intermediate status 719 * - fast notification was requested (primary status) 720 * - unsolicited interrupts 721 */ 722 stctl = scsw_stctl(&cdev->private->irb.scsw); 723 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 724 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 725 (stctl == SCSW_STCTL_STATUS_PEND); 726 if (!ending_status && 727 !cdev->private->options.repall && 728 !(stctl & SCSW_STCTL_INTER_STATUS) && 729 !(cdev->private->options.fast && 730 (stctl & SCSW_STCTL_PRIM_STATUS))) 731 return 0; 732 733 if (ending_status) 734 ccw_device_set_timeout(cdev, 0); 735 736 if (cdev->handler) 737 cdev->handler(cdev, cdev->private->intparm, 738 &cdev->private->irb); 739 740 memset(&cdev->private->irb, 0, sizeof(struct irb)); 741 return 1; 742 } 743 744 /* 745 * Got an interrupt for a normal io (state online). 746 */ 747 static void 748 ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 749 { 750 struct irb *irb; 751 int is_cmd; 752 753 irb = this_cpu_ptr(&cio_irb); 754 is_cmd = !scsw_is_tm(&irb->scsw); 755 /* Check for unsolicited interrupt. */ 756 if (!scsw_is_solicited(&irb->scsw)) { 757 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && 758 !irb->esw.esw0.erw.cons) { 759 /* Unit check but no sense data. Need basic sense. */ 760 if (ccw_device_do_sense(cdev, irb) != 0) 761 goto call_handler_unsol; 762 memcpy(&cdev->private->irb, irb, sizeof(struct irb)); 763 cdev->private->state = DEV_STATE_W4SENSE; 764 cdev->private->intparm = 0; 765 return; 766 } 767 call_handler_unsol: 768 if (cdev->handler) 769 cdev->handler (cdev, 0, irb); 770 if (cdev->private->flags.doverify) 771 ccw_device_online_verify(cdev, 0); 772 return; 773 } 774 /* Accumulate status and find out if a basic sense is needed. */ 775 ccw_device_accumulate_irb(cdev, irb); 776 if (is_cmd && cdev->private->flags.dosense) { 777 if (ccw_device_do_sense(cdev, irb) == 0) { 778 cdev->private->state = DEV_STATE_W4SENSE; 779 } 780 return; 781 } 782 /* Call the handler. */ 783 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) 784 /* Start delayed path verification. */ 785 ccw_device_online_verify(cdev, 0); 786 } 787 788 /* 789 * Got an timeout in online state. 790 */ 791 static void 792 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) 793 { 794 int ret; 795 796 ccw_device_set_timeout(cdev, 0); 797 cdev->private->iretry = 255; 798 cdev->private->async_kill_io_rc = -ETIMEDOUT; 799 ret = ccw_device_cancel_halt_clear(cdev); 800 if (ret == -EBUSY) { 801 ccw_device_set_timeout(cdev, 3*HZ); 802 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 803 return; 804 } 805 if (ret) 806 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 807 else if (cdev->handler) 808 cdev->handler(cdev, cdev->private->intparm, 809 ERR_PTR(-ETIMEDOUT)); 810 } 811 812 /* 813 * Got an interrupt for a basic sense. 814 */ 815 static void 816 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) 817 { 818 struct irb *irb; 819 820 irb = this_cpu_ptr(&cio_irb); 821 /* Check for unsolicited interrupt. */ 822 if (scsw_stctl(&irb->scsw) == 823 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 824 if (scsw_cc(&irb->scsw) == 1) 825 /* Basic sense hasn't started. Try again. */ 826 ccw_device_do_sense(cdev, irb); 827 else { 828 CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited " 829 "interrupt during w4sense...\n", 830 cdev->private->dev_id.ssid, 831 cdev->private->dev_id.devno); 832 if (cdev->handler) 833 cdev->handler (cdev, 0, irb); 834 } 835 return; 836 } 837 /* 838 * Check if a halt or clear has been issued in the meanwhile. If yes, 839 * only deliver the halt/clear interrupt to the device driver as if it 840 * had killed the original request. 841 */ 842 if (scsw_fctl(&irb->scsw) & 843 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 844 cdev->private->flags.dosense = 0; 845 memset(&cdev->private->irb, 0, sizeof(struct irb)); 846 ccw_device_accumulate_irb(cdev, irb); 847 goto call_handler; 848 } 849 /* Add basic sense info to irb. */ 850 ccw_device_accumulate_basic_sense(cdev, irb); 851 if (cdev->private->flags.dosense) { 852 /* Another basic sense is needed. */ 853 ccw_device_do_sense(cdev, irb); 854 return; 855 } 856 call_handler: 857 cdev->private->state = DEV_STATE_ONLINE; 858 /* In case sensing interfered with setting the device online */ 859 wake_up(&cdev->private->wait_q); 860 /* Call the handler. */ 861 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) 862 /* Start delayed path verification. */ 863 ccw_device_online_verify(cdev, 0); 864 } 865 866 static void 867 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 868 { 869 ccw_device_set_timeout(cdev, 0); 870 /* Start delayed path verification. */ 871 ccw_device_online_verify(cdev, 0); 872 /* OK, i/o is dead now. Call interrupt handler. */ 873 if (cdev->handler) 874 cdev->handler(cdev, cdev->private->intparm, 875 ERR_PTR(cdev->private->async_kill_io_rc)); 876 } 877 878 static void 879 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) 880 { 881 int ret; 882 883 ret = ccw_device_cancel_halt_clear(cdev); 884 if (ret == -EBUSY) { 885 ccw_device_set_timeout(cdev, 3*HZ); 886 return; 887 } 888 /* Start delayed path verification. */ 889 ccw_device_online_verify(cdev, 0); 890 if (cdev->handler) 891 cdev->handler(cdev, cdev->private->intparm, 892 ERR_PTR(cdev->private->async_kill_io_rc)); 893 } 894 895 void ccw_device_kill_io(struct ccw_device *cdev) 896 { 897 int ret; 898 899 ccw_device_set_timeout(cdev, 0); 900 cdev->private->iretry = 255; 901 cdev->private->async_kill_io_rc = -EIO; 902 ret = ccw_device_cancel_halt_clear(cdev); 903 if (ret == -EBUSY) { 904 ccw_device_set_timeout(cdev, 3*HZ); 905 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 906 return; 907 } 908 /* Start delayed path verification. */ 909 ccw_device_online_verify(cdev, 0); 910 if (cdev->handler) 911 cdev->handler(cdev, cdev->private->intparm, 912 ERR_PTR(-EIO)); 913 } 914 915 static void 916 ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event) 917 { 918 /* Start verification after current task finished. */ 919 cdev->private->flags.doverify = 1; 920 } 921 922 static void 923 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) 924 { 925 struct subchannel *sch; 926 927 sch = to_subchannel(cdev->dev.parent); 928 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) 929 /* Couldn't enable the subchannel for i/o. Sick device. */ 930 return; 931 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID; 932 ccw_device_sense_id_start(cdev); 933 } 934 935 void ccw_device_trigger_reprobe(struct ccw_device *cdev) 936 { 937 struct subchannel *sch; 938 939 if (cdev->private->state != DEV_STATE_DISCONNECTED) 940 return; 941 942 sch = to_subchannel(cdev->dev.parent); 943 /* Update some values. */ 944 if (cio_update_schib(sch)) 945 return; 946 /* 947 * The pim, pam, pom values may not be accurate, but they are the best 948 * we have before performing device selection :/ 949 */ 950 sch->lpm = sch->schib.pmcw.pam & sch->opm; 951 /* 952 * Use the initial configuration since we can't be shure that the old 953 * paths are valid. 954 */ 955 io_subchannel_init_config(sch); 956 if (cio_commit_config(sch)) 957 return; 958 959 /* We should also udate ssd info, but this has to wait. */ 960 /* Check if this is another device which appeared on the same sch. */ 961 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) 962 css_schedule_eval(sch->schid); 963 else 964 ccw_device_start_id(cdev, 0); 965 } 966 967 static void ccw_device_disabled_irq(struct ccw_device *cdev, 968 enum dev_event dev_event) 969 { 970 struct subchannel *sch; 971 972 sch = to_subchannel(cdev->dev.parent); 973 /* 974 * An interrupt in a disabled state means a previous disable was not 975 * successful - should not happen, but we try to disable again. 976 */ 977 cio_disable_subchannel(sch); 978 } 979 980 static void 981 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event) 982 { 983 retry_set_schib(cdev); 984 cdev->private->state = DEV_STATE_ONLINE; 985 dev_fsm_event(cdev, dev_event); 986 } 987 988 static void ccw_device_update_cmfblock(struct ccw_device *cdev, 989 enum dev_event dev_event) 990 { 991 cmf_retry_copy_block(cdev); 992 cdev->private->state = DEV_STATE_ONLINE; 993 dev_fsm_event(cdev, dev_event); 994 } 995 996 static void 997 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 998 { 999 ccw_device_set_timeout(cdev, 0); 1000 cdev->private->state = DEV_STATE_NOT_OPER; 1001 wake_up(&cdev->private->wait_q); 1002 } 1003 1004 static void 1005 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event) 1006 { 1007 int ret; 1008 1009 ret = ccw_device_cancel_halt_clear(cdev); 1010 if (ret == -EBUSY) { 1011 ccw_device_set_timeout(cdev, HZ/10); 1012 } else { 1013 cdev->private->state = DEV_STATE_NOT_OPER; 1014 wake_up(&cdev->private->wait_q); 1015 } 1016 } 1017 1018 /* 1019 * No operation action. This is used e.g. to ignore a timeout event in 1020 * state offline. 1021 */ 1022 static void 1023 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) 1024 { 1025 } 1026 1027 /* 1028 * device statemachine 1029 */ 1030 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 1031 [DEV_STATE_NOT_OPER] = { 1032 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1033 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, 1034 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1035 [DEV_EVENT_VERIFY] = ccw_device_nop, 1036 }, 1037 [DEV_STATE_SENSE_ID] = { 1038 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1039 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1040 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1041 [DEV_EVENT_VERIFY] = ccw_device_nop, 1042 }, 1043 [DEV_STATE_OFFLINE] = { 1044 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1045 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, 1046 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1047 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 1048 }, 1049 [DEV_STATE_VERIFY] = { 1050 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1051 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1052 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1053 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 1054 }, 1055 [DEV_STATE_ONLINE] = { 1056 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1057 [DEV_EVENT_INTERRUPT] = ccw_device_irq, 1058 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout, 1059 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1060 }, 1061 [DEV_STATE_W4SENSE] = { 1062 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1063 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense, 1064 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1065 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1066 }, 1067 [DEV_STATE_DISBAND_PGID] = { 1068 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1069 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1070 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1071 [DEV_EVENT_VERIFY] = ccw_device_nop, 1072 }, 1073 [DEV_STATE_BOXED] = { 1074 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1075 [DEV_EVENT_INTERRUPT] = ccw_device_nop, 1076 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1077 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify, 1078 }, 1079 /* states to wait for i/o completion before doing something */ 1080 [DEV_STATE_TIMEOUT_KILL] = { 1081 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1082 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1083 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout, 1084 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME 1085 }, 1086 [DEV_STATE_QUIESCE] = { 1087 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, 1088 [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done, 1089 [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout, 1090 [DEV_EVENT_VERIFY] = ccw_device_nop, 1091 }, 1092 /* special states for devices gone not operational */ 1093 [DEV_STATE_DISCONNECTED] = { 1094 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1095 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1096 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1097 [DEV_EVENT_VERIFY] = ccw_device_start_id, 1098 }, 1099 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1100 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1101 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1102 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1103 [DEV_EVENT_VERIFY] = ccw_device_nop, 1104 }, 1105 [DEV_STATE_CMFCHANGE] = { 1106 [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate, 1107 [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate, 1108 [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate, 1109 [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate, 1110 }, 1111 [DEV_STATE_CMFUPDATE] = { 1112 [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock, 1113 [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock, 1114 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, 1115 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, 1116 }, 1117 [DEV_STATE_STEAL_LOCK] = { 1118 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1119 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1120 [DEV_EVENT_TIMEOUT] = ccw_device_request_event, 1121 [DEV_EVENT_VERIFY] = ccw_device_nop, 1122 }, 1123 }; 1124 1125 EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1126