/*
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
        timeout_log_enabled = 1;
        return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
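
/*
 * Dump device and subchannel state to the console when a ccw device
 * timeout occurs. Only active when the "ccw_timeout_log" kernel
 * parameter (handled by the __setup hook above) was specified.
 */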
static void ccw_timeout_log(struct ccw_device *cdev)
{
        struct schib schib;
        struct subchannel *sch;
        struct io_subchannel_private *private;
        union orb *orb;
        int cc;

        sch = to_subchannel(cdev->dev.parent);
        private = to_io_private(sch);
        orb = &private->orb;
        cc = stsch(sch->schid, &schib);

        printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
               "device information:\n", get_tod_clock());
        printk(KERN_WARNING "cio: orb:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       orb, sizeof(*orb), 0);
        printk(KERN_WARNING "cio: ccw device bus id: %s\n",
               dev_name(&cdev->dev));
        printk(KERN_WARNING "cio: subchannel bus id: %s\n",
               dev_name(&sch->dev));
        printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
               "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

        if (orb->tm.b) {
                printk(KERN_WARNING "cio: orb indicates transport mode\n");
                printk(KERN_WARNING "cio: last tcw:\n");
                print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                               (void *)(addr_t)orb->tm.tcw,
                               sizeof(struct tcw), 0);
        } else {
                printk(KERN_WARNING "cio: orb indicates command mode\n");
                if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
                    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
                        printk(KERN_WARNING "cio: last channel program "
                               "(intern):\n");
                else
                        printk(KERN_WARNING "cio: last channel program:\n");

                print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                               (void *)(addr_t)orb->cmd.cpa,
                               sizeof(struct ccw1), 0);
        }
        printk(KERN_WARNING "cio: ccw device state: %d\n",
               cdev->private->state);
        printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
        printk(KERN_WARNING "cio: schib:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       &schib, sizeof(schib), 0);
        printk(KERN_WARNING "cio: ccw device flags:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
        struct ccw_device *cdev;

        cdev = (struct ccw_device *) data;
        spin_lock_irq(cdev->ccwlock);
        if (timeout_log_enabled)
                ccw_timeout_log(cdev);
        dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
        spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set (or, for expires == 0, cancel) the device timeout.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt and 255 tries with cio_clear. If everything fails, we
 * give up and return -EIO.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_update_schib(sch))
                return -ENODEV;
        if (!sch->schib.pmcw.ena)
                /* Not operational -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
            !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
                if (!scsw_is_tm(&sch->schib.scsw)) {
                        ret = cio_cancel(sch);
                        if (ret != -EINVAL)
                                return ret;
                }
                /* cancel io unsuccessful or not applicable (transport mode).
                 * Continue with asynchronous instructions. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        if (ret != -EBUSY)
                                return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear(sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        /* Function was unsuccessful */
        CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
                      cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
        return -EIO;
}
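
/*
 * Copy the identification data obtained by the last sense ID operation
 * into the ccw device structure.
 */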
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
        memset(&cdev->id, 0, sizeof(cdev->id));
        cdev->id.cu_type = cdev->private->senseid.cu_type;
        cdev->id.cu_model = cdev->private->senseid.cu_model;
        cdev->id.dev_type = cdev->private->senseid.dev_type;
        cdev->id.dev_model = cdev->private->senseid.dev_model;
}

/*
 * Check whether the recently obtained sense ID data still matches the
 * stored device identification.
 */
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
        return cdev->id.cu_type == cdev->private->senseid.cu_type &&
                cdev->id.cu_model == cdev->private->senseid.cu_model &&
                cdev->id.dev_type == cdev->private->senseid.dev_type &&
                cdev->id.dev_model == cdev->private->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
        int mask, i;
        struct chp_id chpid;

        chp_id_init(&chpid);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(sch->lpm & mask))
                        continue;
                if (old_lpm & mask)
                        continue;
                chpid.id = sch->schib.pmcw.chpid[i];
                if (!chp_is_registered(chpid))
                        css_schedule_eval_all();
        }
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int old_lpm;

        sch = to_subchannel(cdev->dev.parent);

        if (cio_disable_subchannel(sch))
                state = DEV_STATE_NOT_OPER;
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;

        /* Check since device may again have become not operational. */
        if (cio_update_schib(sch))
                state = DEV_STATE_NOT_OPER;
        else
                sch->lpm = sch->schib.pmcw.pam & sch->opm;

        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
            (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
                cdev->private->flags.recog_done = 1;
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
                return;
        }
        if (cdev->private->flags.resuming) {
                cdev->private->state = state;
                cdev->private->flags.recog_done = 1;
                wake_up(&cdev->private->wait_q);
                return;
        }
        switch (state) {
        case DEV_STATE_NOT_OPER:
                break;
        case DEV_STATE_OFFLINE:
                if (!cdev->online) {
                        ccw_device_update_sense_data(cdev);
                        break;
                }
                cdev->private->state = DEV_STATE_OFFLINE;
                cdev->private->flags.recog_done = 1;
                if (ccw_device_test_sense_data(cdev)) {
                        cdev->private->flags.donotify = 1;
                        ccw_device_online(cdev);
                        wake_up(&cdev->private->wait_q);
                } else {
                        ccw_device_update_sense_data(cdev);
                        ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
                }
                return;
        case DEV_STATE_BOXED:
                if (cdev->id.cu_type != 0) { /* device was recognized before */
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_BOXED;
                        wake_up(&cdev->private->wait_q);
                        return;
                }
                break;
        }
        cdev->private->state = state;
        io_subchannel_recog_done(cdev);
        wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:            /* Sense id stopped by timeout. */
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
        int ret = -EINVAL;

        if (!cdev->drv)
                goto out;
        if (!cdev->online)
                goto out;
        CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
                      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
                      event);
        if (!cdev->drv->notify) {
                ret = -EOPNOTSUPP;
                goto out;
        }
        if (cdev->drv->notify(cdev, event))
                ret = NOTIFY_OK;
        else
                ret = NOTIFY_BAD;
out:
        return ret;
}
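
/*
 * A lost device has reappeared: ask the driver (via CIO_OPER) whether it
 * wants the device back. If so, re-enable channel measurements and note
 * the newly available paths; otherwise schedule unregistration and
 * rebinding.
 */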
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
                /* Reenable channel measurements, if needed. */
                ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
                /* Save indication for new paths. */
                cdev->private->path_new_mask = sch->vpm;
                return;
        }
        /* Driver doesn't want device back. */
        ccw_device_set_notoper(cdev);
        ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;

        switch (state) {
        case DEV_STATE_BOXED:
                CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
                              cdev->private->dev_id.devno, sch->schid.sch_no);
                if (cdev->online &&
                    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
                        ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
                cdev->private->flags.donotify = 0;
                break;
        case DEV_STATE_NOT_OPER:
                CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
                              cdev->private->dev_id.devno, sch->schid.sch_no);
                if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
                        ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
                else
                        ccw_device_set_disconnected(cdev);
                cdev->private->flags.donotify = 0;
                break;
        case DEV_STATE_DISCONNECTED:
                CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
                              "%04x\n", cdev->private->dev_id.devno,
                              sch->schid.sch_no);
                if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
                        cdev->private->state = DEV_STATE_NOT_OPER;
                        ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
                } else
                        ccw_device_set_disconnected(cdev);
                cdev->private->flags.donotify = 0;
                break;
        default:
                break;
        }

        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                ccw_device_oper_notify(cdev);
        }
        wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after a
         * timeout (or if sense pgid during path verification detects the
         * device is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                return;
        }
        ccw_device_sense_id_start(cdev);
}

/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
        switch (e) {
        case DEV_EVENT_NOTOPER:
                ccw_request_notoper(cdev);
                break;
        case DEV_EVENT_INTERRUPT:
                ccw_request_handler(cdev);
                break;
        case DEV_EVENT_TIMEOUT:
                ccw_request_timeout(cdev);
                break;
        default:
                break;
        }
}
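
/*
 * Translate the accumulated path masks (paths gone, paths newly
 * available, path groups re-established) into a per-chpid event array
 * and hand it to the driver's path_event callback, if any.
 */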
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        int path_event[8];
        int chp, mask;

        for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
                path_event[chp] = PE_NONE;
                if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
                        path_event[chp] |= PE_PATH_GONE;
                if (mask & cdev->private->path_new_mask & sch->vpm)
                        path_event[chp] |= PE_PATH_AVAILABLE;
                if (mask & cdev->private->pgid_reset_mask & sch->vpm)
                        path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
        }
        if (cdev->online && cdev->drv->path_event)
                cdev->drv->path_event(cdev, path_event);
}

static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
        cdev->private->path_gone_mask = 0;
        cdev->private->path_new_mask = 0;
        cdev->private->pgid_reset_mask = 0;
}
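
/*
 * Build a fake irb that makes a delayed start function look like it was
 * just accepted by the channel subsystem: condition code 1, start
 * function set, start pending and status pending.
 */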
static void create_fake_irb(struct irb *irb, int type)
{
        memset(irb, 0, sizeof(*irb));
        if (type == FAKE_CMD_IRB) {
                struct cmd_scsw *scsw = &irb->scsw.cmd;
                scsw->cc = 1;
                scsw->fctl = SCSW_FCTL_START_FUNC;
                scsw->actl = SCSW_ACTL_START_PEND;
                scsw->stctl = SCSW_STCTL_STATUS_PEND;
        } else if (type == FAKE_TM_IRB) {
                struct tm_scsw *scsw = &irb->scsw.tm;
                scsw->x = 1;
                scsw->cc = 1;
                scsw->fctl = SCSW_FCTL_START_FUNC;
                scsw->actl = SCSW_ACTL_START_PEND;
                scsw->stctl = SCSW_STCTL_STATUS_PEND;
        }
}
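
/*
 * Path verification has completed. Update the path masks, repeat the
 * verification if requested in the meantime, and otherwise move the
 * device to its final state, delivering a deferred (fake) interrupt and
 * any collected path events to the driver.
 */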
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /* Update schib - pom may have changed. */
        if (cio_update_schib(sch)) {
                err = -ENODEV;
                goto callback;
        }
        /* Update lpm with verified path mask. */
        sch->lpm = sch->vpm;
        /* Repeat path verification? */
        if (cdev->private->flags.doverify) {
                ccw_device_verify_start(cdev);
                return;
        }
callback:
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
                        create_fake_irb(&cdev->private->irb,
                                        cdev->private->flags.fake_irb);
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
                                              &cdev->private->irb);
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                ccw_device_report_path_events(cdev);
                break;
        case -ETIME:
        case -EUSERS:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -EACCES:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
                break;
        default:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
        ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Start initial path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
        return 0;
}
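
/*
 * Path group disband has completed; translate the result into the final
 * offline/boxed/not-oper device state.
 */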
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
        struct subchannel *sch;

        /* Allow ccw_device_offline while disconnected. */
        if (cdev->private->state == DEV_STATE_DISCONNECTED ||
            cdev->private->state == DEV_STATE_NOT_OPER) {
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                return 0;
        }
        if (cdev->private->state == DEV_STATE_BOXED) {
                ccw_device_done(cdev, DEV_STATE_BOXED);
                return 0;
        }
        if (ccw_device_is_orphan(cdev)) {
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        sch = to_subchannel(cdev->dev.parent);
        if (cio_update_schib(sch))
                return -ENODEV;
        if (scsw_actl(&sch->schib.scsw) != 0)
                return -EBUSY;
        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EINVAL;
        /* Are we doing path grouping? */
        if (!cdev->private->flags.pgroup) {
                /* No, set state offline immediately. */
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        /* Start Set Path Group commands. */
        cdev->private->state = DEV_STATE_DISBAND_PGID;
        ccw_device_disband_start(cdev);
        return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
                                       enum dev_event dev_event)
{
        if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
                ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
        else
                ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
                                      enum dev_event dev_event)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (cdev->private->state == DEV_STATE_W4SENSE) {
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        /*
         * Since we might not just be coming from an interrupt from the
         * subchannel we have to update the schib.
         */
        if (cio_update_schib(sch)) {
                ccw_device_verify_done(cdev, -ENODEV);
                return;
        }

        if (scsw_actl(&sch->schib.scsw) != 0 ||
            (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
            (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
                                    enum dev_event dev_event)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        if (cdev->online) {
                if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
                        ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                else
                        ccw_device_online_verify(cdev, dev_event);
        } else
                css_schedule_eval(sch->schid);
}

/*
 * Pass interrupt to device driver.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
        unsigned int stctl;
        int ending_status;

        /*
         * we allow for the device action handler if:
         *  - we received ending status
         *  - the action handler requested to see all interrupts
         *  - we received an intermediate status
         *  - fast notification was requested (primary status)
         *  - unsolicited interrupts
         */
        stctl = scsw_stctl(&cdev->private->irb.scsw);
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        if (ending_status)
                ccw_device_set_timeout(cdev, 0);

        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        memset(&cdev->private->irb, 0, sizeof(struct irb));
        return 1;
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        int is_cmd;

        irb = this_cpu_ptr(&cio_irb);
        is_cmd = !scsw_is_tm(&irb->scsw);
        /* Check for unsolicited interrupt. */
        if (!scsw_is_solicited(&irb->scsw)) {
                if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
                    !irb->esw.esw0.erw.cons) {
                        /* Unit check but no sense data. Need basic sense. */
                        if (ccw_device_do_sense(cdev, irb) != 0)
                                goto call_handler_unsol;
                        memcpy(&cdev->private->irb, irb, sizeof(struct irb));
                        cdev->private->state = DEV_STATE_W4SENSE;
                        cdev->private->intparm = 0;
                        return;
                }
call_handler_unsol:
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                if (cdev->private->flags.doverify)
                        ccw_device_online_verify(cdev, 0);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (is_cmd && cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0)
                        cdev->private->state = DEV_STATE_W4SENSE;
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ccw_device_set_timeout(cdev, 0);
        cdev->private->iretry = 255;
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret)
                dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
        else if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = this_cpu_ptr(&cio_irb);
        /* Check for unsolicited interrupt. */
        if (scsw_stctl(&irb->scsw) ==
            (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (scsw_cc(&irb->scsw) == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
                        CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
                                      "interrupt during w4sense...\n",
                                      cdev->private->dev_id.ssid,
                                      cdev->private->dev_id.devno);
                        if (cdev->handler)
                                cdev->handler(cdev, 0, irb);
                }
                return;
        }
        /*
         * Check if a halt or clear has been issued in the meanwhile. If yes,
         * only deliver the halt/clear interrupt to the device driver as if it
         * had killed the original request.
         */
        if (scsw_fctl(&irb->scsw) &
            (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
                cdev->private->flags.dosense = 0;
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                ccw_device_accumulate_irb(cdev, irb);
                goto call_handler;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
call_handler:
        cdev->private->state = DEV_STATE_ONLINE;
        /* In case sensing interfered with setting the device online */
        wake_up(&cdev->private->wait_q);
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
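
/*
 * Got the interrupt that terminates a killed i/o (state timeout_kill):
 * stop the timer, start delayed path verification and complete the
 * request towards the driver with -EIO.
 */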
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        /* Start delayed path verification. */
        ccw_device_online_verify(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
}

/*
 * The killed i/o has not terminated yet: retry cancel/halt/clear.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        /* Start delayed path verification. */
        ccw_device_online_verify(cdev, 0);
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
}
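
/*
 * Forcibly terminate the current i/o: retry cancel/halt/clear, switch to
 * the timeout_kill state while an interrupt from halt/clear is still
 * expected, and otherwise complete the request towards the driver with
 * -EIO.
 */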
void ccw_device_kill_io(struct ccw_device *cdev)
{
        int ret;

        cdev->private->iretry = 255;
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        /* Start delayed path verification. */
        ccw_device_online_verify(cdev, 0);
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Start verification after current task finished. */
        cdev->private->flags.doverify = 1;
}

/*
 * Re-start device recognition on a disconnected device.
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;
        cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
        ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        sch = to_subchannel(cdev->dev.parent);
        /* Update some values. */
        if (cio_update_schib(sch))
                return;
        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /*
         * Use the initial configuration since we can't be sure that the old
         * paths are valid.
         */
        io_subchannel_init_config(sch);
        if (cio_commit_config(sch))
                return;

        /* We should also update ssd info, but this has to wait. */
        /* Check if this is another device which appeared on the same sch. */
        if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
                css_schedule_eval(sch->schid);
        else
                ccw_device_start_id(cdev, 0);
}

static void ccw_device_disabled_irq(struct ccw_device *cdev,
                                    enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /*
         * An interrupt in a disabled state means a previous disable was not
         * successful - should not happen, but we try to disable again.
         */
        cio_disable_subchannel(sch);
}
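
/*
 * Retry a deferred channel measurement operation (set schib / copy
 * measurement block), return the device to the online state and
 * re-deliver the event that triggered the retry. Used for all events in
 * the cmfchange/cmfupdate states below.
 */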
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
                                       enum dev_event dev_event)
{
        cmf_retry_copy_block(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}

/*
 * Quiescing finished (or the device became not operational).
 */
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        cdev->private->state = DEV_STATE_NOT_OPER;
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, HZ/10);
        } else {
                cdev->private->state = DEV_STATE_NOT_OPER;
                wake_up(&cdev->private->wait_q);
        }
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER] = ccw_device_nop,
                [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
                [DEV_EVENT_TIMEOUT] = ccw_device_nop,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
                [DEV_EVENT_TIMEOUT] = ccw_device_nop,
                [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_irq,
                [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY] = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT] = ccw_device_nop,
                [DEV_EVENT_VERIFY] = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_nop,
                [DEV_EVENT_TIMEOUT] = ccw_device_nop,
                [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY] = ccw_device_nop, /* FIXME */
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER] = ccw_device_nop,
                [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT] = ccw_device_nop,
                [DEV_EVENT_VERIFY] = ccw_device_start_id,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
        },
        [DEV_STATE_CMFUPDATE] = {
                [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
                [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
                [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
                [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
        },
        [DEV_STATE_STEAL_LOCK] = {
                [DEV_EVENT_NOTOPER] = ccw_device_request_event,
                [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
                [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
                [DEV_EVENT_VERIFY] = ccw_device_nop,
        },
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);