/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 * Copyright IBM Corp. 2002,2008
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
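
/*
 * Note: the dump produced by ccw_timeout_log() is gated on the
 * "ccw_timeout_log" kernel parameter registered via __setup() above.
 * Illustrative boot command line (the device numbers are made up):
 *
 *	dasd=0.0.0150 root=/dev/dasda1 ccw_timeout_log
 */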

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear(sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
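
/*
 * Illustrative sketch (not built): the retry contract of
 * ccw_device_cancel_halt_clear(). A return of -EBUSY means halt/clear
 * was initiated and an interrupt is still outstanding, so callers
 * re-arm the FSM timer and call again from the timeout handler -
 * compare ccw_device_kill_io() and ccw_device_killing_timeout() below.
 * my_stop_io() is a hypothetical name; like any other FSM action it
 * would run under the ccwlock.
 */
#if 0
static void my_stop_io(struct ccw_device *cdev)
{
	int ret = ccw_device_cancel_halt_clear(cdev);

	if (ret == -EBUSY) {
		/* halt/clear in flight - retry from the timeout handler. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* ret == 0: device is idle; ret == -ENODEV: device is gone. */
}
#endif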

void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->senseid.cu_type;
	cdev->id.cu_model = cdev->private->senseid.cu_model;
	cdev->id.dev_type = cdev->private->senseid.dev_type;
	cdev->id.dev_model = cdev->private->senseid.dev_model;
}

int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
		cdev->id.cu_model == cdev->private->senseid.cu_model &&
		cdev->id.dev_type == cdev->private->senseid.dev_type &&
		cdev->id.dev_model == cdev->private->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
			      "subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			/* Issue device info message. */
			CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
				      "CU Type/Mod = %04X/%02X, Dev Type/Mod "
				      "= %04X/%02X\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno,
				      cdev->id.cu_type, cdev->id.cu_model,
				      cdev->id.dev_type, cdev->id.dev_model);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_do_unbind_bind);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		return;
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
			      "subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

int ccw_device_notify(struct ccw_device *cdev, int event)
{
	if (!cdev->drv)
		return 0;
	if (!cdev->online)
		return 0;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}

static void cmf_reenable_delayed(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	cmf_reenable(cdev);
}

static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	if (ccw_device_notify(cdev, CIO_OPER)) {
		/* Reenable channel measurements, if needed. */
		PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}
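
/*
 * Illustrative sketch (not built): a ccw driver's ->notify() callback as
 * consumed by ccw_device_notify() above. A nonzero return claims the
 * device, so it is kept (and set disconnected on CIO_GONE/CIO_NO_PATH);
 * returning 0 lets the common I/O layer unregister it.
 * my_driver_notify() is a hypothetical name.
 */
#if 0
static int my_driver_notify(struct ccw_device *cdev, int event)
{
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		return 1;	/* keep the device, wait for reconnect */
	case CIO_OPER:
		return 1;	/* device is back, accept it */
	default:
		return 0;	/* let common I/O unregister the device */
	}
}
#endif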

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
			ccw_device_schedule_sch_unregister(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (!ccw_device_notify(cdev, CIO_GONE))
			ccw_device_schedule_sch_unregister(cdev);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (!ccw_device_notify(cdev, CIO_NO_PATH))
			ccw_device_schedule_sch_unregister(cdev);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Compare path group ids while skipping the first byte, which holds the
 * SNID path state and may differ between otherwise identical pgids.
 */
static int cmp_pgid(struct pgid *p1, struct pgid *p2)
{
	char *c1;
	char *c2;

	c1 = (char *)p1;
	c2 = (char *)p2;

	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
}

static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fall through */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cmd.cc = 1;
			cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.cmd.stctl =
				SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			ccw_device_set_timeout(cdev, 0);
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
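
/*
 * Illustrative sketch (not built): how a driver's interrupt handler
 * could recognize the fake irb delivered by ccw_device_verify_done()
 * above - a deferred condition code 1 for a start request that was
 * accepted while path verification was still in progress.
 * my_driver_irq() is a hypothetical name.
 */
#if 0
static void my_driver_irq(struct ccw_device *cdev, unsigned long intparm,
			  struct irb *irb)
{
	if (IS_ERR(irb))
		return;	/* -EIO/-ETIMEDOUT pseudo irb, handle separately */
	if (irb->scsw.cmd.cc == 1 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND)) {
		/* Deferred cc 1: the request was not started, retry it. */
		return;
	}
	/* Normal status handling follows here. */
}
#endif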

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
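
/*
 * Illustrative sketch (not built): driving ccw_device_offline() from
 * process context. The call only starts the transition (a disband of
 * the path group may still be in flight in DEV_STATE_DISBAND_PGID), so
 * the caller holds the ccwlock for the FSM call and then sleeps on
 * wait_q until ccw_device_done() reaches a final state.
 * my_set_offline() is a hypothetical name.
 */
#if 0
static int my_set_offline(struct ccw_device *cdev)
{
	int ret;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q,
			   cdev->private->state == DEV_STATE_OFFLINE ||
			   cdev->private->state == DEV_STATE_BOXED ||
			   cdev->private->state == DEV_STATE_NOT_OPER);
	return ret;
}
#endif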

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (!ccw_device_notify(cdev, CIO_GONE))
		ccw_device_schedule_sch_unregister(cdev);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = (struct irb *) __LC_IRB;
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Remember to clear irb to avoid residuals. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((scsw_stctl(&irb->scsw) ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!scsw_cc(&irb->scsw)))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	if (dev_event == DEV_EVENT_NOTOPER)
		cdev->private->state = DEV_STATE_NOT_OPER;
	else
		cdev->private->state = DEV_STATE_OFFLINE;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		ccw_device_set_timeout(cdev, HZ/10);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
		      "0.%x.%04x\n", cdev->private->state, dev_event,
		      cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	BUG();
}

/*
 * device state machine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER] = ccw_device_nop,
		[DEV_EVENT_INTERRUPT] = ccw_device_bug,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER] = ccw_device_nop,
		[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
		[DEV_EVENT_TIMEOUT] = ccw_device_bug,
		[DEV_EVENT_VERIFY] = ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY] = ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
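
/*
 * Note on dispatch: dev_fsm_event() (see device.h) indexes the jumptable
 * above, conceptually
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 *
 * so every state must supply an action for every event, if only
 * ccw_device_nop() or ccw_device_bug().
 */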