/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"

int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}

int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive: the new flags must not
	 * conflict with each other or with options that are already set.
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}

void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}
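
/*
 * Usage sketch (hypothetical driver code, not part of this file): options
 * are typically set once before the first I/O, e.g. from the driver's
 * probe or set_online callback:
 *
 *	if (ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP))
 *		return -EINVAL;
 *
 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL stay mutually exclusive
 * whether requested in a single call or accumulated over several calls;
 * ccw_device_set_options_mask() instead overwrites all four option bits.
 */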

int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY ||
	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm, __u8 key,
			     unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
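
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * build a channel program and start it with the ccw device lock held; the
 * completion interrupt is delivered to cdev->handler with the same intparm:
 *
 *	struct ccw1 ccw;
 *	unsigned long flags;
 *	int rc;
 *
 *	ccw.cmd_code = CCW_CMD_SENSE_ID;
 *	ccw.flags = CCW_FLAG_SLI;
 *	ccw.count = sizeof(*buf);	// buf: driver-allocated DMA buffer
 *	ccw.cda = (__u32) __pa(buf);
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_start(cdev, &ccw, MY_INTPARM, 0, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *
 * MY_INTPARM is a driver-chosen cookie; lpm = 0 requests "any path" and
 * flags = 0 requests no special cio options.
 */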

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * We call the device driver's handler if one of the following holds:
	 * - we received ending status,
	 * - the handler requested to see all interrupts (repall),
	 * - we received an intermediate status,
	 * - fast notification was requested and primary status arrived, or
	 * - this is an unsolicited interrupt.
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}

/*
 * Search for a CIW with command type @ct in the extended SenseID data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}

__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return 0;
	else
		return sch->lpm;
}
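
/*
 * The two helpers below implement a small synchronous I/O protocol for the
 * internal RDC/RCD requests: the device driver's interrupt handler is
 * temporarily replaced by ccw_device_wake_up(), which abuses
 * cdev->private->intparm as a status word (0 on success, -EIO on a
 * non-retryable error, -EAGAIN if the request should be retried) and wakes
 * up the waiter in __ccw_device_retry_loop().
 */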
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
	if (!ip)
		/* unsolicited interrupt */
		return;

	/* Abuse intparm for error reporting. */
	if (IS_ERR(irb))
		cdev->private->intparm = -EIO;
	else if (irb->scsw.cc == 1)
		/* Retry for deferred condition code. */
		cdev->private->intparm = -EAGAIN;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command
		 * reject or intervention required. Also check for long busy
		 * conditions.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
			 !(irb->ecw[0] &
			   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
			 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
			 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
			cdev->private->intparm = -EAGAIN;
		else
			cdev->private->intparm = -EIO;

	} else
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
}

static int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw,
			long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ccw_device_set_timeout(cdev, 60 * HZ);
		ret = cio_start(sch, ccw, lpm);
		if (ret != 0)
			ccw_device_set_timeout(cdev, 0);
		if (ret == -EBUSY) {
			/* Try again later. */
			spin_unlock_irq(sch->lock);
			msleep(10);
			spin_lock_irq(sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(sch->lock);
		/* Check at least for channel end / device end. */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(sch->lock);
		msleep(10);
		spin_lock_irq(sch->lock);
	} while (1);

	return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   called for online device, lock not held
 */
int
read_dev_chars(struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(4, "rddevch");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	ret = set_normalized_cda(rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	clear_normalized_cda(rdc_ccw);
	kfree(rdc_ccw);

	return ret;
}
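
/*
 * Usage sketch (hypothetical caller, not part of this file): the RDC
 * buffer is caller-allocated; set_normalized_cda() above builds an IDAL
 * if the buffer is not directly addressable by the channel:
 *
 *	struct my_rdc_data *rdc = kzalloc(sizeof(*rdc), GFP_KERNEL);
 *	void *buf = rdc;
 *
 *	if (rdc && read_dev_chars(cdev, &buf, sizeof(*rdc)) == 0)
 *		// ... rdc now holds the device characteristics ...
 */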

/*
 * Read Configuration data using path mask.
 */
int
read_conf_data_lpm(struct ccw_device *cdev, void **buffer, int *length,
		   __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(4, "rdconf");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	/*
	 * Scan for the RCD command in the extended SenseID data.
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	/* Adjust requested path mask to exclude varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}

	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa(rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	/*
	 * On success, update the caller's output parameters; on failure,
	 * free the buffer again.
	 */
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);

	return ret;
}

/*
 * Read Configuration data.
 */
int
read_conf_data(struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm(cdev, buffer, length, 0);
}
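
/*
 * Usage sketch (hypothetical caller, not part of this file): in contrast
 * to read_dev_chars(), the result buffer is allocated here and ownership
 * passes to the caller on success:
 *
 *	void *rcd = NULL;
 *	int len = 0;
 *
 *	if (read_conf_data(cdev, &rcd, &len) == 0) {
 *		// ... use len bytes of configuration data at rcd ...
 *		kfree(rcd);
 *	}
 */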

/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
	void *buf, *buf2;
	unsigned long flags;
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;

	if (cdev->drv && !cdev->private->options.force)
		return -EINVAL;

	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);

	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf2) {
		kfree(buf);
		return -ENOMEM;
	}
	spin_lock_irqsave(sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	if (ret)
		goto out_unlock;
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
	if (ret) {
		cio_disable_subchannel(sch); //FIXME: return code?
		goto out_unlock;
	}
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
		ret = -EIO;
	/* Clear irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
	kfree(buf);
	kfree(buf2);
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return chsc_get_chp_desc(sch, chp_no);
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->schid.sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->dev_id.devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);