/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"

int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
        /*
         * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL are mutually
         * exclusive.
         */
        if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
            (flags & CCWDEV_REPORT_ALL))
                return -EINVAL;
        cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
        cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
        cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
        cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
        return 0;
}
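/*
 * Example (illustrative sketch, not part of this file's build): a CCW
 * device driver typically sets its options once, e.g. from its set_online
 * callback, before starting any I/O. The error check matters because
 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL cannot be combined:
 *
 *      ret = ccw_device_set_options(cdev,
 *                                   CCWDEV_DO_PATHGROUP | CCWDEV_REPORT_ALL);
 *      if (ret)
 *              return ret;
 */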
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_clear(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
                     unsigned long intparm, __u8 lpm, __u8 key,
                     unsigned long flags)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
                        cdev->private->flags.fake_irb = 1;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
                        /* There's already a fake I/O around. */
                        return -EBUSY;
        }
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
             !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
            cdev->private->flags.doverify)
                return -EBUSY;
        ret = cio_set_options(sch, flags);
        if (ret)
                return ret;
        ret = cio_start_key(sch, cpa, lpm, key);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
                             unsigned long intparm, __u8 lpm, __u8 key,
                             unsigned long flags, int expires)
{
        int ret;

        if (!cdev)
                return -ENODEV;
        ccw_device_set_timeout(cdev, expires);
        ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
        if (ret != 0)
                ccw_device_set_timeout(cdev, 0);
        return ret;
}

int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        return ccw_device_start_key(cdev, cpa, intparm, lpm,
                                    PAGE_DEFAULT_KEY, flags);
}

int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
                                            PAGE_DEFAULT_KEY, flags,
                                            expires);
}
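/*
 * Example (illustrative sketch, not compiled here): a driver issues a
 * channel program by filling in a chain of struct ccw1 and handing it to
 * ccw_device_start() while holding the ccw device lock; completion is
 * reported through cdev->handler. The buffer "buf", its size, and the
 * intparm value 0xC0FFEE are hypothetical:
 *
 *      struct ccw1 ccw;
 *      unsigned long flags;
 *      int ret;
 *
 *      ccw.cmd_code = CCW_CMD_RDC;
 *      ccw.flags = CCW_FLAG_SLI;
 *      ccw.count = 64;
 *      ccw.cda = (__u32) __pa(buf);
 *      spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *      ret = ccw_device_start(cdev, &ccw, 0xC0FFEE, 0, 0);
 *      spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *
 * (lpm 0 means: use any available path; on success the interrupt handler
 * later receives 0xC0FFEE as intparm together with the irb.)
 */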
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_halt(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

int
ccw_device_resume(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
                return -EINVAL;
        return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * We call the device driver's interrupt handler if one of the
         * following is true:
         * - we received ending status
         * - the driver requested to see all interrupts
         * - we received an intermediate status
         * - fast notification was requested (primary status)
         * - unsolicited interrupts
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}

/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
        int ciw_cnt;

        if (cdev->private->flags.esid == 0)
                return NULL;
        for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
                if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
                        return cdev->private->senseid.ciw + ciw_cnt;
        return NULL;
}

__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return 0;
        else
                return sch->vpm;
}
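/*
 * Example (sketch): read_conf_data_lpm() below uses ccw_device_get_ciw()
 * to locate the device's Read Configuration Data command; a driver could
 * do the same for other CIW types:
 *
 *      struct ciw *ciw;
 *
 *      ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *      if (!ciw || ciw->cmd == 0)
 *              return -EOPNOTSUPP;
 *
 * (ciw->cmd is then the CCW command code to issue, ciw->count the
 * required buffer size.)
 */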
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
        if (!ip)
                /* unsolicited interrupt */
                return;

        /* Abuse intparm for error reporting. */
        if (IS_ERR(irb))
                cdev->private->intparm = -EIO;
        else if ((irb->scsw.dstat !=
                  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
                 (irb->scsw.cstat != 0)) {
                /*
                 * We didn't get channel end / device end. Check if path
                 * verification has been started; we can retry after it has
                 * finished. We also retry unit checks except for command
                 * reject or intervention required.
                 */
                if (cdev->private->flags.doverify ||
                    cdev->private->state == DEV_STATE_VERIFY)
                        cdev->private->intparm = -EAGAIN;
                else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                         !(irb->ecw[0] &
                           (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
                        cdev->private->intparm = -EAGAIN;
                else
                        cdev->private->intparm = -EIO;
        } else
                cdev->private->intparm = 0;
        wake_up(&cdev->private->wait_q);
}

static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic,
                        __u8 lpm)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start(sch, ccw, lpm);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        msleep(10);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end. */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* Try again later. */
                spin_unlock_irq(&sch->lock);
                msleep(10);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   Called for online device, lock not held.
 */
int
read_dev_chars(struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(4, "rddevch");
        CIO_TRACE_EVENT(4, sch->dev.bus_id);

        rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        ret = set_normalized_cda(rdc_ccw, *buffer);
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda(rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}
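/*
 * Example (sketch): the RDC buffer is allocated by the caller and passed
 * in indirectly via @buffer; the size 64 is a hypothetical,
 * device-dependent value:
 *
 *      void *rdc = kzalloc(64, GFP_KERNEL | GFP_DMA);
 *
 *      if (!rdc)
 *              return -ENOMEM;
 *      ret = read_dev_chars(cdev, &rdc, 64);
 */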
/*
 * Read Configuration Data using a path mask.
 */
int
read_conf_data_lpm(struct ccw_device *cdev, void **buffer, int *length,
                   __u8 lpm)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(4, "rdconf");
        CIO_TRACE_EVENT(4, sch->dev.bus_id);

        /*
         * Scan for the RCD command in the extended SenseID data.
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa(rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * On success, hand the buffer and its length back to the caller.
         */
        if (ret) {
                kfree(rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}

/*
 * Read Configuration Data.
 */
int
read_conf_data(struct ccw_device *cdev, void **buffer, int *length)
{
        return read_conf_data_lpm(cdev, buffer, length, 0);
}
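/*
 * Example (sketch): unlike read_dev_chars(), read_conf_data() allocates
 * the result buffer itself; on success the caller owns it and must
 * kfree() it when done:
 *
 *      void *rcd = NULL;
 *      int len = 0;
 *
 *      ret = read_conf_data(cdev, &rcd, &len);
 *      if (ret == 0) {
 *              ... evaluate len bytes at rcd ...
 *              kfree(rcd);
 *      }
 */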
/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
        void *buf, *buf2;
        unsigned long flags;
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;

        if (cdev->drv && !cdev->private->options.force)
                return -EINVAL;

        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(2, "stl lock");
        CIO_TRACE_EVENT(2, cdev->dev.bus_id);

        buf = kmalloc(32, GFP_DMA | GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf2 = kmalloc(32, GFP_DMA | GFP_KERNEL);
        if (!buf2) {
                kfree(buf);
                return -ENOMEM;
        }
        spin_lock_irqsave(&sch->lock, flags);
        ret = cio_enable_subchannel(sch, 3);
        if (ret)
                goto out_unlock;
        /*
         * Setup ccw. We chain an unconditional reserve and a release so we
         * only break the lock.
         */
        cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
        cdev->private->iccws[0].cda = (__u32) __pa(buf);
        cdev->private->iccws[0].count = 32;
        cdev->private->iccws[0].flags = CCW_FLAG_CC;
        cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
        cdev->private->iccws[1].cda = (__u32) __pa(buf2);
        cdev->private->iccws[1].count = 32;
        cdev->private->iccws[1].flags = 0;
        ret = cio_start(sch, cdev->private->iccws, 0);
        if (ret) {
                cio_disable_subchannel(sch); /* FIXME: return code? */
                goto out_unlock;
        }
        cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
        spin_unlock_irqrestore(&sch->lock, flags);
        wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
        spin_lock_irqsave(&sch->lock, flags);
        cio_disable_subchannel(sch); /* FIXME: return code? */
        if ((cdev->private->irb.scsw.dstat !=
             (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
            (cdev->private->irb.scsw.cstat != 0))
                ret = -EIO;
        /* Clear irb. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
        kfree(buf);
        kfree(buf2);
        spin_unlock_irqrestore(&sch->lock, flags);
        return ret;
}

void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        return chsc_get_chp_desc(sch, chp_no);
}

/* FIXME: these have to go: */

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);