/*
 *  drivers/s390/char/tape_core.c
 *    basic functions of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq(struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attributes should be
 * replaced by a link to the cdev tree.
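 *
 * The attributes are created on the ccw device and show up in sysfs, so
 * they can be inspected from user space; for example (illustrative
 * sketch, the bus ID 0.0.0150 is only an example):
 *
 *	cat /sys/bus/ccw/devices/0.0.0150/medium_state
 *	cat /sys/bus/ccw/devices/0.0.0150/operation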
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
				 list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	if (device->medium_state == newstate)
		return;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		if (device->medium_state == MS_LOADED)
			pr_info("%s: The tape cartridge has been successfully "
				"unloaded\n", dev_name(&device->cdev->dev));
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		if (device->medium_state == MS_UNLOADED)
			pr_info("%s: A tape cartridge has been mounted\n",
				dev_name(&device->cdev->dev));
		break;
	default:
		// print nothing
		break;
	}
	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
}

/*
 * Stop a running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (rc < 0), the device remains in the offline state.
 * This can happen, for example, if the device is assigned somewhere else.
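 *
 * A discipline driver typically calls this from its own set_online
 * callback; roughly like the following sketch, which is modeled on the
 * 3480/3490 discipline (names may differ between kernel versions):
 *
 *	static int tape_34xx_online(struct ccw_device *cdev)
 *	{
 *		return tape_generic_online(dev_get_drvdata(&cdev->dev),
 *					   &tape_discipline_34xx);
 *	}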
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off, which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * restore the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
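 *
 * The common I/O layer invokes this when the device is set offline from
 * user space; for example (illustrative, bus ID is an example only):
 *
 *	echo 0 > /sys/bus/ccw/devices/0.0.0150/online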
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 * Callers must not use the device structure afterwards and should
 * clear their reference pointers.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}

/*
 * Driverfs tape probe function.
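 *
 * Disciplines hook the generic probe/remove/online/offline entry points
 * into their struct ccw_driver; roughly like this sketch (field layout
 * of struct ccw_driver varies between kernel versions):
 *
 *	static struct ccw_driver tape_34xx_driver = {
 *		.probe		= tape_generic_probe,
 *		.remove		= tape_generic_remove,
 *		.set_online	= tape_34xx_online,
 *		.set_offline	= tape_generic_offline,
 *	};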
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects that the
 * device is gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fallthrough */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request.
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request.
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * The request has already been stopped. We have to wait
		 * until the request is removed from the queue in the
		 * interrupt handler.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	device->lb_timeout.data = 0UL;
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(struct tape_device *device, struct tape_request *request,
		   int rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf.
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
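 *
 * Callers take the ccw device lock around the call; the pattern used by
 * tape_do_io_async() below looks like this (sketch):
 *
 *	spin_lock_irq(get_ccwdev_lock(device->cdev));
 *	rc = __tape_start_request(device, request);
 *	spin_unlock_irq(get_ccwdev_lock(device->cdev));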
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fallthrough */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p added to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue and try to start it if the
 * tape is idle. Return without waiting for the end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop a running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer.
 */
static void
__tape_do_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%s): Request timed out\n",
				dev_name(&cdev->dev));
			/* fallthrough */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%s): Unexpected i/o error %li\n",
				dev_name(&cdev->dev),
				PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			  device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy? */
		struct tape_request *req;

		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape: device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state is set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		device->lb_timeout.data =
			(unsigned long) tape_get_device(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by the tape_char & tape_block frontends.
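 *
 * A frontend is expected to pair this with tape_release(); roughly
 * (sketch, error handling trimmed):
 *
 *	if (tape_open(device) == 0) {
 *		rc = tape_do_io(device, request);
 *		...
 *		tape_release(device);
 *	}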
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by the tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/*
	 * We assume that the backends can handle a count of up to 500,
	 * so larger counts are issued in chunks; e.g. mt_count == 1200
	 * becomes fn(device, 500), fn(device, 500), fn(device, 200).
	 */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}

/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	tapeblock_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);