// SPDX-License-Identifier: GPL-2.0
/*
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define pr_fmt(fmt) "tape: " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

/* Select the debug area used by the DBF_* macros in tape.h for this file. */
#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"
#include "tape_class.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(struct timer_list *t);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major
 * The list is protected by the rwlock
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_RDC] = "RDC",
};

/* Pack a ccw device id (subchannel set id and device number) into one int. */
static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 * replaced by a link to the cdev tree.
 */

/* sysfs: show the current medium state as its numeric enum value. */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return sysfs_emit(buf, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

/* sysfs: show the first minor number assigned to this device (-1 = none). */
static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return sysfs_emit(buf, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

/* sysfs: show the tape state as text, or "OFFLINE" if no minor is assigned. */
static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return sysfs_emit(buf, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

/*
 * sysfs: show the mnemonic of the request at the head of the queue,
 * "---" for an empty queue or "N/A" while the device has no minor.
 * The request queue is inspected under the ccw device lock.
 */
static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return sysfs_emit(buf, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = sysfs_emit(buf, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = sysfs_emit(buf, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

/* sysfs: show the block size configured for the character frontend. */
static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static const struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};

/*
 * Tape state functions.
 *
 * Move the device to a new tape state and wake up everybody waiting
 * on state_change_wq.  TS_NOT_OPER is final: once set, the state is
 * never changed again here.
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	/* Range-check before indexing tape_state_verbose. */
	if (device->tape_state < TS_SIZE && device->tape_state >=0 )
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

/* Deferred-work context for reporting a medium state change to user space. */
struct tape_med_state_work_data {
	struct tape_device *device;
	enum tape_medium_state state;
	struct work_struct work;
};

/*
 * Worker: print the LOADED/UNLOADED message and send the matching
 * MEDIUM_STATE uevent, then drop the device reference taken by
 * tape_med_state_work() and free the work item.
 */
static void
tape_med_state_work_handler(struct work_struct *work)
{
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
	struct tape_med_state_work_data *p =
		container_of(work, struct tape_med_state_work_data, work);
	struct tape_device *device = p->device;
	char *envp[] = { NULL, NULL };

	switch (p->state) {
	case MS_UNLOADED:
		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	case MS_LOADED:
		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	default:
		break;
	}
	tape_put_device(device);
	kfree(p);
}

/*
 * Schedule the user space notification for a medium state change.
 * GFP_ATOMIC: presumably callable from atomic context (e.g. under the
 * ccw device lock) -- TODO confirm.  On allocation failure the
 * notification is silently dropped.
 */
static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
	struct tape_med_state_work_data *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		INIT_WORK(&p->work, tape_med_state_work_handler);
		/* Reference is dropped by the work handler. */
		p->device = tape_get_device(device);
		p->state = state;
		schedule_work(&p->work);
	}
}

/*
 * Record a medium state transition: keep the GMT_DR_OPEN bit of
 * tape_generic_status in sync and notify user space on real
 * LOADED <-> UNLOADED transitions.  Wakes state_change_wq waiters.
 */
void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	enum tape_medium_state oldstate;

	oldstate = device->medium_state;
	if (oldstate == newstate)
		return;
	device->medium_state = newstate;
	switch(newstate){
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		if (oldstate == MS_LOADED)
			tape_med_state_work(device, MS_UNLOADED);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		if (oldstate == MS_UNLOADED)
			tape_med_state_work(device, MS_LOADED);
		break;
	default:
		break;
	}
	wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 * Returns 0 when the request was terminated or its cancellation was
 * deferred to the delayed-next-request worker; otherwise the last
 * ccw_device_clear() error after up to 5 retries.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			/* Clear accepted: request is done. */
			request->status	= TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			/* CIO busy: mark for cancel and retry via worker. */
			request->status	= TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	/* Walk the sorted list and take the first gap of minor numbers. */
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	/* NOTE(review): 256 is presumably the minor limit of the tape major
	 * -- confirm against the char frontend. */
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	/* Insert before tmp to keep the list sorted by first_minor. */
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* Remove device from the list and invalidate its minor number. */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

/* Undo tape_generic_online(): tear down frontend, discipline and minor. */
static void
tape_cleanup_device(struct tape_device *device)
{
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		/* Nothing was set up for this device; nothing to release. */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/* In use (or block use): refuse to go offline. */
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 * Returns the device with ref_count 1, or ERR_PTR(-ENOMEM).
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* GFP_DMA: presumably the modeset byte is referenced from a channel
	 * program and must be 31-bit addressable -- TODO confirm. */
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure
 * (including the DMA modeset byte).
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}

/*
 * Find tape device by a device index.
 * Returns a referenced device (caller must tape_put_device()) or
 * ERR_PTR(-ENODEV).
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		/* Drop the initial reference; this frees the device. */
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

/*
 * Fail all queued requests with -EIO and run their callbacks.
 * Used when the device vanished; called with the device lock held.
 */
static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *	request;
	struct list_head *	l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		/* No interrupt will come for a running request anymore. */
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		fallthrough;
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warn("%s: A tape unit was detached while in use\n",
			dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	/* Release the drvdata reference taken in tape_generic_probe(). */
	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request: cplength channel command words and
 * a datasize byte data buffer, both limited to one page and allocated
 * from DMA-capable memory for the channel subsystem.
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request: drops the request's device reference (if any)
 * and releases channel program and data buffer.
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

/*
 * Make sure the idal buffer array of the character frontend matches the
 * requested block size; (re)allocate it if it does not.
 * Returns 0, -EINVAL for sizes above MAX_BLOCKSIZE, or -ENOMEM.
 */
int
tape_check_idalbuffer(struct tape_device *device, size_t size)
{
	struct idal_buffer **new;
	size_t old_size = 0;

	old_size = idal_buffer_array_datasize(device->char_data.ibs);
	if (old_size == size)
		return 0;

	if (size > MAX_BLOCKSIZE) {
		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
			size, MAX_BLOCKSIZE);
		return -EINVAL;
	}

	/* The current idal buffer is not correct. Allocate a new one. */
	new = idal_buffer_array_alloc(size, 0);
	if (IS_ERR(new))
		return -ENOMEM;

	/* Free old idal buffer array */
	if (device->char_data.ibs)
		idal_buffer_array_free(&device->char_data.ibs);

	device->char_data.ibs = new;

	return 0;
}

/*
 * Start the channel program of a request.  Has to be called with the
 * device lock held.  -EBUSY from the common I/O layer is not an error:
 * the request stays queued and is retried by the delayed worker.
 */
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

/*
 * Walk the request queue and start (or cancel) requests until one is
 * started successfully.  Requests that fail to start are completed
 * with their error.  Has to be called with the device lock held.
 */
static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successful.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Delayed-work entry point: retry starting/cancelling queued requests
 * under the device lock (scheduled when the CIO layer reported -EBUSY).
 */
static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

/*
 * Timer callback: the long-busy wait expired; restart the queue and drop
 * the device reference held by the timer.
 */
static void tape_long_busy_timeout(struct timer_list *t)
{
	struct tape_device *device = timer_container_of(device, t, lb_timeout);
	struct tape_request *request;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

/*
 * Complete a request with the given rc, run its callback and kick off
 * the next queued request.  Has to be called with the device lock held.
 */
static void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
	       struct irb *irb)
{
	unsigned int *sptr;
	const char* op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		/* These operations are also allowed before first open. */
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		fallthrough;
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptible for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	/* callback == NULL doubles as the "request finished" flag. */
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptible for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	/* callback == NULL doubles as the "request finished" flag. */
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
			   (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	/* intparm is the tag passed to ccw_device_start() in __tape_start_io;
	 * NULL for unsolicited interrupts. */
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%08x): Request timed out\n",
				device->cdev_id);
			fallthrough;
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
				device->cdev_id, PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is an deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL) {
		request->rescnt = irb->scsw.cmd.count;
		memcpy(&request->irb, irb, sizeof(*irb));
	} else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		   !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			/* Only restart if we actually deleted the pending
			 * timer, otherwise the timeout handler runs. */
			if (timer_delete(&device->lb_timeout)) {
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Request that were canceled still come back with an interrupt.
	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
	 */
	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		/* Timer holds a device reference, dropped in the timeout
		 * handler or when the timer is deleted above. */
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char frontend.
1225 */ 1226 int 1227 tape_open(struct tape_device *device) 1228 { 1229 int rc; 1230 1231 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1232 if (device->tape_state == TS_NOT_OPER) { 1233 DBF_EVENT(6, "TAPE:nodev\n"); 1234 rc = -ENODEV; 1235 } else if (device->tape_state == TS_IN_USE) { 1236 DBF_EVENT(6, "TAPE:dbusy\n"); 1237 rc = -EBUSY; 1238 } else if (device->tape_state == TS_BLKUSE) { 1239 DBF_EVENT(6, "TAPE:dbusy\n"); 1240 rc = -EBUSY; 1241 } else if (device->discipline != NULL && 1242 !try_module_get(device->discipline->owner)) { 1243 DBF_EVENT(6, "TAPE:nodisc\n"); 1244 rc = -ENODEV; 1245 } else { 1246 tape_state_set(device, TS_IN_USE); 1247 rc = 0; 1248 } 1249 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1250 return rc; 1251 } 1252 1253 /* 1254 * Tape device release function used by tape_char frontend. 1255 */ 1256 int 1257 tape_release(struct tape_device *device) 1258 { 1259 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1260 if (device->tape_state == TS_IN_USE) 1261 tape_state_set(device, TS_UNUSED); 1262 module_put(device->discipline->owner); 1263 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1264 return 0; 1265 } 1266 1267 /* 1268 * Execute a magnetic tape command a number of times. 1269 */ 1270 int 1271 tape_mtop(struct tape_device *device, int mt_op, int mt_count) 1272 { 1273 tape_mtop_fn fn; 1274 int rc; 1275 1276 DBF_EVENT(6, "TAPE:mtio\n"); 1277 DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); 1278 DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); 1279 1280 if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) 1281 return -EINVAL; 1282 fn = device->discipline->mtop_array[mt_op]; 1283 if (fn == NULL) 1284 return -EINVAL; 1285 1286 /* We assume that the backends can handle count up to 500. 
*/ 1287 if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || 1288 mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { 1289 rc = 0; 1290 for (; mt_count > 500; mt_count -= 500) 1291 if ((rc = fn(device, 500)) != 0) 1292 break; 1293 if (rc == 0) 1294 rc = fn(device, mt_count); 1295 } else 1296 rc = fn(device, mt_count); 1297 return rc; 1298 1299 } 1300 1301 /* 1302 * Tape init function. 1303 */ 1304 static int 1305 tape_init (void) 1306 { 1307 TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long)); 1308 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); 1309 #ifdef DBF_LIKE_HELL 1310 debug_set_level(TAPE_DBF_AREA, 6); 1311 #endif 1312 DBF_EVENT(3, "tape init\n"); 1313 tape_proc_init(); 1314 tape_class_init(); 1315 tapechar_init (); 1316 tape_3490_init(); 1317 return 0; 1318 } 1319 1320 /* 1321 * Tape exit function. 1322 */ 1323 static void 1324 tape_exit(void) 1325 { 1326 DBF_EVENT(6, "tape exit\n"); 1327 1328 /* Get rid of the frontends */ 1329 tape_3490_exit(); 1330 tapechar_exit(); 1331 tape_class_exit(); 1332 tape_proc_cleanup(); 1333 debug_unregister (TAPE_DBF_AREA); 1334 } 1335 1336 MODULE_AUTHOR("IBM Corporation"); 1337 MODULE_DESCRIPTION("s390 channel-attached tape device driver"); 1338 MODULE_LICENSE("GPL"); 1339 1340 module_init(tape_init); 1341 module_exit(tape_exit); 1342 1343 EXPORT_SYMBOL(tape_generic_remove); 1344 EXPORT_SYMBOL(tape_generic_probe); 1345 EXPORT_SYMBOL(tape_generic_online); 1346 EXPORT_SYMBOL(tape_generic_offline); 1347 EXPORT_SYMBOL(tape_put_device); 1348 EXPORT_SYMBOL(tape_get_device); 1349 EXPORT_SYMBOL(tape_state_verbose); 1350 EXPORT_SYMBOL(tape_op_verbose); 1351 EXPORT_SYMBOL(tape_state_set); 1352 EXPORT_SYMBOL(tape_med_state_set); 1353 EXPORT_SYMBOL(tape_alloc_request); 1354 EXPORT_SYMBOL(tape_free_request); 1355 EXPORT_SYMBOL(tape_dump_sense_dbf); 1356 EXPORT_SYMBOL(tape_do_io); 1357 EXPORT_SYMBOL(tape_do_io_async); 1358 EXPORT_SYMBOL(tape_do_io_interruptible); 1359 EXPORT_SYMBOL(tape_cancel_io); 1360 
EXPORT_SYMBOL(tape_mtop); 1361