/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Michael Holzheu <holzheu@de.ibm.com>
 *               Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Stefan Bader <shbader@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>		/* for kernel parameters */
#include <linux/kmod.h>		/* for requesting modules */
#include <linux/spinlock.h>	/* for locks */
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>		/* for variable types */

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define PRINTK_HEADER "TAPE_CORE: "

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(void * data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock.
 */
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS"
};

static inline int
busid_to_int(char *bus_id)
{
	int	dec;
	int	d;
	char *	s;

	for (s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = d;
	for (s++, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = (dec << 8) + d;

	for (s++; *s != '\0'; s++) {
		if (*s >= '0' && *s <= '9') {
			d = *s - '0';
		} else if (*s >= 'a' && *s <= 'f') {
			d = *s - 'a' + 10;
		} else {
			d = *s - 'A' + 10;
		}
		dec = (dec << 4) + d;
	}

	return dec;
}

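/*
 * Worked example (editor's note, illustrative only): busid_to_int()
 * packs a "css.ssid.devno" bus id into one integer; the two decimal
 * prefixes land above bit 16 and the four hex digits of the device
 * number fill the low 16 bits. Assuming a bus id of "0.1.4711":
 *
 *	busid_to_int("0.1.4711")
 *		== (((0 << 8) + 1) << 16) + 0x4711
 *		== 0x00014711
 *
 * Note that the routine trusts the bus id format handed over by the
 * common I/O layer and performs no validation of its input.
 */
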
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = (struct tape_device *) dev->driver_data;
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};

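/*
 * Example (editor's note, illustrative only): once the attribute group
 * has been registered by tape_generic_probe() below, the read-only
 * files appear under the ccw device's sysfs directory, e.g. for an
 * assumed bus id of 0.0.0180:
 *
 *	# cat /sys/bus/ccw/devices/0.0.0180/state
 *	UNUSED
 *	# cat /sys/bus/ccw/devices/0.0.0180/operation
 *	---
 *
 * The exact path depends on the ccw bus layout, and the numeric value
 * shown by medium_state follows enum tape_medium_state in tape.h.
 */
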
/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	if (device->medium_state == newstate)
		return;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape is unloaded\n",
			   device->cdev->dev.bus_id);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape has been mounted\n",
			   device->cdev->dev.bus_id);
		break;
	default:
		/* print nothing */
		break;
	}
	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
}

/*
 * Stop a running ccw. Has to be called with the device lock held.
 */
static inline int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_work(&device->tape_dnr);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

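/*
 * Example (editor's note, illustrative only): minor numbers are handed
 * out in steps of TAPE_MINORS_PER_DEV (assumed to be 2, as defined in
 * tape.h), so the first device gets first_minor 0, the next one 2, and
 * so on. With the hard limit of 256 minors this allows for up to 128
 * devices; tape_assign_minor() also reuses the lowest gap left behind
 * by a previously unplugged device, since the list is kept sorted.
 */
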
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		PRINT_ERR("Cannot get module. Module gone.\n");
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

	/*
	 * Unwind in the reverse order of setup, so that a failure in
	 * tapechar_setup_device() still releases the discipline.
	 */
out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static inline void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct tape_device *device)
{
	if (!device) {
		PRINT_ERR("tape_generic_offline: no such device\n");
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		PRINT_WARN("(%s): Set offline failed "
			"- drive in use.\n",
			device->cdev->dev.bus_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

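/*
 * Editor's note (illustrative summary, not upstream documentation):
 * the per-device tape_state roughly moves through
 *
 *	TS_INIT ---> TS_UNUSED <---> TS_IN_USE / TS_BLKUSE
 *	   ^             |
 *	   |             v
 *	   +------- TS_NOT_OPER (device vanished)
 *
 * tape_generic_online() performs TS_INIT -> TS_UNUSED, tape_open() and
 * tape_release() toggle TS_UNUSED <-> TS_IN_USE, tape_generic_offline()
 * falls back to TS_INIT, and tape_generic_remove() ends every path in
 * TS_NOT_OPER.
 */
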
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for "
			   "tape info structure\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for modeset byte\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device_reference(struct tape_device *device)
{
	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
		atomic_inc_return(&device->ref_count));

	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 * The function returns a NULL pointer to be used by the caller
 * for clearing reference pointers.
 */
struct tape_device *
tape_put_device(struct tape_device *device)
{
	int remain;

	remain = atomic_dec_return(&device->ref_count);
	if (remain > 0) {
		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
	} else {
		if (remain < 0) {
			DBF_EVENT(4, "put device without reference\n");
			PRINT_ERR("put device without reference\n");
		} else {
			DBF_EVENT(4, "tape_free_device(%p)\n", device);
			kfree(device->modeset_byte);
			kfree(device);
		}
	}

	return NULL;
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_get_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device_reference(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}

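/*
 * Example (editor's note, illustrative only): the reference-counting
 * convention used by the frontends. tape_get_device() (or
 * tape_get_device_reference()) and tape_put_device() always pair up,
 * and tape_put_device() returns NULL precisely so the stale pointer
 * can be overwritten in the same statement:
 *
 *	struct tape_device *device;
 *
 *	device = tape_get_device(devindex);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	... use the device ...
 *	device = tape_put_device(device);  (device is NULL afterwards)
 */
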
/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
	cdev->dev.driver_data = device;
	device->cdev = cdev;
	device->cdev_id = busid_to_int(cdev->dev.bus_id);
	cdev->handler = __tape_do_irq;

	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);

	return 0;
}

static inline void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *	request;
	struct list_head *	l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = cdev->dev.driver_data;
	if (!device) {
		PRINT_ERR("No device pointer in tape_generic_remove!\n");
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fallthrough */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		PRINT_WARN("(%s): Drive in use vanished - "
			"expect trouble!\n",
			device->cdev->dev.bus_id);
		PRINT_WARN("State was %i\n", device->tape_state);
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
	}
}

/*
 * Allocate a new tape ccw request.
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request.
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device != NULL) {
		request->device = tape_put_device(request->device);
	}
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

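/*
 * Illustrative sketch (editor's note, not part of the driver): a
 * discipline typically builds its channel program on top of
 * tape_alloc_request(). A minimal one-CCW request could look like
 * this, assuming the tape_ccw_end() helper and the NOP command code
 * from tape_std.h:
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	tape_ccw_end(request->cpaddr, NOP, 0, NULL);
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 *
 * The real disciplines chain several CCWs and use request->cpdata for
 * command parameters and sense buffers.
 */
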
static inline int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static inline void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(void *data)
{
	struct tape_device *	device;

	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static inline void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to console/dbf.
 */
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
		struct irb *irb)
{
	unsigned int *sptr;

	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x  CSTAT: %02x  CPA: %04x\n",
		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
	if (request != NULL)
		PRINT_INFO("OP    : %s\n", tape_op_verbose[request->op]);

	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X\n",
		   sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X\n",
		   sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("--------------------------------------------------\n");
}

/*
 * Write sense data to dbf.
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char* op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x  CSTAT: %02x\n",
		  irb->scsw.dstat, irb->scsw.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static inline int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fallthrough */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p added to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}

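/*
 * Editor's note (illustrative): the completion handshake above uses
 * request->callback itself as the "done" flag. The callback is only
 * invoked once the request has left the queue, and __tape_wake_up()
 * clears request->callback before waking the waiter, so
 * "request->callback == NULL" serves as the wait_event() condition
 * without any extra completion flag. The interruptible variants below
 * follow the same pattern.
 */
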
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				wq,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop a running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			  "in interrupt\n", cdev->dev.bus_id);
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			PRINT_WARN("(%s): Request timed out\n",
				   cdev->dev.bus_id);
			/* fallthrough */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			PRINT_ERR("(%s): Unexpected i/o error %li\n",
				  cdev->dev.bus_id,
				  PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.count;

	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to
	 * TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				  "interrupt function.\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}

/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	tapeblock_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);
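
/*
 * Usage sketch (editor's note, illustrative only): tape_mtop() splits
 * large space counts into chunks of at most 500 before calling into
 * the discipline. For example, assuming MTFSF with mt_count == 1200,
 * the loop above issues:
 *
 *	fn(device, 500);	mt_count: 1200 -> 700
 *	fn(device, 500);	mt_count:  700 -> 200
 *	fn(device, 200);	remainder
 *
 * Any non-zero return code short-circuits the sequence and is passed
 * back to the caller unchanged.
 */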