/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>			/* for kernel parameters */
#include <linux/kmod.h>			/* for requesting modules */
#include <linux/spinlock.h>		/* for locks */
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>			/* for variable types */

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define PRINTK_HEADER "TAPE_CORE: "

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(void * data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock
 */
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS"
};
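/*
 * busid_to_int() below packs a bus_id string of the form "x.y.zzzz"
 * (decimal css id, decimal subchannel set id, hexadecimal device
 * number) into a single integer. A worked example of the arithmetic,
 * assuming a four-digit device number: "0.1.012f" yields
 * ((0 << 8) + 1) << 16 | 0x012f = 0x0001012f. Note that there is no
 * validation; a well-formed bus_id from the common I/O layer is
 * assumed.
 */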
static inline int
busid_to_int(char *bus_id)
{
	int dec;
	int d;
	char *s;

	for (s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = d;
	for (s++, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = (dec << 8) + d;

	for (s++; *s != '\0'; s++) {
		if (*s >= '0' && *s <= '9') {
			d = *s - '0';
		} else if (*s >= 'a' && *s <= 'f') {
			d = *s - 'a' + 10;
		} else {
			d = *s - 'A' + 10;
		}
		dec = (dec << 4) + d;
	}

	return dec;
}

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = (struct tape_device *) dev->driver_data;
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
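/*
 * The group above is attached to the ccw device's kobject in
 * tape_generic_probe() below, so these attributes appear next to the
 * common I/O attributes of the device, e.g. (path assumed from the
 * usual ccw bus layout) /sys/bus/ccw/devices/0.0.xxxx/state.
 */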
/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "old ts: %s\n", str);
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "new ts: %s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	if (device->medium_state == newstate)
		return;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape is unloaded\n",
			device->cdev->dev.bus_id);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape has been mounted\n",
			device->cdev->dev.bus_id);
		break;
	default:
		/* print nothing */
		break;
	}
	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static inline int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_work(&device->tape_dnr);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
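/*
 * Minor numbers are handed out in steps of TAPE_MINORS_PER_DEV, and
 * tape_assign_minor() reuses the first gap in the sorted list. An
 * illustrative run, assuming TAPE_MINORS_PER_DEV is 2: three devices
 * get first_minor 0, 2 and 4; if the device with first_minor 2 goes
 * away, the next device to come online gets 2 again rather than 6.
 */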
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		PRINT_ERR("Cannot get module. Module gone.\n");
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static inline void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct tape_device *device)
{
	if (!device) {
		PRINT_ERR("tape_generic_offline: no such device\n");
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		PRINT_WARN("(%s): Set offline failed "
			"- drive in use.\n",
			device->cdev->dev.bus_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
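/*
 * Informational sketch of the state transitions driven by the
 * functions in this file (states from enum tape_state; the TS_BLKUSE
 * entry is assumed to be set by the block device frontend):
 *
 *	tape_generic_probe()	device allocated in TS_INIT
 *	tape_generic_online()	TS_INIT   -> TS_UNUSED
 *	tape_open()		TS_UNUSED -> TS_IN_USE
 *	tape_release()		TS_IN_USE -> TS_UNUSED
 *	tape_generic_offline()	TS_UNUSED -> TS_INIT
 *	tape_generic_remove()	any state -> TS_NOT_OPER
 */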
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = (struct tape_device *)
		kmalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for "
			"tape info structure\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(device, 0, sizeof(struct tape_device));
	device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for modeset byte\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device_reference(struct tape_device *device)
{
	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
		atomic_inc_return(&device->ref_count));

	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 * The function returns a NULL pointer to be used by the caller
 * for clearing reference pointers.
 */
struct tape_device *
tape_put_device(struct tape_device *device)
{
	int remain;

	remain = atomic_dec_return(&device->ref_count);
	if (remain > 0) {
		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
	} else {
		if (remain < 0) {
			DBF_EVENT(4, "put device without reference\n");
			PRINT_ERR("put device without reference\n");
		} else {
			DBF_EVENT(4, "tape_free_device(%p)\n", device);
			kfree(device->modeset_byte);
			kfree(device);
		}
	}

	return NULL;
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_get_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device_reference(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
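/*
 * The asymmetric return values of tape_get_device_reference() and
 * tape_put_device() above allow a compact caller pattern (a sketch,
 * not code taken from this driver):
 *
 *	struct tape_device *ref;
 *
 *	ref = tape_get_device_reference(device);
 *	... use ref ...
 *	ref = tape_put_device(ref);	// ref is now NULL
 *
 * Assigning the NULL result back clears the stale pointer in the same
 * statement that drops the reference.
 */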
/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
	cdev->dev.driver_data = device;
	device->cdev = cdev;
	device->cdev_id = busid_to_int(cdev->dev.bus_id);
	cdev->handler = __tape_do_irq;

	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);

	return 0;
}

static inline void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
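/*
 * Note on __tape_discard_requests(): the tape_put_device() call drops
 * the reference that __tape_start_request() takes for each queued
 * request, and every callback sees rc == -EIO. Requests that were
 * running are marked TAPE_REQUEST_DONE first, since no interrupt will
 * arrive for them once the device is gone.
 */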
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = cdev->dev.driver_data;
	if (!device) {
		PRINT_ERR("No device pointer in tape_generic_remove!\n");
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fall through */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		PRINT_WARN("(%s): Drive in use vanished - "
			"expect trouble!\n",
			device->cdev->dev.bus_id);
		PRINT_WARN("State was %i\n", device->tape_state);
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
		GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(request, 0, sizeof(struct tape_request));
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
			GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
		memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
		memset(request->cpdata, 0, datasize);
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device != NULL)
		request->device = tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

static inline int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}
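/*
 * The -EBUSY handling above means a busy channel subsystem never
 * fails a request outright: the request stays TAPE_REQUEST_QUEUED and
 * the tape_dnr work item runs tape_delayed_next_request() (below),
 * which retries it under the device lock from process context.
 */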
static inline void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL)
			rc = __tape_cancel_io(device, request);
		else
			rc = __tape_start_io(device, request);
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(void *data)
{
	struct tape_device *device;

	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static inline void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to console/dbf
 */
void
tape_dump_sense(struct tape_device *device, struct tape_request *request,
		struct irb *irb)
{
	unsigned int *sptr;

	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x  CSTAT: %02x  CPA: %04x\n",
		irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
	if (request != NULL)
		PRINT_INFO("OP    : %s\n", tape_op_verbose[request->op]);

	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X\n",
		sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X\n",
		sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("--------------------------------------------------\n");
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x  CSTAT: %02x\n",
		irb->scsw.dstat, irb->scsw.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP : %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}
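/*
 * Request states used by the queue logic in this file (a summary
 * drawn from the code here, not an authoritative list):
 *
 *	TAPE_REQUEST_QUEUED	on the queue, not yet started
 *	TAPE_REQUEST_IN_IO	started, waiting for the interrupt
 *	TAPE_REQUEST_CANCEL	cancel requested while the common I/O
 *				layer was busy; retried by the work queue
 *	TAPE_REQUEST_DONE	finished (or canceled / discarded)
 */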
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static inline int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fall through */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p added to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
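/*
 * A minimal synchronous caller of tape_do_io() would look roughly
 * like this (a sketch; real channel programs are built by the
 * disciplines, e.g. in tape_std.c):
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	... fill request->cpaddr with a one-CCW channel program ...
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 *	return rc;
 */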
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				wq,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}
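/*
 * The handler below is registered as cdev->handler in
 * tape_generic_probe(). The common I/O layer is assumed to invoke it
 * with the ccw device lock already held, which is why it can walk the
 * request queue without taking the lock itself.
 */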
/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			"in interrupt\n", cdev->dev.bus_id);
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			PRINT_WARN("(%s): Request timed out\n",
				cdev->dev.bus_id);
			/* fall through */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			PRINT_ERR("(%s): Unexpected i/o error %li\n",
				cdev->dev.bus_id,
				PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. Restart the request now.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
		PRINT_WARN("(%s): deferred cc=%i. restarting\n",
			cdev->dev.bus_id,
			irb->scsw.cc);
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.count;

	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				"interrupt function.\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
	return 0;
}
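/*
 * tape_mtop() below splits large repeat counts for the space/file
 * operations into chunks of at most 500, since the backends are only
 * assumed to handle counts up to 500. A worked example: MTFSF with
 * mt_count 1200 becomes fn(device, 500), fn(device, 500),
 * fn(device, 200), stopping early if any chunk fails.
 */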
/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}

/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init: ($Revision: 1.54 $)\n");
	tape_proc_init();
	tapechar_init();
	tapeblock_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached "
		   "tape device driver ($Revision: 1.54 $)");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);